143 files changed, 4876 insertions, 2300 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 68e7694c0ac7..a2d8805c03d5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -823,6 +823,9 @@ and is between 256 and 4096 characters. It is defined in the file | |||
823 | 823 | ||
824 | hlt [BUGS=ARM,SH] | 824 | hlt [BUGS=ARM,SH] |
825 | 825 | ||
826 | hvc_iucv= [S390] Number of z/VM IUCV Hypervisor console (HVC) | ||
827 | back-ends. Valid parameters: 0..8 | ||
828 | |||
826 | i8042.debug [HW] Toggle i8042 debug mode | 829 | i8042.debug [HW] Toggle i8042 debug mode |
827 | i8042.direct [HW] Put keyboard port into non-translated mode | 830 | i8042.direct [HW] Put keyboard port into non-translated mode |
828 | i8042.dumbkbd [HW] Pretend that controller can only read data from | 831 | i8042.dumbkbd [HW] Pretend that controller can only read data from |
@@ -2292,6 +2295,14 @@ and is between 256 and 4096 characters. It is defined in the file | |||
2292 | See comment before function dc390_setup() in | 2295 | See comment before function dc390_setup() in |
2293 | drivers/scsi/tmscsim.c. | 2296 | drivers/scsi/tmscsim.c. |
2294 | 2297 | ||
2298 | topology= [S390] | ||
2299 | Format: {off | on} | ||
2300 | Specify whether the kernel should make use of the CPU | ||
2301 | topology information if the hardware supports it. | ||
2302 | The scheduler will make use of this information and, | ||
2303 | e.g., base its process migration decisions on it. | ||
2304 | Default is off. | ||
2305 | |||
2295 | tp720= [HW,PS2] | 2306 | tp720= [HW,PS2] |
2296 | 2307 | ||
2297 | trix= [HW,OSS] MediaTrix AudioTrix Pro | 2308 | trix= [HW,OSS] MediaTrix AudioTrix Pro |
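Both new parameters documented above are plain kernel command-line switches. A minimal illustrative boot line (the specific values are examples, not defaults taken from this patch):

    root=/dev/dasda1 hvc_iucv=4 topology=on

Here hvc_iucv=4 would register four IUCV-backed HVC terminal back-ends, and topology=on lets the scheduler use the CPU topology information described above.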
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index d812929390e4..cd46f023ec6d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -267,7 +267,7 @@ extern int ucache_bsize; | |||
267 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | 267 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES |
268 | struct linux_binprm; | 268 | struct linux_binprm; |
269 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | 269 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, |
270 | int executable_stack); | 270 | int uses_interp); |
271 | #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); | 271 | #define VDSO_AUX_ENT(a,b) NEW_AUX_ENT(a,b); |
272 | 272 | ||
273 | #endif /* __KERNEL__ */ | 273 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 65639a43e644..f7ec7d0888fe 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -184,8 +184,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
184 | * This is called from binfmt_elf, we create the special vma for the | 184 | * This is called from binfmt_elf, we create the special vma for the |
185 | * vDSO and insert it into the mm struct tree | 185 | * vDSO and insert it into the mm struct tree |
186 | */ | 186 | */ |
187 | int arch_setup_additional_pages(struct linux_binprm *bprm, | 187 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) |
188 | int executable_stack) | ||
189 | { | 188 | { |
190 | struct mm_struct *mm = current->mm; | 189 | struct mm_struct *mm = current->mm; |
191 | struct page **vdso_pagelist; | 190 | struct page **vdso_pagelist; |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8116a3328a19..8152fefc97b9 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT | |||
43 | config GENERIC_TIME | 43 | config GENERIC_TIME |
44 | def_bool y | 44 | def_bool y |
45 | 45 | ||
46 | config GENERIC_TIME_VSYSCALL | ||
47 | def_bool y | ||
48 | |||
46 | config GENERIC_CLOCKEVENTS | 49 | config GENERIC_CLOCKEVENTS |
47 | def_bool y | 50 | def_bool y |
48 | 51 | ||
@@ -66,10 +69,15 @@ config PGSTE | |||
66 | bool | 69 | bool |
67 | default y if KVM | 70 | default y if KVM |
68 | 71 | ||
72 | config VIRT_CPU_ACCOUNTING | ||
73 | def_bool y | ||
74 | |||
69 | mainmenu "Linux Kernel Configuration" | 75 | mainmenu "Linux Kernel Configuration" |
70 | 76 | ||
71 | config S390 | 77 | config S390 |
72 | def_bool y | 78 | def_bool y |
79 | select USE_GENERIC_SMP_HELPERS if SMP | ||
80 | select HAVE_FUNCTION_TRACER | ||
73 | select HAVE_OPROFILE | 81 | select HAVE_OPROFILE |
74 | select HAVE_KPROBES | 82 | select HAVE_KPROBES |
75 | select HAVE_KRETPROBES | 83 | select HAVE_KRETPROBES |
@@ -225,6 +233,14 @@ config MARCH_Z9_109 | |||
225 | Class (z9 BC). The kernel will be slightly faster but will not | 233 | Class (z9 BC). The kernel will be slightly faster but will not |
226 | work on older machines such as the z990, z890, z900, and z800. | 234 | work on older machines such as the z990, z890, z900, and z800. |
227 | 235 | ||
236 | config MARCH_Z10 | ||
237 | bool "IBM System z10" | ||
238 | help | ||
239 | Select this to enable optimizations for IBM System z10. The | ||
240 | kernel will be slightly faster but will not work on older | ||
241 | machines such as the z990, z890, z900, z800, z9-109, z9-ec | ||
242 | and z9-bc. | ||
243 | |||
228 | endchoice | 244 | endchoice |
229 | 245 | ||
230 | config PACK_STACK | 246 | config PACK_STACK |
@@ -343,16 +359,6 @@ config QDIO | |||
343 | 359 | ||
344 | If unsure, say Y. | 360 | If unsure, say Y. |
345 | 361 | ||
346 | config QDIO_DEBUG | ||
347 | bool "Extended debugging information" | ||
348 | depends on QDIO | ||
349 | help | ||
350 | Say Y here to get extended debugging output in | ||
351 | /sys/kernel/debug/s390dbf/qdio... | ||
352 | Warning: this option reduces the performance of the QDIO module. | ||
353 | |||
354 | If unsure, say N. | ||
355 | |||
356 | config CHSC_SCH | 362 | config CHSC_SCH |
357 | tristate "Support for CHSC subchannels" | 363 | tristate "Support for CHSC subchannels" |
358 | help | 364 | help |
@@ -466,22 +472,9 @@ config PAGE_STATES | |||
466 | hypervisor. The ESSA instruction is used to do the states | 472 | hypervisor. The ESSA instruction is used to do the states |
467 | changes between a page that has content and the unused state. | 473 | changes between a page that has content and the unused state. |
468 | 474 | ||
469 | config VIRT_TIMER | ||
470 | bool "Virtual CPU timer support" | ||
471 | help | ||
472 | This provides a kernel interface for virtual CPU timers. | ||
473 | Default is disabled. | ||
474 | |||
475 | config VIRT_CPU_ACCOUNTING | ||
476 | bool "Base user process accounting on virtual cpu timer" | ||
477 | depends on VIRT_TIMER | ||
478 | help | ||
479 | Select this option to use CPU timer deltas to do user | ||
480 | process accounting. | ||
481 | |||
482 | config APPLDATA_BASE | 475 | config APPLDATA_BASE |
483 | bool "Linux - VM Monitor Stream, base infrastructure" | 476 | bool "Linux - VM Monitor Stream, base infrastructure" |
484 | depends on PROC_FS && VIRT_TIMER=y | 477 | depends on PROC_FS |
485 | help | 478 | help |
486 | This provides a kernel interface for creating and updating z/VM APPLDATA | 479 | This provides a kernel interface for creating and updating z/VM APPLDATA |
487 | monitor records. The monitor records are updated at certain time | 480 | monitor records. The monitor records are updated at certain time |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 792a4e7743ce..578c61f15a4b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5) | |||
34 | cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) | 34 | cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) |
35 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) | 35 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) |
36 | cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) | 36 | cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) |
37 | cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10) | ||
37 | 38 | ||
38 | #KBUILD_IMAGE is necessary for make rpm | 39 | #KBUILD_IMAGE is necessary for make rpm |
39 | KBUILD_IMAGE :=arch/s390/boot/image | 40 | KBUILD_IMAGE :=arch/s390/boot/image |
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index 17a2636fec0a..f0b23fc759ba 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -26,10 +26,6 @@ | |||
26 | #define CTL_APPLDATA_NET_SUM 2125 | 26 | #define CTL_APPLDATA_NET_SUM 2125 |
27 | #define CTL_APPLDATA_PROC 2126 | 27 | #define CTL_APPLDATA_PROC 2126 |
28 | 28 | ||
29 | #define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x) | ||
30 | #define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) | ||
31 | #define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) | ||
32 | |||
33 | struct appldata_ops { | 29 | struct appldata_ops { |
34 | struct list_head list; | 30 | struct list_head list; |
35 | struct ctl_table_header *sysctl_header; | 31 | struct ctl_table_header *sysctl_header; |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a06a47cdd5e0..27b70d8a359c 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -10,6 +10,9 @@ | |||
10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define KMSG_COMPONENT "appldata" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/module.h> | 16 | #include <linux/module.h> |
14 | #include <linux/init.h> | 17 | #include <linux/init.h> |
15 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
@@ -32,7 +35,6 @@ | |||
32 | #include "appldata.h" | 35 | #include "appldata.h" |
33 | 36 | ||
34 | 37 | ||
35 | #define MY_PRINT_NAME "appldata" /* for debug messages, etc. */ | ||
36 | #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for | 38 | #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for |
37 | sampling interval in | 39 | sampling interval in |
38 | milliseconds */ | 40 | milliseconds */ |
@@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, | |||
390 | (unsigned long) ops->data, ops->size, | 392 | (unsigned long) ops->data, ops->size, |
391 | ops->mod_lvl); | 393 | ops->mod_lvl); |
392 | if (rc != 0) { | 394 | if (rc != 0) { |
393 | P_ERROR("START DIAG 0xDC for %s failed, " | 395 | pr_err("Starting the data collection for %s " |
394 | "return code: %d\n", ops->name, rc); | 396 | "failed with rc=%d\n", ops->name, rc); |
395 | module_put(ops->owner); | 397 | module_put(ops->owner); |
396 | } else | 398 | } else |
397 | ops->active = 1; | 399 | ops->active = 1; |
@@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp, | |||
401 | (unsigned long) ops->data, ops->size, | 403 | (unsigned long) ops->data, ops->size, |
402 | ops->mod_lvl); | 404 | ops->mod_lvl); |
403 | if (rc != 0) | 405 | if (rc != 0) |
404 | P_ERROR("STOP DIAG 0xDC for %s failed, " | 406 | pr_err("Stopping the data collection for %s " |
405 | "return code: %d\n", ops->name, rc); | 407 | "failed with rc=%d\n", ops->name, rc); |
406 | module_put(ops->owner); | 408 | module_put(ops->owner); |
407 | } | 409 | } |
408 | spin_unlock(&appldata_ops_lock); | 410 | spin_unlock(&appldata_ops_lock); |
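The appldata files in this series drop their private P_ERROR/P_INFO/P_WARNING macros in favour of the generic pr_err/pr_warning/pr_info helpers; the message prefix now comes from pr_fmt(), which must be defined before the first #include so that <linux/kernel.h> picks it up. A minimal sketch of the pattern (component name and message are illustrative, not from the patch):

    #define KMSG_COMPONENT "mydrv"
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

    #include <linux/kernel.h>
    #include <linux/errno.h>

    static int mydrv_start(void)
    {
            /* printed as "mydrv: starting the data collection failed with rc=-5" */
            pr_err("starting the data collection failed with rc=%d\n", -EIO);
            return -EIO;
    }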
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index eb44f9f8ab91..55c80ffd42b9 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -9,6 +9,9 @@ | |||
9 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 9 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "appldata" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -22,7 +25,6 @@ | |||
22 | #include "appldata.h" | 25 | #include "appldata.h" |
23 | 26 | ||
24 | 27 | ||
25 | #define MY_PRINT_NAME "appldata_os" /* for debug messages, etc. */ | ||
26 | #define LOAD_INT(x) ((x) >> FSHIFT) | 28 | #define LOAD_INT(x) ((x) >> FSHIFT) |
27 | #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) | 29 | #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) |
28 | 30 | ||
@@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data) | |||
143 | (unsigned long) ops.data, new_size, | 145 | (unsigned long) ops.data, new_size, |
144 | ops.mod_lvl); | 146 | ops.mod_lvl); |
145 | if (rc != 0) | 147 | if (rc != 0) |
146 | P_ERROR("os: START NEW DIAG 0xDC failed, " | 148 | pr_err("Starting a new OS data collection " |
147 | "return code: %d, new size = %i\n", rc, | 149 | "failed with rc=%d\n", rc); |
148 | new_size); | ||
149 | 150 | ||
150 | rc = appldata_diag(APPLDATA_RECORD_OS_ID, | 151 | rc = appldata_diag(APPLDATA_RECORD_OS_ID, |
151 | APPLDATA_STOP_REC, | 152 | APPLDATA_STOP_REC, |
152 | (unsigned long) ops.data, ops.size, | 153 | (unsigned long) ops.data, ops.size, |
153 | ops.mod_lvl); | 154 | ops.mod_lvl); |
154 | if (rc != 0) | 155 | if (rc != 0) |
155 | P_ERROR("os: STOP OLD DIAG 0xDC failed, " | 156 | pr_err("Stopping a faulty OS data " |
156 | "return code: %d, old size = %i\n", rc, | 157 | "collection failed with rc=%d\n", rc); |
157 | ops.size); | ||
158 | else | ||
159 | P_INFO("os: old record size = %i stopped\n", | ||
160 | ops.size); | ||
161 | } | 158 | } |
162 | ops.size = new_size; | 159 | ops.size = new_size; |
163 | } | 160 | } |
@@ -178,8 +175,8 @@ static int __init appldata_os_init(void) | |||
178 | max_size = sizeof(struct appldata_os_data) + | 175 | max_size = sizeof(struct appldata_os_data) + |
179 | (NR_CPUS * sizeof(struct appldata_os_per_cpu)); | 176 | (NR_CPUS * sizeof(struct appldata_os_per_cpu)); |
180 | if (max_size > APPLDATA_MAX_REC_SIZE) { | 177 | if (max_size > APPLDATA_MAX_REC_SIZE) { |
181 | P_ERROR("Max. size of OS record = %i, bigger than maximum " | 178 | pr_err("Maximum OS record size %i exceeds the maximum " |
182 | "record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE); | 179 | "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE); |
183 | rc = -ENOMEM; | 180 | rc = -ENOMEM; |
184 | goto out; | 181 | goto out; |
185 | } | 182 | } |
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index e33f32b54c08..c42cd898f68b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -17,6 +17,9 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #define KMSG_COMPONENT "aes_s390" | ||
21 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
22 | |||
20 | #include <crypto/aes.h> | 23 | #include <crypto/aes.h> |
21 | #include <crypto/algapi.h> | 24 | #include <crypto/algapi.h> |
22 | #include <linux/err.h> | 25 | #include <linux/err.h> |
@@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm) | |||
169 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | 172 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
170 | 173 | ||
171 | if (IS_ERR(sctx->fallback.cip)) { | 174 | if (IS_ERR(sctx->fallback.cip)) { |
172 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); | 175 | pr_err("Allocating AES fallback algorithm %s failed\n", |
176 | name); | ||
173 | return PTR_ERR(sctx->fallback.blk); | 177 | return PTR_ERR(sctx->fallback.blk); |
174 | } | 178 | } |
175 | 179 | ||
@@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm) | |||
349 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | 353 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); |
350 | 354 | ||
351 | if (IS_ERR(sctx->fallback.blk)) { | 355 | if (IS_ERR(sctx->fallback.blk)) { |
352 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); | 356 | pr_err("Allocating AES fallback algorithm %s failed\n", |
357 | name); | ||
353 | return PTR_ERR(sctx->fallback.blk); | 358 | return PTR_ERR(sctx->fallback.blk); |
354 | } | 359 | } |
355 | 360 | ||
@@ -515,9 +520,8 @@ static int __init aes_s390_init(void) | |||
515 | 520 | ||
516 | /* z9 109 and z9 BC/EC only support 128 bit key length */ | 521 | /* z9 109 and z9 BC/EC only support 128 bit key length */ |
517 | if (keylen_flag == AES_KEYLEN_128) | 522 | if (keylen_flag == AES_KEYLEN_128) |
518 | printk(KERN_INFO | 523 | pr_info("AES hardware acceleration is only available for" |
519 | "aes_s390: hardware acceleration only available for " | 524 | " 128-bit keys\n"); |
520 | "128 bit keys\n"); | ||
521 | 525 | ||
522 | ret = crypto_register_alg(&aes_alg); | 526 | ret = crypto_register_alg(&aes_alg); |
523 | if (ret) | 527 | if (ret) |
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index b9a1ce1f28e4..b1e892a43816 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -3,10 +3,13 @@ | |||
3 | * Hypervisor filesystem for Linux on s390. Diag 204 and 224 | 3 | * Hypervisor filesystem for Linux on s390. Diag 204 and 224 |
4 | * implementation. | 4 | * implementation. |
5 | * | 5 | * |
6 | * Copyright (C) IBM Corp. 2006 | 6 | * Copyright IBM Corp. 2006, 2008 |
7 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | 7 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "hypfs" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/types.h> | 13 | #include <linux/types.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
12 | #include <linux/string.h> | 15 | #include <linux/string.h> |
@@ -527,13 +530,14 @@ __init int hypfs_diag_init(void) | |||
527 | int rc; | 530 | int rc; |
528 | 531 | ||
529 | if (diag204_probe()) { | 532 | if (diag204_probe()) { |
530 | printk(KERN_ERR "hypfs: diag 204 not working."); | 533 | pr_err("The hardware system does not support hypfs\n"); |
531 | return -ENODATA; | 534 | return -ENODATA; |
532 | } | 535 | } |
533 | rc = diag224_get_name_table(); | 536 | rc = diag224_get_name_table(); |
534 | if (rc) { | 537 | if (rc) { |
535 | diag204_free_buffer(); | 538 | diag204_free_buffer(); |
536 | printk(KERN_ERR "hypfs: could not get name table.\n"); | 539 | pr_err("The hardware system does not provide all " |
540 | "functions required by hypfs\n"); | ||
537 | } | 541 | } |
538 | return rc; | 542 | return rc; |
539 | } | 543 | } |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 8aadcd7a7cf8..9d4f8e6c0800 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -2,10 +2,13 @@ | |||
2 | * arch/s390/hypfs/inode.c | 2 | * arch/s390/hypfs/inode.c |
3 | * Hypervisor filesystem for Linux on s390. | 3 | * Hypervisor filesystem for Linux on s390. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2006 | 5 | * Copyright IBM Corp. 2006, 2008 |
6 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | 6 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "hypfs" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/types.h> | 12 | #include <linux/types.h> |
10 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
11 | #include <linux/fs.h> | 14 | #include <linux/fs.h> |
@@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
200 | else | 203 | else |
201 | rc = hypfs_diag_create_files(sb, sb->s_root); | 204 | rc = hypfs_diag_create_files(sb, sb->s_root); |
202 | if (rc) { | 205 | if (rc) { |
203 | printk(KERN_ERR "hypfs: Update failed\n"); | 206 | pr_err("Updating the hypfs tree failed\n"); |
204 | hypfs_delete_tree(sb->s_root); | 207 | hypfs_delete_tree(sb->s_root); |
205 | goto out; | 208 | goto out; |
206 | } | 209 | } |
@@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb) | |||
252 | break; | 255 | break; |
253 | case opt_err: | 256 | case opt_err: |
254 | default: | 257 | default: |
255 | printk(KERN_ERR "hypfs: Unrecognized mount option " | 258 | pr_err("%s is not a valid mount option\n", str); |
256 | "\"%s\" or missing value\n", str); | ||
257 | return -EINVAL; | 259 | return -EINVAL; |
258 | } | 260 | } |
259 | } | 261 | } |
@@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) | |||
317 | } | 319 | } |
318 | hypfs_update_update(sb); | 320 | hypfs_update_update(sb); |
319 | sb->s_root = root_dentry; | 321 | sb->s_root = root_dentry; |
320 | printk(KERN_INFO "hypfs: Hypervisor filesystem mounted\n"); | 322 | pr_info("Hypervisor filesystem mounted\n"); |
321 | return 0; | 323 | return 0; |
322 | 324 | ||
323 | err_tree: | 325 | err_tree: |
@@ -513,7 +515,7 @@ fail_sysfs: | |||
513 | if (!MACHINE_IS_VM) | 515 | if (!MACHINE_IS_VM) |
514 | hypfs_diag_exit(); | 516 | hypfs_diag_exit(); |
515 | fail_diag: | 517 | fail_diag: |
516 | printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); | 518 | pr_err("Initialization of hypfs failed with rc=%i\n", rc); |
517 | return rc; | 519 | return rc; |
518 | } | 520 | } |
519 | 521 | ||
diff --git a/arch/s390/include/asm/auxvec.h b/arch/s390/include/asm/auxvec.h
index 0d340720fd99..a1f153e89133 100644
--- a/arch/s390/include/asm/auxvec.h
+++ b/arch/s390/include/asm/auxvec.h
@@ -1,4 +1,6 @@ | |||
1 | #ifndef __ASMS390_AUXVEC_H | 1 | #ifndef __ASMS390_AUXVEC_H |
2 | #define __ASMS390_AUXVEC_H | 2 | #define __ASMS390_AUXVEC_H |
3 | 3 | ||
4 | #define AT_SYSINFO_EHDR 33 | ||
5 | |||
4 | #endif | 6 | #endif |
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 384e3621e341..7efd0abe8887 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -47,7 +47,10 @@ | |||
47 | 47 | ||
48 | #endif /* CONFIG_DEBUG_BUGVERBOSE */ | 48 | #endif /* CONFIG_DEBUG_BUGVERBOSE */ |
49 | 49 | ||
50 | #define BUG() __EMIT_BUG(0) | 50 | #define BUG() do { \ |
51 | __EMIT_BUG(0); \ | ||
52 | for (;;); \ | ||
53 | } while (0) | ||
51 | 54 | ||
52 | #define WARN_ON(x) ({ \ | 55 | #define WARN_ON(x) ({ \ |
53 | int __ret_warn_on = !!(x); \ | 56 | int __ret_warn_on = !!(x); \ |
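The endless loop added to BUG() matters for the compiler's control-flow analysis: __EMIT_BUG(0) expands to a trapping instruction sequence, but gcc cannot know that execution stops there, so a caller ending in BUG() could previously trigger "control reaches end of non-void function" warnings or emit a fall-through path that must never run. A sketch of the kind of caller this helps (the function is made up):

    static int two_way(int x)
    {
            if (x == 0)
                    return 1;
            if (x == 1)
                    return 2;
            /* With the for (;;); inside BUG() gcc sees that execution
             * never continues past this point, so it neither warns about
             * a missing return value nor generates a bogus fall-through. */
            BUG();
    }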
diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h
index 1fe2492baa8d..8bcf277c8468 100644
--- a/arch/s390/include/asm/byteorder.h
+++ b/arch/s390/include/asm/byteorder.h
@@ -11,32 +11,39 @@ | |||
11 | 11 | ||
12 | #include <asm/types.h> | 12 | #include <asm/types.h> |
13 | 13 | ||
14 | #ifdef __GNUC__ | 14 | #define __BIG_ENDIAN |
15 | |||
16 | #ifndef __s390x__ | ||
17 | # define __SWAB_64_THRU_32__ | ||
18 | #endif | ||
15 | 19 | ||
16 | #ifdef __s390x__ | 20 | #ifdef __s390x__ |
17 | static inline __u64 ___arch__swab64p(const __u64 *x) | 21 | static inline __u64 __arch_swab64p(const __u64 *x) |
18 | { | 22 | { |
19 | __u64 result; | 23 | __u64 result; |
20 | 24 | ||
21 | asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x)); | 25 | asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x)); |
22 | return result; | 26 | return result; |
23 | } | 27 | } |
28 | #define __arch_swab64p __arch_swab64p | ||
24 | 29 | ||
25 | static inline __u64 ___arch__swab64(__u64 x) | 30 | static inline __u64 __arch_swab64(__u64 x) |
26 | { | 31 | { |
27 | __u64 result; | 32 | __u64 result; |
28 | 33 | ||
29 | asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x)); | 34 | asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x)); |
30 | return result; | 35 | return result; |
31 | } | 36 | } |
37 | #define __arch_swab64 __arch_swab64 | ||
32 | 38 | ||
33 | static inline void ___arch__swab64s(__u64 *x) | 39 | static inline void __arch_swab64s(__u64 *x) |
34 | { | 40 | { |
35 | *x = ___arch__swab64p(x); | 41 | *x = __arch_swab64p(x); |
36 | } | 42 | } |
43 | #define __arch_swab64s __arch_swab64s | ||
37 | #endif /* __s390x__ */ | 44 | #endif /* __s390x__ */ |
38 | 45 | ||
39 | static inline __u32 ___arch__swab32p(const __u32 *x) | 46 | static inline __u32 __arch_swab32p(const __u32 *x) |
40 | { | 47 | { |
41 | __u32 result; | 48 | __u32 result; |
42 | 49 | ||
@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x) | |||
53 | #endif /* __s390x__ */ | 60 | #endif /* __s390x__ */ |
54 | return result; | 61 | return result; |
55 | } | 62 | } |
63 | #define __arch_swab32p __arch_swab32p | ||
56 | 64 | ||
57 | static inline __u32 ___arch__swab32(__u32 x) | 65 | #ifdef __s390x__ |
66 | static inline __u32 __arch_swab32(__u32 x) | ||
58 | { | 67 | { |
59 | #ifndef __s390x__ | ||
60 | return ___arch__swab32p(&x); | ||
61 | #else /* __s390x__ */ | ||
62 | __u32 result; | 68 | __u32 result; |
63 | 69 | ||
64 | asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x)); | 70 | asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x)); |
65 | return result; | 71 | return result; |
66 | #endif /* __s390x__ */ | ||
67 | } | ||
68 | |||
69 | static __inline__ void ___arch__swab32s(__u32 *x) | ||
70 | { | ||
71 | *x = ___arch__swab32p(x); | ||
72 | } | 72 | } |
73 | #define __arch_swab32 __arch_swab32 | ||
74 | #endif /* __s390x__ */ | ||
73 | 75 | ||
74 | static __inline__ __u16 ___arch__swab16p(const __u16 *x) | 76 | static inline __u16 __arch_swab16p(const __u16 *x) |
75 | { | 77 | { |
76 | __u16 result; | 78 | __u16 result; |
77 | 79 | ||
@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x) | |||
86 | #endif /* __s390x__ */ | 88 | #endif /* __s390x__ */ |
87 | return result; | 89 | return result; |
88 | } | 90 | } |
91 | #define __arch_swab16p __arch_swab16p | ||
89 | 92 | ||
90 | static __inline__ __u16 ___arch__swab16(__u16 x) | 93 | #include <linux/byteorder.h> |
91 | { | ||
92 | return ___arch__swab16p(&x); | ||
93 | } | ||
94 | |||
95 | static __inline__ void ___arch__swab16s(__u16 *x) | ||
96 | { | ||
97 | *x = ___arch__swab16p(x); | ||
98 | } | ||
99 | |||
100 | #ifdef __s390x__ | ||
101 | #define __arch__swab64(x) ___arch__swab64(x) | ||
102 | #define __arch__swab64p(x) ___arch__swab64p(x) | ||
103 | #define __arch__swab64s(x) ___arch__swab64s(x) | ||
104 | #endif /* __s390x__ */ | ||
105 | #define __arch__swab32(x) ___arch__swab32(x) | ||
106 | #define __arch__swab16(x) ___arch__swab16(x) | ||
107 | #define __arch__swab32p(x) ___arch__swab32p(x) | ||
108 | #define __arch__swab16p(x) ___arch__swab16p(x) | ||
109 | #define __arch__swab32s(x) ___arch__swab32s(x) | ||
110 | #define __arch__swab16s(x) ___arch__swab16s(x) | ||
111 | |||
112 | #ifndef __s390x__ | ||
113 | #if !defined(__STRICT_ANSI__) || defined(__KERNEL__) | ||
114 | # define __BYTEORDER_HAS_U64__ | ||
115 | # define __SWAB_64_THRU_32__ | ||
116 | #endif | ||
117 | #else /* __s390x__ */ | ||
118 | #define __BYTEORDER_HAS_U64__ | ||
119 | #endif /* __s390x__ */ | ||
120 | |||
121 | #endif /* __GNUC__ */ | ||
122 | |||
123 | #include <linux/byteorder/big_endian.h> | ||
124 | 94 | ||
125 | #endif /* _S390_BYTEORDER_H */ | 95 | #endif /* _S390_BYTEORDER_H */ |
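The byteorder.h rework drops the old ___arch__swab* and __BYTEORDER_HAS_U64__ plumbing and instead defines __arch_swab* hooks (pointer, value and in-place variants where the hardware helps), letting the new common <linux/byteorder.h> generate the full swab*, cpu_to_* and *_to_cpu family on top of them. Callers elsewhere keep using the generic helpers and transparently get the lrv/lrvr/lrvg/lrvgr based versions; a sketch assuming 64-bit s390 (helper name invented):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    static inline __u64 read_le64(const __le64 *p)
    {
            /* s390 is big-endian, so le64_to_cpu() has to byte-swap;
             * with __arch_swab64 defined above that swap is a single
             * lrvgr instruction rather than the generic C fallback. */
            return le64_to_cpu(*p);
    }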
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 261785ab5b22..d480f39d65e6 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t; | |||
120 | #include <asm/system.h> /* for save_access_regs */ | 120 | #include <asm/system.h> /* for save_access_regs */ |
121 | #include <asm/mmu_context.h> | 121 | #include <asm/mmu_context.h> |
122 | 122 | ||
123 | #include <asm/vdso.h> | ||
124 | |||
125 | extern unsigned int vdso_enabled; | ||
126 | |||
123 | /* | 127 | /* |
124 | * This is used to ensure we don't load something for the wrong architecture. | 128 | * This is used to ensure we don't load something for the wrong architecture. |
125 | */ | 129 | */ |
@@ -191,4 +195,16 @@ do { \ | |||
191 | current->mm->context.noexec == 0; \ | 195 | current->mm->context.noexec == 0; \ |
192 | }) | 196 | }) |
193 | 197 | ||
198 | #define ARCH_DLINFO \ | ||
199 | do { \ | ||
200 | if (vdso_enabled) \ | ||
201 | NEW_AUX_ENT(AT_SYSINFO_EHDR, \ | ||
202 | (unsigned long)current->mm->context.vdso_base); \ | ||
203 | } while (0) | ||
204 | |||
205 | struct linux_binprm; | ||
206 | |||
207 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | ||
208 | int arch_setup_additional_pages(struct linux_binprm *, int); | ||
209 | |||
194 | #endif | 210 | #endif |
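With ARCH_DLINFO defined, every new process receives an AT_SYSINFO_EHDR entry in its ELF auxiliary vector pointing at the vDSO mapping created by arch_setup_additional_pages() (unless vdso_enabled is 0), so user space can locate the vDSO without a system call. A sketch of how a program might inspect the entry; it uses glibc's getauxval(), which is an assumption here and postdates this kernel series:

    #include <stdio.h>
    #include <elf.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* AT_SYSINFO_EHDR is the tag defined as 33 in asm/auxvec.h
             * above; its value is the base address of the vDSO ELF
             * image mapped into this process. */
            unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

            if (vdso)
                    printf("vDSO mapped at 0x%lx\n", vdso);
            else
                    printf("no vDSO in the auxiliary vector\n");
            return 0;
    }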
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 8be1f3a58042..ef6170995076 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -248,8 +248,8 @@ struct dcw { | |||
248 | #define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \ | 248 | #define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \ |
249 | TCCB_MAX_DCW * sizeof(struct dcw) + \ | 249 | TCCB_MAX_DCW * sizeof(struct dcw) + \ |
250 | sizeof(struct tccb_tcat)) | 250 | sizeof(struct tccb_tcat)) |
251 | #define TCCB_SAC_DEFAULT 0xf901 | 251 | #define TCCB_SAC_DEFAULT 0x1ffe |
252 | #define TCCB_SAC_INTRG 0xf902 | 252 | #define TCCB_SAC_INTRG 0x1fff |
253 | 253 | ||
254 | /** | 254 | /** |
255 | * struct tccb_tcah - Transport-Command-Area Header (TCAH) | 255 | * struct tccb_tcah - Transport-Command-Area Header (TCAH) |
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
new file mode 100644
index 000000000000..5a5bc75e19d4
--- /dev/null
+++ b/arch/s390/include/asm/ftrace.h
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _ASM_S390_FTRACE_H | ||
2 | #define _ASM_S390_FTRACE_H | ||
3 | |||
4 | #ifndef __ASSEMBLY__ | ||
5 | extern void _mcount(void); | ||
6 | #endif | ||
7 | |||
8 | #endif /* _ASM_S390_FTRACE_H */ | ||
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 34bb8916db4f..1420a1115948 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -17,6 +17,7 @@ | |||
17 | #define CHSC_SCH_ISC 7 /* CHSC subchannels */ | 17 | #define CHSC_SCH_ISC 7 /* CHSC subchannels */ |
18 | /* Adapter interrupts. */ | 18 | /* Adapter interrupts. */ |
19 | #define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ | 19 | #define QDIO_AIRQ_ISC IO_SCH_ISC /* I/O subchannel in qdio mode */ |
20 | #define AP_ISC 6 /* adjunct processor (crypto) devices */ | ||
20 | 21 | ||
21 | /* Functions for registration of I/O interruption subclasses */ | 22 | /* Functions for registration of I/O interruption subclasses */ |
22 | void isc_register(unsigned int isc); | 23 | void isc_register(unsigned int isc); |
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d2b4ff831477..3b59216e6284 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct { | |||
6 | struct list_head pgtable_list; | 6 | struct list_head pgtable_list; |
7 | unsigned long asce_bits; | 7 | unsigned long asce_bits; |
8 | unsigned long asce_limit; | 8 | unsigned long asce_limit; |
9 | unsigned long vdso_base; | ||
9 | int noexec; | 10 | int noexec; |
10 | int has_pgste; /* The mmu context has extended page tables */ | 11 | int has_pgste; /* The mmu context has extended page tables */ |
11 | int alloc_pgste; /* cloned contexts will have extended page tables */ | 12 | int alloc_pgste; /* cloned contexts will have extended page tables */ |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 991ba939408c..32e8f6aa4384 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order); | |||
152 | #include <asm-generic/memory_model.h> | 152 | #include <asm-generic/memory_model.h> |
153 | #include <asm-generic/page.h> | 153 | #include <asm-generic/page.h> |
154 | 154 | ||
155 | #define __HAVE_ARCH_GATE_AREA 1 | ||
156 | |||
155 | #endif /* _S390_PAGE_H */ | 157 | #endif /* _S390_PAGE_H */ |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f5b2bf3d7c1d..b2658b9220fe 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *); | |||
28 | 28 | ||
29 | static inline void clear_table(unsigned long *s, unsigned long val, size_t n) | 29 | static inline void clear_table(unsigned long *s, unsigned long val, size_t n) |
30 | { | 30 | { |
31 | typedef struct { char _[n]; } addrtype; | ||
32 | |||
31 | *s = val; | 33 | *s = val; |
32 | n = (n / 256) - 1; | 34 | n = (n / 256) - 1; |
33 | asm volatile( | 35 | asm volatile( |
@@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n) | |||
39 | "0: mvc 256(256,%0),0(%0)\n" | 41 | "0: mvc 256(256,%0),0(%0)\n" |
40 | " la %0,256(%0)\n" | 42 | " la %0,256(%0)\n" |
41 | " brct %1,0b\n" | 43 | " brct %1,0b\n" |
42 | : "+a" (s), "+d" (n)); | 44 | : "+a" (s), "+d" (n), "=m" (*(addrtype *) s) |
45 | : "m" (*(addrtype *) s)); | ||
43 | } | 46 | } |
44 | 47 | ||
45 | static inline void crst_table_init(unsigned long *crst, unsigned long entry) | 48 | static inline void crst_table_init(unsigned long *crst, unsigned long entry) |
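The new addrtype operands in clear_table() tell gcc exactly which n bytes the inline assembly reads and writes; without an explicit memory operand (or a blunt "memory" clobber) the compiler may assume the buffer behind s is untouched and reorder or cache accesses around the asm. A sketch of the same constraint idiom on a simpler, made-up helper (not part of this patch):

    /* Copy one 256-byte block with MVC, declaring the 256 bytes written
     * and the 256 bytes read as memory operands so gcc orders the
     * surrounding loads and stores correctly. */
    static inline void copy_block(void *dst, const void *src)
    {
            typedef struct { char _[256]; } addrtype;

            asm volatile(
                    "       mvc     0(256,%1),0(%2)\n"
                    : "=m" (*(addrtype *) dst)
                    : "a" (dst), "a" (src), "m" (*(const addrtype *) src));
    }

clear_table() above (and __stfle() later in this series) use the variable-length form, typedef struct { char _[n]; } addrtype;, a GCC extension that lets the operand cover a size only known at run time.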
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 4af80af2a88f..066b99502e09 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,6 +13,7 @@ | |||
13 | #ifndef __ASM_S390_PROCESSOR_H | 13 | #ifndef __ASM_S390_PROCESSOR_H |
14 | #define __ASM_S390_PROCESSOR_H | 14 | #define __ASM_S390_PROCESSOR_H |
15 | 15 | ||
16 | #include <linux/linkage.h> | ||
16 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
17 | 18 | ||
18 | #ifdef __KERNEL__ | 19 | #ifdef __KERNEL__ |
@@ -258,7 +259,7 @@ static inline void enabled_wait(void) | |||
258 | * Function to drop a processor into disabled wait state | 259 | * Function to drop a processor into disabled wait state |
259 | */ | 260 | */ |
260 | 261 | ||
261 | static inline void disabled_wait(unsigned long code) | 262 | static inline void ATTRIB_NORET disabled_wait(unsigned long code) |
262 | { | 263 | { |
263 | unsigned long ctl_buf; | 264 | unsigned long ctl_buf; |
264 | psw_t dw_psw; | 265 | psw_t dw_psw; |
@@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code) | |||
322 | : "=m" (ctl_buf) | 323 | : "=m" (ctl_buf) |
323 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0"); | 324 | : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0"); |
324 | #endif /* __s390x__ */ | 325 | #endif /* __s390x__ */ |
326 | while (1); | ||
325 | } | 327 | } |
326 | 328 | ||
327 | /* | 329 | /* |
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 4734c3f05354..27fc1746de15 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -373,16 +373,16 @@ struct qdio_initialize { | |||
373 | #define QDIO_FLAG_SYNC_OUTPUT 0x02 | 373 | #define QDIO_FLAG_SYNC_OUTPUT 0x02 |
374 | #define QDIO_FLAG_PCI_OUT 0x10 | 374 | #define QDIO_FLAG_PCI_OUT 0x10 |
375 | 375 | ||
376 | extern int qdio_initialize(struct qdio_initialize *init_data); | 376 | extern int qdio_initialize(struct qdio_initialize *); |
377 | extern int qdio_allocate(struct qdio_initialize *init_data); | 377 | extern int qdio_allocate(struct qdio_initialize *); |
378 | extern int qdio_establish(struct qdio_initialize *init_data); | 378 | extern int qdio_establish(struct qdio_initialize *); |
379 | extern int qdio_activate(struct ccw_device *); | 379 | extern int qdio_activate(struct ccw_device *); |
380 | 380 | ||
381 | extern int do_QDIO(struct ccw_device*, unsigned int flags, | 381 | extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, |
382 | int q_nr, int qidx, int count); | 382 | int q_nr, int bufnr, int count); |
383 | extern int qdio_cleanup(struct ccw_device*, int how); | 383 | extern int qdio_cleanup(struct ccw_device*, int); |
384 | extern int qdio_shutdown(struct ccw_device*, int how); | 384 | extern int qdio_shutdown(struct ccw_device*, int); |
385 | extern int qdio_free(struct ccw_device *); | 385 | extern int qdio_free(struct ccw_device *); |
386 | extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev); | 386 | extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*); |
387 | 387 | ||
388 | #endif /* __QDIO_H__ */ | 388 | #endif /* __QDIO_H__ */ |
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index e16d56f8dfe1..ec403d4304f8 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -61,6 +61,7 @@ typedef enum | |||
61 | { | 61 | { |
62 | ec_schedule=0, | 62 | ec_schedule=0, |
63 | ec_call_function, | 63 | ec_call_function, |
64 | ec_call_function_single, | ||
64 | ec_bit_last | 65 | ec_bit_last |
65 | } ec_bit_sig; | 66 | } ec_bit_sig; |
66 | 67 | ||
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ae89cf2478fc..024b91e06239 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu); | |||
91 | extern struct mutex smp_cpu_state_mutex; | 91 | extern struct mutex smp_cpu_state_mutex; |
92 | extern int smp_cpu_polarization[]; | 92 | extern int smp_cpu_polarization[]; |
93 | 93 | ||
94 | extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *), | 94 | extern void arch_send_call_function_single_ipi(int cpu); |
95 | void *info, int wait); | 95 | extern void arch_send_call_function_ipi(cpumask_t mask); |
96 | |||
96 | #endif | 97 | #endif |
97 | 98 | ||
98 | #ifndef CONFIG_SMP | 99 | #ifndef CONFIG_SMP |
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 79d01343f8b0..ad93212d9e16 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2) | |||
118 | return r0; | 118 | return r0; |
119 | } | 119 | } |
120 | 120 | ||
121 | /* | ||
122 | * Service level reporting interface. | ||
123 | */ | ||
124 | struct service_level { | ||
125 | struct list_head list; | ||
126 | void (*seq_print)(struct seq_file *, struct service_level *); | ||
127 | }; | ||
128 | |||
129 | int register_service_level(struct service_level *); | ||
130 | int unregister_service_level(struct service_level *); | ||
131 | |||
121 | #endif /* __ASM_S390_SYSINFO_H */ | 132 | #endif /* __ASM_S390_SYSINFO_H */ |
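register_service_level() and unregister_service_level() give drivers a hook to publish one line of service/microcode level information through a procfs file implemented elsewhere in this series; the caller embeds a struct service_level and only fills in the seq_print callback, while the list head is owned by the core code. A hedged sketch of a user (driver name and output text are invented):

    #include <linux/init.h>
    #include <linux/seq_file.h>
    #include <asm/sysinfo.h>

    static void frob_print_service_level(struct seq_file *m,
                                         struct service_level *slr)
    {
            seq_printf(m, "Frobnicator microcode level: 4711\n");
    }

    static struct service_level frob_service_level = {
            .seq_print = frob_print_service_level,
    };

    static int __init frob_init(void)
    {
            return register_service_level(&frob_service_level);
    }

    static void __exit frob_exit(void)
    {
            unregister_service_level(&frob_service_level);
    }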
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 819e7d99ca0c..024ef42ed6d7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -12,6 +12,7 @@ | |||
12 | #define __ASM_SYSTEM_H | 12 | #define __ASM_SYSTEM_H |
13 | 13 | ||
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/errno.h> | ||
15 | #include <asm/types.h> | 16 | #include <asm/types.h> |
16 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
@@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs) | |||
98 | prev = __switch_to(prev,next); \ | 99 | prev = __switch_to(prev,next); \ |
99 | } while (0) | 100 | } while (0) |
100 | 101 | ||
101 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
102 | extern void account_vtime(struct task_struct *); | 102 | extern void account_vtime(struct task_struct *); |
103 | extern void account_tick_vtime(struct task_struct *); | 103 | extern void account_tick_vtime(struct task_struct *); |
104 | extern void account_system_vtime(struct task_struct *); | 104 | extern void account_system_vtime(struct task_struct *); |
105 | #else | ||
106 | #define account_vtime(x) do { /* empty */ } while (0) | ||
107 | #endif | ||
108 | 105 | ||
109 | #ifdef CONFIG_PFAULT | 106 | #ifdef CONFIG_PFAULT |
110 | extern void pfault_irq_init(void); | 107 | extern void pfault_irq_init(void); |
@@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask) | |||
413 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) | 410 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) |
414 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) | 411 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) |
415 | 412 | ||
416 | int stfle(unsigned long long *list, int doublewords); | ||
417 | |||
418 | #ifdef CONFIG_SMP | 413 | #ifdef CONFIG_SMP |
419 | 414 | ||
420 | extern void smp_ctl_set_bit(int cr, int bit); | 415 | extern void smp_ctl_set_bit(int cr, int bit); |
@@ -438,6 +433,23 @@ static inline unsigned int stfl(void) | |||
438 | return S390_lowcore.stfl_fac_list; | 433 | return S390_lowcore.stfl_fac_list; |
439 | } | 434 | } |
440 | 435 | ||
436 | static inline int __stfle(unsigned long long *list, int doublewords) | ||
437 | { | ||
438 | typedef struct { unsigned long long _[doublewords]; } addrtype; | ||
439 | register unsigned long __nr asm("0") = doublewords - 1; | ||
440 | |||
441 | asm volatile(".insn s,0xb2b00000,%0" /* stfle */ | ||
442 | : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); | ||
443 | return __nr + 1; | ||
444 | } | ||
445 | |||
446 | static inline int stfle(unsigned long long *list, int doublewords) | ||
447 | { | ||
448 | if (!(stfl() & (1UL << 24))) | ||
449 | return -EOPNOTSUPP; | ||
450 | return __stfle(list, doublewords); | ||
451 | } | ||
452 | |||
441 | static inline unsigned short stap(void) | 453 | static inline unsigned short stap(void) |
442 | { | 454 | { |
443 | unsigned short cpu_address; | 455 | unsigned short cpu_address; |
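stfle() is now a plain inline: it first checks bit 24 of the STFL facility list (the bit indicating that the store-facility-list-extended instruction exists) and only then runs STFLE via __stfle(), returning either -EOPNOTSUPP or the number of doublewords of facility bits the machine reports. A sketch of a caller (helper name is invented; the left-to-right bit numbering shown is the STFLE convention):

    static int facility_bit_set(int nr)
    {
            unsigned long long facility_list[4] = { 0 };
            int rc;

            rc = stfle(facility_list, ARRAY_SIZE(facility_list));
            if (rc < 0)
                    return 0;       /* no STFLE: treat facility as absent */
            /* Facility bits are numbered from the leftmost bit of the
             * first doubleword onwards. */
            return (facility_list[nr / 64] >> (63 - (nr % 64))) & 1;
    }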
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index d98d79e35cd6..61705d60f995 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer); | |||
48 | extern void init_cpu_vtimer(void); | 48 | extern void init_cpu_vtimer(void); |
49 | extern void vtime_init(void); | 49 | extern void vtime_init(void); |
50 | 50 | ||
51 | #ifdef CONFIG_VIRT_TIMER | ||
52 | |||
53 | extern void vtime_start_cpu_timer(void); | 51 | extern void vtime_start_cpu_timer(void); |
54 | extern void vtime_stop_cpu_timer(void); | 52 | extern void vtime_stop_cpu_timer(void); |
55 | 53 | ||
56 | #else | ||
57 | |||
58 | static inline void vtime_start_cpu_timer(void) { } | ||
59 | static inline void vtime_stop_cpu_timer(void) { } | ||
60 | |||
61 | #endif /* CONFIG_VIRT_TIMER */ | ||
62 | |||
63 | #endif /* __KERNEL__ */ | 54 | #endif /* __KERNEL__ */ |
64 | 55 | ||
65 | #endif /* _ASM_S390_TIMER_H */ | 56 | #endif /* _ASM_S390_TIMER_H */ |
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
new file mode 100644
index 000000000000..a44f4fe16a35
--- /dev/null
+++ b/arch/s390/include/asm/vdso.h
@@ -0,0 +1,39 @@ | |||
1 | #ifndef __S390_VDSO_H__ | ||
2 | #define __S390_VDSO_H__ | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | /* Default link addresses for the vDSOs */ | ||
7 | #define VDSO32_LBASE 0 | ||
8 | #define VDSO64_LBASE 0 | ||
9 | |||
10 | #define VDSO_VERSION_STRING LINUX_2.6.26 | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | /* | ||
15 | * Note about this structure: | ||
16 | * | ||
17 | * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this | ||
18 | * structure is supposed to be known only to the function in the vdso | ||
19 | * itself and may change without notice. | ||
20 | */ | ||
21 | |||
22 | struct vdso_data { | ||
23 | __u64 tb_update_count; /* Timebase atomicity ctr 0x00 */ | ||
24 | __u64 xtime_tod_stamp; /* TOD clock for xtime 0x08 */ | ||
25 | __u64 xtime_clock_sec; /* Kernel time 0x10 */ | ||
26 | __u64 xtime_clock_nsec; /* 0x18 */ | ||
27 | __u64 wtom_clock_sec; /* Wall to monotonic clock 0x20 */ | ||
28 | __u64 wtom_clock_nsec; /* 0x28 */ | ||
29 | __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ | ||
30 | __u32 tz_dsttime; /* Type of dst correction 0x34 */ | ||
31 | }; | ||
32 | |||
33 | extern struct vdso_data *vdso_data; | ||
34 | |||
35 | #endif /* __ASSEMBLY__ */ | ||
36 | |||
37 | #endif /* __KERNEL__ */ | ||
38 | |||
39 | #endif /* __S390_VDSO_H__ */ | ||
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 50f657e77344..3edc6c6f258b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -2,6 +2,11 @@ | |||
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | ifdef CONFIG_FUNCTION_TRACER | ||
6 | # Do not trace early boot code | ||
7 | CFLAGS_REMOVE_early.o = -pg | ||
8 | endif | ||
9 | |||
5 | # | 10 | # |
6 | # Passing null pointers is ok for smp code, since we access the lowcore here. | 11 | # Passing null pointers is ok for smp code, since we access the lowcore here. |
7 | # | 12 | # |
@@ -12,9 +17,10 @@ CFLAGS_smp.o := -Wno-nonnull | |||
12 | # | 17 | # |
13 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | 18 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' |
14 | 19 | ||
15 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ | 20 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ |
16 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 21 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
17 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o | 22 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ |
23 | vdso.o vtime.o | ||
18 | 24 | ||
19 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 25 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
20 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 26 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
@@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | |||
30 | compat_wrapper.o compat_exec_domain.o \ | 36 | compat_wrapper.o compat_exec_domain.o \ |
31 | $(compat-obj-y) | 37 | $(compat-obj-y) |
32 | 38 | ||
33 | obj-$(CONFIG_VIRT_TIMER) += vtime.o | ||
34 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 39 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
35 | obj-$(CONFIG_KPROBES) += kprobes.o | 40 | obj-$(CONFIG_KPROBES) += kprobes.o |
41 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o | ||
36 | 42 | ||
37 | # Kexec part | 43 | # Kexec part |
38 | S390_KEXEC_OBJS := machine_kexec.o crash.o | 44 | S390_KEXEC_OBJS := machine_kexec.o crash.o |
39 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) | 45 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) |
40 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) | 46 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) |
41 | 47 | ||
48 | # vdso | ||
49 | obj-$(CONFIG_64BIT) += vdso64/ | ||
50 | obj-$(CONFIG_32BIT) += vdso32/ | ||
51 | obj-$(CONFIG_COMPAT) += vdso32/ | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 3d144e6020c6..e641f60bac99 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/kbuild.h> | 8 | #include <linux/kbuild.h> |
9 | #include <asm/vdso.h> | ||
9 | 10 | ||
10 | int main(void) | 11 | int main(void) |
11 | { | 12 | { |
@@ -38,5 +39,19 @@ int main(void) | |||
38 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); | 39 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); |
39 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); | 40 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); |
40 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); | 41 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); |
42 | BLANK(); | ||
43 | /* timeval/timezone offsets for use by vdso */ | ||
44 | DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count)); | ||
45 | DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); | ||
46 | DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); | ||
47 | DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); | ||
48 | DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); | ||
49 | DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); | ||
50 | DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); | ||
51 | /* constants used by the vdso */ | ||
52 | DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); | ||
53 | DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); | ||
54 | DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); | ||
55 | |||
41 | return 0; | 56 | return 0; |
42 | } | 57 | } |
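The new DEFINE() lines turn the vdso_data field offsets and the clock constants into assembler-visible symbols: kbuild compiles asm-offsets.c, extracts the values, and writes them into the generated asm-offsets.h header that assembly sources can include. An illustrative use from assembler code (symbol name and register choice are made up, not taken from this patch):

            larl    %r1,_vdso_data              # hypothetical data symbol
            lg      %r0,__VDSO_XTIME_SEC(%r1)   # load xtime seconds field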
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index d8c1131e0815..3e8b8816f309 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -7,6 +7,9 @@ | |||
7 | * Christian Borntraeger (cborntra@de.ibm.com), | 7 | * Christian Borntraeger (cborntra@de.ibm.com), |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "cpcmd" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
@@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
104 | (((unsigned long)response + rlen) >> 31)) { | 107 | (((unsigned long)response + rlen) >> 31)) { |
105 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); | 108 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); |
106 | if (!lowbuf) { | 109 | if (!lowbuf) { |
107 | printk(KERN_WARNING | 110 | pr_warning("The cpcmd kernel function failed to " |
108 | "cpcmd: could not allocate response buffer\n"); | 111 | "allocate a response buffer\n"); |
109 | return -ENOMEM; | 112 | return -ENOMEM; |
110 | } | 113 | } |
111 | spin_lock_irqsave(&cpcmd_lock, flags); | 114 | spin_lock_irqsave(&cpcmd_lock, flags); |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index d80fcd4a7fe1..ba03fc0a3a56 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -10,6 +10,9 @@ | |||
10 | * Bugreports to: <Linux390@de.ibm.com> | 10 | * Bugreports to: <Linux390@de.ibm.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define KMSG_COMPONENT "s390dbf" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/stddef.h> | 16 | #include <linux/stddef.h> |
14 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
15 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
@@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode) | |||
388 | debug_info_free(rc); | 391 | debug_info_free(rc); |
389 | } while (1); | 392 | } while (1); |
390 | 393 | ||
391 | if(!rc || (mode == NO_AREAS)) | 394 | if (mode == NO_AREAS) |
392 | goto out; | 395 | goto out; |
393 | 396 | ||
394 | for(i = 0; i < in->nr_areas; i++){ | 397 | for(i = 0; i < in->nr_areas; i++){ |
@@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, | |||
693 | /* Since debugfs currently does not support uid/gid other than root, */ | 696 | /* Since debugfs currently does not support uid/gid other than root, */ |
694 | /* we do not allow gid/uid != 0 until we get support for that. */ | 697 | /* we do not allow gid/uid != 0 until we get support for that. */ |
695 | if ((uid != 0) || (gid != 0)) | 698 | if ((uid != 0) || (gid != 0)) |
696 | printk(KERN_WARNING "debug: Warning - Currently only uid/gid " | 699 | pr_warning("Root becomes the owner of all s390dbf files " |
697 | "= 0 are supported. Using root as owner now!"); | 700 | "in sysfs\n"); |
698 | if (!initialized) | 701 | if (!initialized) |
699 | BUG(); | 702 | BUG(); |
700 | mutex_lock(&debug_mutex); | 703 | mutex_lock(&debug_mutex); |
@@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, | |||
709 | debug_register_view(rc, &debug_pages_view); | 712 | debug_register_view(rc, &debug_pages_view); |
710 | out: | 713 | out: |
711 | if (!rc){ | 714 | if (!rc){ |
712 | printk(KERN_ERR "debug: debug_register failed for %s\n",name); | 715 | pr_err("Registering debug feature %s failed\n", name); |
713 | } | 716 | } |
714 | mutex_unlock(&debug_mutex); | 717 | mutex_unlock(&debug_mutex); |
715 | return rc; | 718 | return rc; |
@@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) | |||
763 | if(pages_per_area > 0){ | 766 | if(pages_per_area > 0){ |
764 | new_areas = debug_areas_alloc(pages_per_area, nr_areas); | 767 | new_areas = debug_areas_alloc(pages_per_area, nr_areas); |
765 | if(!new_areas) { | 768 | if(!new_areas) { |
766 | printk(KERN_WARNING "debug: could not allocate memory "\ | 769 | pr_info("Allocating memory for %i pages failed\n", |
767 | "for pagenumber: %i\n",pages_per_area); | 770 | pages_per_area); |
768 | rc = -ENOMEM; | 771 | rc = -ENOMEM; |
769 | goto out; | 772 | goto out; |
770 | } | 773 | } |
@@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) | |||
780 | memset(id->active_entries,0,sizeof(int)*id->nr_areas); | 783 | memset(id->active_entries,0,sizeof(int)*id->nr_areas); |
781 | memset(id->active_pages, 0, sizeof(int)*id->nr_areas); | 784 | memset(id->active_pages, 0, sizeof(int)*id->nr_areas); |
782 | spin_unlock_irqrestore(&id->lock,flags); | 785 | spin_unlock_irqrestore(&id->lock,flags); |
783 | printk(KERN_INFO "debug: %s: set new size (%i pages)\n"\ | 786 | pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area); |
784 | ,id->name, pages_per_area); | ||
785 | out: | 787 | out: |
786 | return rc; | 788 | return rc; |
787 | } | 789 | } |
@@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level) | |||
800 | spin_lock_irqsave(&id->lock,flags); | 802 | spin_lock_irqsave(&id->lock,flags); |
801 | if(new_level == DEBUG_OFF_LEVEL){ | 803 | if(new_level == DEBUG_OFF_LEVEL){ |
802 | id->level = DEBUG_OFF_LEVEL; | 804 | id->level = DEBUG_OFF_LEVEL; |
803 | printk(KERN_INFO "debug: %s: switched off\n",id->name); | 805 | pr_info("%s: switched off\n",id->name); |
804 | } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { | 806 | } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { |
805 | printk(KERN_INFO | 807 | pr_info("%s: level %i is out of range (%i - %i)\n", |
806 | "debug: %s: level %i is out of range (%i - %i)\n", | ||
807 | id->name, new_level, 0, DEBUG_MAX_LEVEL); | 808 | id->name, new_level, 0, DEBUG_MAX_LEVEL); |
808 | } else { | 809 | } else { |
809 | id->level = new_level; | 810 | id->level = new_level; |
@@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) | |||
1108 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, | 1109 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, |
1109 | id , &debug_file_ops); | 1110 | id , &debug_file_ops); |
1110 | if (!pde){ | 1111 | if (!pde){ |
1111 | printk(KERN_WARNING "debug: debugfs_create_file() failed!"\ | 1112 | pr_err("Registering view %s/%s failed due to out of " |
1112 | " Cannot register view %s/%s\n", id->name,view->name); | 1113 | "memory\n", id->name,view->name); |
1113 | rc = -1; | 1114 | rc = -1; |
1114 | goto out; | 1115 | goto out; |
1115 | } | 1116 | } |
@@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) | |||
1119 | break; | 1120 | break; |
1120 | } | 1121 | } |
1121 | if (i == DEBUG_MAX_VIEWS) { | 1122 | if (i == DEBUG_MAX_VIEWS) { |
1122 | printk(KERN_WARNING "debug: cannot register view %s/%s\n", | 1123 | pr_err("Registering view %s/%s would exceed the maximum " |
1123 | id->name,view->name); | 1124 | "number of views %i\n", id->name, view->name, i); |
1124 | printk(KERN_WARNING | ||
1125 | "debug: maximum number of views reached (%i)!\n", i); | ||
1126 | debugfs_remove(pde); | 1125 | debugfs_remove(pde); |
1127 | rc = -1; | 1126 | rc = -1; |
1128 | } else { | 1127 | } else { |
@@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view, | |||
1303 | new_level = debug_get_uint(str); | 1302 | new_level = debug_get_uint(str); |
1304 | } | 1303 | } |
1305 | if(new_level < 0) { | 1304 | if(new_level < 0) { |
1306 | printk(KERN_INFO "debug: level `%s` is not valid\n", str); | 1305 | pr_warning("%s is not a valid level for a debug " |
1306 | "feature\n", str); | ||
1307 | rc = -EINVAL; | 1307 | rc = -EINVAL; |
1308 | } else { | 1308 | } else { |
1309 | debug_set_level(id, new_level); | 1309 | debug_set_level(id, new_level); |
@@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view, | |||
1380 | goto out; | 1380 | goto out; |
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]); | 1383 | pr_info("Flushing debug data failed because %c is not a valid " |
1384 | "area\n", input_buf[0]); | ||
1384 | 1385 | ||
1385 | out: | 1386 | out: |
1386 | *offset += user_len; | 1387 | *offset += user_len; |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 198ea18a534d..55de521aef77 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -109,13 +109,6 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
109 | * R15 - kernel stack pointer | 109 | * R15 - kernel stack pointer |
110 | */ | 110 | */ |
111 | 111 | ||
112 | .macro STORE_TIMER lc_offset | ||
113 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
114 | stpt \lc_offset | ||
115 | #endif | ||
116 | .endm | ||
117 | |||
118 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
119 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | 112 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum |
120 | lm %r10,%r11,\lc_from | 113 | lm %r10,%r11,\lc_from |
121 | sl %r10,\lc_to | 114 | sl %r10,\lc_to |
@@ -128,7 +121,6 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
128 | al %r10,BASED(.Lc_1) | 121 | al %r10,BASED(.Lc_1) |
129 | 1: stm %r10,%r11,\lc_sum | 122 | 1: stm %r10,%r11,\lc_sum |
130 | .endm | 123 | .endm |
131 | #endif | ||
132 | 124 | ||
133 | .macro SAVE_ALL_BASE savearea | 125 | .macro SAVE_ALL_BASE savearea |
134 | stm %r12,%r15,\savearea | 126 | stm %r12,%r15,\savearea |
@@ -198,7 +190,7 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
198 | ni \psworg+1,0xfd # clear wait state bit | 190 | ni \psworg+1,0xfd # clear wait state bit |
199 | .endif | 191 | .endif |
200 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | 192 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user |
201 | STORE_TIMER __LC_EXIT_TIMER | 193 | stpt __LC_EXIT_TIMER |
202 | lpsw \psworg # back to caller | 194 | lpsw \psworg # back to caller |
203 | .endm | 195 | .endm |
204 | 196 | ||
@@ -247,20 +239,18 @@ __critical_start: | |||
247 | 239 | ||
248 | .globl system_call | 240 | .globl system_call |
249 | system_call: | 241 | system_call: |
250 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 242 | stpt __LC_SYNC_ENTER_TIMER |
251 | sysc_saveall: | 243 | sysc_saveall: |
252 | SAVE_ALL_BASE __LC_SAVE_AREA | 244 | SAVE_ALL_BASE __LC_SAVE_AREA |
253 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 245 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
254 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 246 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
255 | lh %r7,0x8a # get svc number from lowcore | 247 | lh %r7,0x8a # get svc number from lowcore |
256 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
257 | sysc_vtime: | 248 | sysc_vtime: |
258 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 249 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
259 | sysc_stime: | 250 | sysc_stime: |
260 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 251 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
261 | sysc_update: | 252 | sysc_update: |
262 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 253 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
263 | #endif | ||
264 | sysc_do_svc: | 254 | sysc_do_svc: |
265 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 255 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
266 | ltr %r7,%r7 # test for svc 0 | 256 | ltr %r7,%r7 # test for svc 0 |
@@ -436,7 +426,7 @@ ret_from_fork: | |||
436 | basr %r14,%r1 | 426 | basr %r14,%r1 |
437 | TRACE_IRQS_ON | 427 | TRACE_IRQS_ON |
438 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 428 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
439 | b BASED(sysc_return) | 429 | b BASED(sysc_tracenogo) |
440 | 430 | ||
441 | # | 431 | # |
442 | # kernel_execve function needs to deal with pt_regs that is not | 432 | # kernel_execve function needs to deal with pt_regs that is not |
@@ -490,20 +480,18 @@ pgm_check_handler: | |||
490 | * we just ignore the PER event (FIXME: is there anything we have to do | 480 | * we just ignore the PER event (FIXME: is there anything we have to do |
491 | * for LPSW?). | 481 | * for LPSW?). |
492 | */ | 482 | */ |
493 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 483 | stpt __LC_SYNC_ENTER_TIMER |
494 | SAVE_ALL_BASE __LC_SAVE_AREA | 484 | SAVE_ALL_BASE __LC_SAVE_AREA |
495 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 485 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception |
496 | bnz BASED(pgm_per) # got per exception -> special case | 486 | bnz BASED(pgm_per) # got per exception -> special case |
497 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 487 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
498 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 488 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
499 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
500 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 489 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
501 | bz BASED(pgm_no_vtime) | 490 | bz BASED(pgm_no_vtime) |
502 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 491 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
503 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 492 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
504 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 493 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
505 | pgm_no_vtime: | 494 | pgm_no_vtime: |
506 | #endif | ||
507 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 495 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
508 | TRACE_IRQS_OFF | 496 | TRACE_IRQS_OFF |
509 | l %r3,__LC_PGM_ILC # load program interruption code | 497 | l %r3,__LC_PGM_ILC # load program interruption code |
@@ -536,14 +524,12 @@ pgm_per: | |||
536 | pgm_per_std: | 524 | pgm_per_std: |
537 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 525 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
538 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 526 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
539 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
540 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 527 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
541 | bz BASED(pgm_no_vtime2) | 528 | bz BASED(pgm_no_vtime2) |
542 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 529 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
543 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 530 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
544 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 531 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
545 | pgm_no_vtime2: | 532 | pgm_no_vtime2: |
546 | #endif | ||
547 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 533 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
548 | TRACE_IRQS_OFF | 534 | TRACE_IRQS_OFF |
549 | l %r1,__TI_task(%r9) | 535 | l %r1,__TI_task(%r9) |
@@ -565,11 +551,9 @@ pgm_no_vtime2: | |||
565 | pgm_svcper: | 551 | pgm_svcper: |
566 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 552 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
567 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 553 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
568 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
569 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 554 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
570 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 555 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
571 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 556 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
572 | #endif | ||
573 | lh %r7,0x8a # get svc number from lowcore | 557 | lh %r7,0x8a # get svc number from lowcore |
574 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 558 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
575 | TRACE_IRQS_OFF | 559 | TRACE_IRQS_OFF |
@@ -599,19 +583,17 @@ kernel_per: | |||
599 | 583 | ||
600 | .globl io_int_handler | 584 | .globl io_int_handler |
601 | io_int_handler: | 585 | io_int_handler: |
602 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 586 | stpt __LC_ASYNC_ENTER_TIMER |
603 | stck __LC_INT_CLOCK | 587 | stck __LC_INT_CLOCK |
604 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | 588 | SAVE_ALL_BASE __LC_SAVE_AREA+16 |
605 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 | 589 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 |
606 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 | 590 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 |
607 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
608 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 591 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
609 | bz BASED(io_no_vtime) | 592 | bz BASED(io_no_vtime) |
610 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 593 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
611 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 594 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
612 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 595 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
613 | io_no_vtime: | 596 | io_no_vtime: |
614 | #endif | ||
615 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 597 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
616 | TRACE_IRQS_OFF | 598 | TRACE_IRQS_OFF |
617 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ | 599 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ |
@@ -741,19 +723,17 @@ io_notify_resume: | |||
741 | 723 | ||
742 | .globl ext_int_handler | 724 | .globl ext_int_handler |
743 | ext_int_handler: | 725 | ext_int_handler: |
744 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 726 | stpt __LC_ASYNC_ENTER_TIMER |
745 | stck __LC_INT_CLOCK | 727 | stck __LC_INT_CLOCK |
746 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | 728 | SAVE_ALL_BASE __LC_SAVE_AREA+16 |
747 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 | 729 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 |
748 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 | 730 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 |
749 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
750 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 731 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
751 | bz BASED(ext_no_vtime) | 732 | bz BASED(ext_no_vtime) |
752 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 733 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
753 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 734 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
754 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 735 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
755 | ext_no_vtime: | 736 | ext_no_vtime: |
756 | #endif | ||
757 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 737 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
758 | TRACE_IRQS_OFF | 738 | TRACE_IRQS_OFF |
759 | la %r2,SP_PTREGS(%r15) # address of register-save area | 739 | la %r2,SP_PTREGS(%r15) # address of register-save area |
@@ -776,7 +756,6 @@ mcck_int_handler: | |||
776 | la %r12,__LC_MCK_OLD_PSW | 756 | la %r12,__LC_MCK_OLD_PSW |
777 | tm __LC_MCCK_CODE,0x80 # system damage? | 757 | tm __LC_MCCK_CODE,0x80 # system damage? |
778 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid | 758 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid |
779 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
780 | mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER | 759 | mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER |
781 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA | 760 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA |
782 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 761 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
@@ -793,9 +772,7 @@ mcck_int_handler: | |||
793 | la %r14,__LC_LAST_UPDATE_TIMER | 772 | la %r14,__LC_LAST_UPDATE_TIMER |
794 | 0: spt 0(%r14) | 773 | 0: spt 0(%r14) |
795 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | 774 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) |
796 | 1: | 775 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
797 | #endif | ||
798 | tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | ||
799 | bno BASED(mcck_int_main) # no -> skip cleanup critical | 776 | bno BASED(mcck_int_main) # no -> skip cleanup critical |
800 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 777 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit |
801 | bnz BASED(mcck_int_main) # from user -> load async stack | 778 | bnz BASED(mcck_int_main) # from user -> load async stack |
@@ -812,7 +789,6 @@ mcck_int_main: | |||
812 | be BASED(0f) | 789 | be BASED(0f) |
813 | l %r15,__LC_PANIC_STACK # load panic stack | 790 | l %r15,__LC_PANIC_STACK # load panic stack |
814 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 | 791 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 |
815 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
816 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | 792 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? |
817 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical | 793 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical |
818 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 794 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -821,7 +797,6 @@ mcck_int_main: | |||
821 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 797 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
822 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 798 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
823 | mcck_no_vtime: | 799 | mcck_no_vtime: |
824 | #endif | ||
825 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 800 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
826 | la %r2,SP_PTREGS(%r15) # load pt_regs | 801 | la %r2,SP_PTREGS(%r15) # load pt_regs |
827 | l %r1,BASED(.Ls390_mcck) | 802 | l %r1,BASED(.Ls390_mcck) |
@@ -843,16 +818,13 @@ mcck_no_vtime: | |||
843 | mcck_return: | 818 | mcck_return: |
844 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW | 819 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW |
845 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 820 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
846 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
847 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 | 821 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 |
848 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 822 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
849 | bno BASED(0f) | 823 | bno BASED(0f) |
850 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 824 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
851 | stpt __LC_EXIT_TIMER | 825 | stpt __LC_EXIT_TIMER |
852 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 826 | lpsw __LC_RETURN_MCCK_PSW # back to caller |
853 | 0: | 827 | 0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
854 | #endif | ||
855 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | ||
856 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 828 | lpsw __LC_RETURN_MCCK_PSW # back to caller |
857 | 829 | ||
858 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 | 830 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 |
@@ -976,13 +948,11 @@ cleanup_system_call: | |||
976 | b BASED(1f) | 948 | b BASED(1f) |
977 | 0: la %r12,__LC_SAVE_AREA+32 | 949 | 0: la %r12,__LC_SAVE_AREA+32 |
978 | 1: | 950 | 1: |
979 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
980 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | 951 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) |
981 | bh BASED(0f) | 952 | bh BASED(0f) |
982 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 953 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
983 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | 954 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) |
984 | bhe BASED(cleanup_vtime) | 955 | bhe BASED(cleanup_vtime) |
985 | #endif | ||
986 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | 956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) |
987 | bh BASED(0f) | 957 | bh BASED(0f) |
988 | mvc __LC_SAVE_AREA(16),0(%r12) | 958 | mvc __LC_SAVE_AREA(16),0(%r12) |
@@ -993,7 +963,6 @@ cleanup_system_call: | |||
993 | l %r12,__LC_SAVE_AREA+48 # argh | 963 | l %r12,__LC_SAVE_AREA+48 # argh |
994 | st %r15,12(%r12) | 964 | st %r15,12(%r12) |
995 | lh %r7,0x8a | 965 | lh %r7,0x8a |
996 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
997 | cleanup_vtime: | 966 | cleanup_vtime: |
998 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 967 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) |
999 | bhe BASED(cleanup_stime) | 968 | bhe BASED(cleanup_stime) |
@@ -1004,18 +973,15 @@ cleanup_stime: | |||
1004 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 973 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
1005 | cleanup_update: | 974 | cleanup_update: |
1006 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 975 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
1007 | #endif | ||
1008 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) | 976 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) |
1009 | la %r12,__LC_RETURN_PSW | 977 | la %r12,__LC_RETURN_PSW |
1010 | br %r14 | 978 | br %r14 |
1011 | cleanup_system_call_insn: | 979 | cleanup_system_call_insn: |
1012 | .long sysc_saveall + 0x80000000 | 980 | .long sysc_saveall + 0x80000000 |
1013 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1014 | .long system_call + 0x80000000 | 981 | .long system_call + 0x80000000 |
1015 | .long sysc_vtime + 0x80000000 | 982 | .long sysc_vtime + 0x80000000 |
1016 | .long sysc_stime + 0x80000000 | 983 | .long sysc_stime + 0x80000000 |
1017 | .long sysc_update + 0x80000000 | 984 | .long sysc_update + 0x80000000 |
1018 | #endif | ||
1019 | 985 | ||
1020 | cleanup_sysc_return: | 986 | cleanup_sysc_return: |
1021 | mvc __LC_RETURN_PSW(4),0(%r12) | 987 | mvc __LC_RETURN_PSW(4),0(%r12) |
@@ -1026,11 +992,9 @@ cleanup_sysc_return: | |||
1026 | cleanup_sysc_leave: | 992 | cleanup_sysc_leave: |
1027 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) | 993 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) |
1028 | be BASED(2f) | 994 | be BASED(2f) |
1029 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1030 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 995 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1031 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) | 996 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) |
1032 | be BASED(2f) | 997 | be BASED(2f) |
1033 | #endif | ||
1034 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 998 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) |
1035 | c %r12,BASED(.Lmck_old_psw) | 999 | c %r12,BASED(.Lmck_old_psw) |
1036 | bne BASED(0f) | 1000 | bne BASED(0f) |
@@ -1043,9 +1007,7 @@ cleanup_sysc_leave: | |||
1043 | br %r14 | 1007 | br %r14 |
1044 | cleanup_sysc_leave_insn: | 1008 | cleanup_sysc_leave_insn: |
1045 | .long sysc_done - 4 + 0x80000000 | 1009 | .long sysc_done - 4 + 0x80000000 |
1046 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1047 | .long sysc_done - 8 + 0x80000000 | 1010 | .long sysc_done - 8 + 0x80000000 |
1048 | #endif | ||
1049 | 1011 | ||
1050 | cleanup_io_return: | 1012 | cleanup_io_return: |
1051 | mvc __LC_RETURN_PSW(4),0(%r12) | 1013 | mvc __LC_RETURN_PSW(4),0(%r12) |
@@ -1056,11 +1018,9 @@ cleanup_io_return: | |||
1056 | cleanup_io_leave: | 1018 | cleanup_io_leave: |
1057 | clc 4(4,%r12),BASED(cleanup_io_leave_insn) | 1019 | clc 4(4,%r12),BASED(cleanup_io_leave_insn) |
1058 | be BASED(2f) | 1020 | be BASED(2f) |
1059 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1060 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 1021 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1061 | clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) | 1022 | clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) |
1062 | be BASED(2f) | 1023 | be BASED(2f) |
1063 | #endif | ||
1064 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 1024 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) |
1065 | c %r12,BASED(.Lmck_old_psw) | 1025 | c %r12,BASED(.Lmck_old_psw) |
1066 | bne BASED(0f) | 1026 | bne BASED(0f) |
@@ -1073,9 +1033,7 @@ cleanup_io_leave: | |||
1073 | br %r14 | 1033 | br %r14 |
1074 | cleanup_io_leave_insn: | 1034 | cleanup_io_leave_insn: |
1075 | .long io_done - 4 + 0x80000000 | 1035 | .long io_done - 4 + 0x80000000 |
1076 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1077 | .long io_done - 8 + 0x80000000 | 1036 | .long io_done - 8 + 0x80000000 |
1078 | #endif | ||
1079 | 1037 | ||
1080 | /* | 1038 | /* |
1081 | * Integer constants | 1039 | * Integer constants |
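Note: with the CONFIG_VIRT_CPU_ACCOUNTING guards removed, the stpt instructions and the UPDATE_VTIME bookkeeping above run unconditionally. Conceptually, UPDATE_VTIME accumulates the difference between two CPU-timer samples into a lowcore field; a C rendering of the 64-bit variant (see entry64.S below) would look roughly like this sketch, with illustrative names:

    /* Illustrative C equivalent of UPDATE_VTIME: the CPU timer counts down,
     * so the elapsed time between two samples is "from - to", and it is
     * added to the running sum kept in the lowcore. */
    static inline void update_vtime(unsigned long long from,
                                    unsigned long long to,
                                    unsigned long long *sum)
    {
            *sum += from - to;
    }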
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 89c121ae6339..16bb4fd1a403 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
96 | #define LOCKDEP_SYS_EXIT | 96 | #define LOCKDEP_SYS_EXIT |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | .macro STORE_TIMER lc_offset | ||
100 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
101 | stpt \lc_offset | ||
102 | #endif | ||
103 | .endm | ||
104 | |||
105 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
106 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | 99 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum |
107 | lg %r10,\lc_from | 100 | lg %r10,\lc_from |
108 | slg %r10,\lc_to | 101 | slg %r10,\lc_to |
109 | alg %r10,\lc_sum | 102 | alg %r10,\lc_sum |
110 | stg %r10,\lc_sum | 103 | stg %r10,\lc_sum |
111 | .endm | 104 | .endm |
112 | #endif | ||
113 | 105 | ||
114 | /* | 106 | /* |
115 | * Register usage in interrupt handlers: | 107 | * Register usage in interrupt handlers: |
@@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
186 | ni \psworg+1,0xfd # clear wait state bit | 178 | ni \psworg+1,0xfd # clear wait state bit |
187 | .endif | 179 | .endif |
188 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | 180 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user |
189 | STORE_TIMER __LC_EXIT_TIMER | 181 | stpt __LC_EXIT_TIMER |
190 | lpswe \psworg # back to caller | 182 | lpswe \psworg # back to caller |
191 | .endm | 183 | .endm |
192 | 184 | ||
@@ -233,20 +225,18 @@ __critical_start: | |||
233 | 225 | ||
234 | .globl system_call | 226 | .globl system_call |
235 | system_call: | 227 | system_call: |
236 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 228 | stpt __LC_SYNC_ENTER_TIMER |
237 | sysc_saveall: | 229 | sysc_saveall: |
238 | SAVE_ALL_BASE __LC_SAVE_AREA | 230 | SAVE_ALL_BASE __LC_SAVE_AREA |
239 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 231 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
240 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 232 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
241 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | 233 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore |
242 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
243 | sysc_vtime: | 234 | sysc_vtime: |
244 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
245 | sysc_stime: | 236 | sysc_stime: |
246 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 237 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
247 | sysc_update: | 238 | sysc_update: |
248 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
249 | #endif | ||
250 | sysc_do_svc: | 240 | sysc_do_svc: |
251 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 241 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
252 | ltgr %r7,%r7 # test for svc 0 | 242 | ltgr %r7,%r7 # test for svc 0 |
@@ -417,7 +407,7 @@ ret_from_fork: | |||
417 | 0: brasl %r14,schedule_tail | 407 | 0: brasl %r14,schedule_tail |
418 | TRACE_IRQS_ON | 408 | TRACE_IRQS_ON |
419 | stosm 24(%r15),0x03 # reenable interrupts | 409 | stosm 24(%r15),0x03 # reenable interrupts |
420 | j sysc_return | 410 | j sysc_tracenogo |
421 | 411 | ||
422 | # | 412 | # |
423 | # kernel_execve function needs to deal with pt_regs that is not | 413 | # kernel_execve function needs to deal with pt_regs that is not |
@@ -469,20 +459,18 @@ pgm_check_handler: | |||
469 | * we just ignore the PER event (FIXME: is there anything we have to do | 459 | * we just ignore the PER event (FIXME: is there anything we have to do |
470 | * for LPSW?). | 460 | * for LPSW?). |
471 | */ | 461 | */ |
472 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 462 | stpt __LC_SYNC_ENTER_TIMER |
473 | SAVE_ALL_BASE __LC_SAVE_AREA | 463 | SAVE_ALL_BASE __LC_SAVE_AREA |
474 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 464 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception |
475 | jnz pgm_per # got per exception -> special case | 465 | jnz pgm_per # got per exception -> special case |
476 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 466 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
477 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 467 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
478 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
479 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 468 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
480 | jz pgm_no_vtime | 469 | jz pgm_no_vtime |
481 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 470 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
482 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 471 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
483 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 472 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
484 | pgm_no_vtime: | 473 | pgm_no_vtime: |
485 | #endif | ||
486 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 474 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
487 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | 475 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK |
488 | TRACE_IRQS_OFF | 476 | TRACE_IRQS_OFF |
@@ -516,14 +504,12 @@ pgm_per: | |||
516 | pgm_per_std: | 504 | pgm_per_std: |
517 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 505 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
518 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 506 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
519 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
520 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 507 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
521 | jz pgm_no_vtime2 | 508 | jz pgm_no_vtime2 |
522 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 509 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
523 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 510 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
524 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 511 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
525 | pgm_no_vtime2: | 512 | pgm_no_vtime2: |
526 | #endif | ||
527 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 513 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
528 | TRACE_IRQS_OFF | 514 | TRACE_IRQS_OFF |
529 | lg %r1,__TI_task(%r9) | 515 | lg %r1,__TI_task(%r9) |
@@ -545,11 +531,9 @@ pgm_no_vtime2: | |||
545 | pgm_svcper: | 531 | pgm_svcper: |
546 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 532 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
547 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 533 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
548 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
549 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 534 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
550 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 535 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
551 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 536 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
552 | #endif | ||
553 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | 537 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore |
554 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 538 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
555 | lg %r1,__TI_task(%r9) | 539 | lg %r1,__TI_task(%r9) |
@@ -575,19 +559,17 @@ kernel_per: | |||
575 | */ | 559 | */ |
576 | .globl io_int_handler | 560 | .globl io_int_handler |
577 | io_int_handler: | 561 | io_int_handler: |
578 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 562 | stpt __LC_ASYNC_ENTER_TIMER |
579 | stck __LC_INT_CLOCK | 563 | stck __LC_INT_CLOCK |
580 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 564 | SAVE_ALL_BASE __LC_SAVE_AREA+32 |
581 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 | 565 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 |
582 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 | 566 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 |
583 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
584 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 567 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
585 | jz io_no_vtime | 568 | jz io_no_vtime |
586 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 569 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
587 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 570 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
588 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 571 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
589 | io_no_vtime: | 572 | io_no_vtime: |
590 | #endif | ||
591 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 573 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
592 | TRACE_IRQS_OFF | 574 | TRACE_IRQS_OFF |
593 | la %r2,SP_PTREGS(%r15) # address of register-save area | 575 | la %r2,SP_PTREGS(%r15) # address of register-save area |
@@ -739,19 +721,17 @@ io_notify_resume: | |||
739 | */ | 721 | */ |
740 | .globl ext_int_handler | 722 | .globl ext_int_handler |
741 | ext_int_handler: | 723 | ext_int_handler: |
742 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 724 | stpt __LC_ASYNC_ENTER_TIMER |
743 | stck __LC_INT_CLOCK | 725 | stck __LC_INT_CLOCK |
744 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 726 | SAVE_ALL_BASE __LC_SAVE_AREA+32 |
745 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 | 727 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 |
746 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 | 728 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 |
747 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
748 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 729 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
749 | jz ext_no_vtime | 730 | jz ext_no_vtime |
750 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 731 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
751 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 732 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
752 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 733 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
753 | ext_no_vtime: | 734 | ext_no_vtime: |
754 | #endif | ||
755 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 735 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
756 | TRACE_IRQS_OFF | 736 | TRACE_IRQS_OFF |
757 | la %r2,SP_PTREGS(%r15) # address of register-save area | 737 | la %r2,SP_PTREGS(%r15) # address of register-save area |
@@ -773,7 +753,6 @@ mcck_int_handler: | |||
773 | la %r12,__LC_MCK_OLD_PSW | 753 | la %r12,__LC_MCK_OLD_PSW |
774 | tm __LC_MCCK_CODE,0x80 # system damage? | 754 | tm __LC_MCCK_CODE,0x80 # system damage? |
775 | jo mcck_int_main # yes -> rest of mcck code invalid | 755 | jo mcck_int_main # yes -> rest of mcck code invalid |
776 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
777 | la %r14,4095 | 756 | la %r14,4095 |
778 | mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER | 757 | mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER |
779 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) | 758 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) |
@@ -791,9 +770,7 @@ mcck_int_handler: | |||
791 | la %r14,__LC_LAST_UPDATE_TIMER | 770 | la %r14,__LC_LAST_UPDATE_TIMER |
792 | 0: spt 0(%r14) | 771 | 0: spt 0(%r14) |
793 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | 772 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) |
794 | 1: | 773 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
795 | #endif | ||
796 | tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | ||
797 | jno mcck_int_main # no -> skip cleanup critical | 774 | jno mcck_int_main # no -> skip cleanup critical |
798 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 775 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit |
799 | jnz mcck_int_main # from user -> load kernel stack | 776 | jnz mcck_int_main # from user -> load kernel stack |
@@ -809,7 +786,6 @@ mcck_int_main: | |||
809 | jz 0f | 786 | jz 0f |
810 | lg %r15,__LC_PANIC_STACK # load panic stack | 787 | lg %r15,__LC_PANIC_STACK # load panic stack |
811 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 | 788 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 |
812 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
813 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | 789 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? |
814 | jno mcck_no_vtime # no -> no timer update | 790 | jno mcck_no_vtime # no -> no timer update |
815 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 791 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -818,7 +794,6 @@ mcck_int_main: | |||
818 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 794 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
819 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 795 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
820 | mcck_no_vtime: | 796 | mcck_no_vtime: |
821 | #endif | ||
822 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 797 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
823 | la %r2,SP_PTREGS(%r15) # load pt_regs | 798 | la %r2,SP_PTREGS(%r15) # load pt_regs |
824 | brasl %r14,s390_do_machine_check | 799 | brasl %r14,s390_do_machine_check |
@@ -839,14 +814,11 @@ mcck_return: | |||
839 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW | 814 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW |
840 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 815 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
841 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 816 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
842 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
843 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 | 817 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 |
844 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 818 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
845 | jno 0f | 819 | jno 0f |
846 | stpt __LC_EXIT_TIMER | 820 | stpt __LC_EXIT_TIMER |
847 | 0: | 821 | 0: lpswe __LC_RETURN_MCCK_PSW # back to caller |
848 | #endif | ||
849 | lpswe __LC_RETURN_MCCK_PSW # back to caller | ||
850 | 822 | ||
851 | /* | 823 | /* |
852 | * Restart interruption handler, kick starter for additional CPUs | 824 | * Restart interruption handler, kick starter for additional CPUs |
@@ -964,13 +936,11 @@ cleanup_system_call: | |||
964 | j 1f | 936 | j 1f |
965 | 0: la %r12,__LC_SAVE_AREA+64 | 937 | 0: la %r12,__LC_SAVE_AREA+64 |
966 | 1: | 938 | 1: |
967 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
968 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) | 939 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) |
969 | jh 0f | 940 | jh 0f |
970 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 941 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
971 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) | 942 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) |
972 | jhe cleanup_vtime | 943 | jhe cleanup_vtime |
973 | #endif | ||
974 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) | 944 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) |
975 | jh 0f | 945 | jh 0f |
976 | mvc __LC_SAVE_AREA(32),0(%r12) | 946 | mvc __LC_SAVE_AREA(32),0(%r12) |
@@ -981,7 +951,6 @@ cleanup_system_call: | |||
981 | lg %r12,__LC_SAVE_AREA+96 # argh | 951 | lg %r12,__LC_SAVE_AREA+96 # argh |
982 | stg %r15,24(%r12) | 952 | stg %r15,24(%r12) |
983 | llgh %r7,__LC_SVC_INT_CODE | 953 | llgh %r7,__LC_SVC_INT_CODE |
984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
985 | cleanup_vtime: | 954 | cleanup_vtime: |
986 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 955 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
987 | jhe cleanup_stime | 956 | jhe cleanup_stime |
@@ -992,18 +961,15 @@ cleanup_stime: | |||
992 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 961 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
993 | cleanup_update: | 962 | cleanup_update: |
994 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 963 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
995 | #endif | ||
996 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | 964 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) |
997 | la %r12,__LC_RETURN_PSW | 965 | la %r12,__LC_RETURN_PSW |
998 | br %r14 | 966 | br %r14 |
999 | cleanup_system_call_insn: | 967 | cleanup_system_call_insn: |
1000 | .quad sysc_saveall | 968 | .quad sysc_saveall |
1001 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1002 | .quad system_call | 969 | .quad system_call |
1003 | .quad sysc_vtime | 970 | .quad sysc_vtime |
1004 | .quad sysc_stime | 971 | .quad sysc_stime |
1005 | .quad sysc_update | 972 | .quad sysc_update |
1006 | #endif | ||
1007 | 973 | ||
1008 | cleanup_sysc_return: | 974 | cleanup_sysc_return: |
1009 | mvc __LC_RETURN_PSW(8),0(%r12) | 975 | mvc __LC_RETURN_PSW(8),0(%r12) |
@@ -1014,11 +980,9 @@ cleanup_sysc_return: | |||
1014 | cleanup_sysc_leave: | 980 | cleanup_sysc_leave: |
1015 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) | 981 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) |
1016 | je 2f | 982 | je 2f |
1017 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1018 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 983 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1019 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) | 984 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) |
1020 | je 2f | 985 | je 2f |
1021 | #endif | ||
1022 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 986 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) |
1023 | cghi %r12,__LC_MCK_OLD_PSW | 987 | cghi %r12,__LC_MCK_OLD_PSW |
1024 | jne 0f | 988 | jne 0f |
@@ -1031,9 +995,7 @@ cleanup_sysc_leave: | |||
1031 | br %r14 | 995 | br %r14 |
1032 | cleanup_sysc_leave_insn: | 996 | cleanup_sysc_leave_insn: |
1033 | .quad sysc_done - 4 | 997 | .quad sysc_done - 4 |
1034 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1035 | .quad sysc_done - 8 | 998 | .quad sysc_done - 8 |
1036 | #endif | ||
1037 | 999 | ||
1038 | cleanup_io_return: | 1000 | cleanup_io_return: |
1039 | mvc __LC_RETURN_PSW(8),0(%r12) | 1001 | mvc __LC_RETURN_PSW(8),0(%r12) |
@@ -1044,11 +1006,9 @@ cleanup_io_return: | |||
1044 | cleanup_io_leave: | 1006 | cleanup_io_leave: |
1045 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) | 1007 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) |
1046 | je 2f | 1008 | je 2f |
1047 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1048 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 1009 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1049 | clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) | 1010 | clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) |
1050 | je 2f | 1011 | je 2f |
1051 | #endif | ||
1052 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 1012 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) |
1053 | cghi %r12,__LC_MCK_OLD_PSW | 1013 | cghi %r12,__LC_MCK_OLD_PSW |
1054 | jne 0f | 1014 | jne 0f |
@@ -1061,9 +1021,7 @@ cleanup_io_leave: | |||
1061 | br %r14 | 1021 | br %r14 |
1062 | cleanup_io_leave_insn: | 1022 | cleanup_io_leave_insn: |
1063 | .quad io_done - 4 | 1023 | .quad io_done - 4 |
1064 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1065 | .quad io_done - 8 | 1024 | .quad io_done - 8 |
1066 | #endif | ||
1067 | 1025 | ||
1068 | /* | 1026 | /* |
1069 | * Integer constants | 1027 | * Integer constants |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 83477c7dc743..ec7e35f6055b 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -461,6 +461,55 @@ start: | |||
461 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 | 461 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 |
462 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff | 462 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff |
463 | 463 | ||
464 | # | ||
465 | # startup-code at 0x10000, running in absolute addressing mode | ||
466 | # this is called either by the ipl loader or directly by PSW restart | ||
467 | # or linload or SALIPL | ||
468 | # | ||
469 | .org 0x10000 | ||
470 | startup:basr %r13,0 # get base | ||
471 | .LPG0: | ||
472 | |||
473 | #ifndef CONFIG_MARCH_G5 | ||
474 | # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} | ||
475 | stidp __LC_CPUID # store cpuid | ||
476 | lhi %r0,(3f-2f) / 2 | ||
477 | la %r1,2f-.LPG0(%r13) | ||
478 | 0: clc __LC_CPUID+4(2),0(%r1) | ||
479 | jne 3f | ||
480 | lpsw 1f-.LPG0(13) # machine type not good enough, crash | ||
481 | .align 16 | ||
482 | 1: .long 0x000a0000,0x00000000 | ||
483 | 2: | ||
484 | #if defined(CONFIG_MARCH_Z10) | ||
485 | .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096 | ||
486 | #elif defined(CONFIG_MARCH_Z9_109) | ||
487 | .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086 | ||
488 | #elif defined(CONFIG_MARCH_Z990) | ||
489 | .short 0x9672, 0x2064, 0x2066 | ||
490 | #elif defined(CONFIG_MARCH_Z900) | ||
491 | .short 0x9672 | ||
492 | #endif | ||
493 | 3: la %r1,2(%r1) | ||
494 | brct %r0,0b | ||
495 | #endif | ||
496 | |||
497 | l %r13,0f-.LPG0(%r13) | ||
498 | b 0(%r13) | ||
499 | 0: .long startup_continue | ||
500 | |||
501 | # | ||
502 | # params at 10400 (setup.h) | ||
503 | # | ||
504 | .org PARMAREA | ||
505 | .long 0,0 # IPL_DEVICE | ||
506 | .long 0,0 # INITRD_START | ||
507 | .long 0,0 # INITRD_SIZE | ||
508 | |||
509 | .org COMMAND_LINE | ||
510 | .byte "root=/dev/ram0 ro" | ||
511 | .byte 0 | ||
512 | |||
464 | #ifdef CONFIG_64BIT | 513 | #ifdef CONFIG_64BIT |
465 | #include "head64.S" | 514 | #include "head64.S" |
466 | #else | 515 | #else |
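Note: the new startup block in head.S compares the machine type from the stored CPU id against a CONFIG_MARCH_*-dependent table of machines that are too old for the configured architecture level, and loads a disabled-wait PSW on a match so the kernel refuses to run. A conceptual C rendering (sketch only; the real code runs in absolute addressing mode long before C is available, and load_disabled_wait_psw() is a hypothetical stand-in for the lpsw of a disabled-wait PSW):

    /* Conceptual C sketch of the head.S machine-type check. */
    static const unsigned short unsupported_machines[] = {
            0x9672, 0x2064, 0x2066,         /* contents depend on CONFIG_MARCH_* */
    };

    static void check_machine_type(unsigned short machine_type)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(unsupported_machines); i++)
                    if (machine_type == unsupported_machines[i])
                            load_disabled_wait_psw();  /* machine too old: stop */
    }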
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index a816e2de32b9..db476d114caa 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -10,34 +10,13 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | # | ||
14 | # startup-code at 0x10000, running in absolute addressing mode | ||
15 | # this is called either by the ipl loader or directly by PSW restart | ||
16 | # or linload or SALIPL | ||
17 | # | ||
18 | .org 0x10000 | ||
19 | startup:basr %r13,0 # get base | ||
20 | .LPG0: l %r13,0f-.LPG0(%r13) | ||
21 | b 0(%r13) | ||
22 | 0: .long startup_continue | ||
23 | |||
24 | # | ||
25 | # params at 10400 (setup.h) | ||
26 | # | ||
27 | .org PARMAREA | ||
28 | .long 0,0 # IPL_DEVICE | ||
29 | .long 0,0 # INITRD_START | ||
30 | .long 0,0 # INITRD_SIZE | ||
31 | |||
32 | .org COMMAND_LINE | ||
33 | .byte "root=/dev/ram0 ro" | ||
34 | .byte 0 | ||
35 | |||
36 | .org 0x11000 | 13 | .org 0x11000 |
37 | 14 | ||
38 | startup_continue: | 15 | startup_continue: |
39 | basr %r13,0 # get base | 16 | basr %r13,0 # get base |
40 | .LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | 17 | .LPG1: |
18 | |||
19 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
41 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | 20 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers |
42 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | 21 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area |
43 | # move IPL device to lowcore | 22 | # move IPL device to lowcore |
@@ -50,7 +29,6 @@ startup_continue: | |||
50 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | 29 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE |
51 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 30 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
52 | ahi %r15,-96 | 31 | ahi %r15,-96 |
53 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | ||
54 | # | 32 | # |
55 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, | 33 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
56 | # and create a kernel NSS if the SAVESYS= parm is defined | 34 | # and create a kernel NSS if the SAVESYS= parm is defined |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 1d06961e87b3..3ccd36b24b8f 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -10,29 +10,6 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | # | ||
14 | # startup-code at 0x10000, running in absolute addressing mode | ||
15 | # this is called either by the ipl loader or directly by PSW restart | ||
16 | # or linload or SALIPL | ||
17 | # | ||
18 | .org 0x10000 | ||
19 | startup:basr %r13,0 # get base | ||
20 | .LPG0: l %r13,0f-.LPG0(%r13) | ||
21 | b 0(%r13) | ||
22 | 0: .long startup_continue | ||
23 | |||
24 | # | ||
25 | # params at 10400 (setup.h) | ||
26 | # | ||
27 | .org PARMAREA | ||
28 | .quad 0 # IPL_DEVICE | ||
29 | .quad 0 # INITRD_START | ||
30 | .quad 0 # INITRD_SIZE | ||
31 | |||
32 | .org COMMAND_LINE | ||
33 | .byte "root=/dev/ram0 ro" | ||
34 | .byte 0 | ||
35 | |||
36 | .org 0x11000 | 13 | .org 0x11000 |
37 | 14 | ||
38 | startup_continue: | 15 | startup_continue: |
@@ -119,7 +96,6 @@ startup_continue: | |||
119 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | 96 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE |
120 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack | 97 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack |
121 | aghi %r15,-160 | 98 | aghi %r15,-160 |
122 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | ||
123 | # | 99 | # |
124 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, | 100 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
125 | # and create a kernel NSS if the SAVESYS= parm is defined | 101 | # and create a kernel NSS if the SAVESYS= parm is defined |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S new file mode 100644 index 000000000000..397d131a345f --- /dev/null +++ b/arch/s390/kernel/mcount.S | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2008 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef CONFIG_64BIT | ||
9 | .globl _mcount | ||
10 | _mcount: | ||
11 | stm %r0,%r5,8(%r15) | ||
12 | st %r14,56(%r15) | ||
13 | lr %r1,%r15 | ||
14 | ahi %r15,-96 | ||
15 | l %r3,100(%r15) | ||
16 | la %r2,0(%r14) | ||
17 | st %r1,0(%r15) | ||
18 | la %r3,0(%r3) | ||
19 | bras %r14,0f | ||
20 | .long ftrace_trace_function | ||
21 | 0: l %r14,0(%r14) | ||
22 | l %r14,0(%r14) | ||
23 | basr %r14,%r14 | ||
24 | ahi %r15,96 | ||
25 | lm %r0,%r5,8(%r15) | ||
26 | l %r14,56(%r15) | ||
27 | br %r14 | ||
28 | |||
29 | .globl ftrace_stub | ||
30 | ftrace_stub: | ||
31 | br %r14 | ||
32 | |||
33 | #else /* CONFIG_64BIT */ | ||
34 | |||
35 | .globl _mcount | ||
36 | _mcount: | ||
37 | stmg %r0,%r5,16(%r15) | ||
38 | stg %r14,112(%r15) | ||
39 | lgr %r1,%r15 | ||
40 | aghi %r15,-160 | ||
41 | stg %r1,0(%r15) | ||
42 | lgr %r2,%r14 | ||
43 | lg %r3,168(%r15) | ||
44 | larl %r14,ftrace_trace_function | ||
45 | lg %r14,0(%r14) | ||
46 | basr %r14,%r14 | ||
47 | aghi %r15,160 | ||
48 | lmg %r0,%r5,16(%r15) | ||
49 | lg %r14,112(%r15) | ||
50 | br %r14 | ||
51 | |||
52 | .globl ftrace_stub | ||
53 | ftrace_stub: | ||
54 | br %r14 | ||
55 | |||
56 | #endif /* CONFIG_64BIT */ | ||
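Note: mcount.S is the low-level side of the new s390 function tracer support. Every function compiled with -pg calls _mcount, which saves the volatile registers, loads the current ftrace_trace_function pointer and calls it with, roughly, the address inside the instrumented function and that of its caller. On the C side a tracer hooks in through the generic ftrace API of that time; a sketch (my_trace_func and my_trace_ops are illustrative names):

    #include <linux/ftrace.h>

    /* Called via ftrace_trace_function from _mcount for every traced
     * function: ip is the instrumented function, parent_ip its caller. */
    static void my_trace_func(unsigned long ip, unsigned long parent_ip)
    {
            /* record or filter the call here */
    }

    static struct ftrace_ops my_trace_ops = {
            .func = my_trace_func,
    };

    static int __init my_tracer_init(void)
    {
            return register_ftrace_function(&my_trace_ops);
    }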
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c new file mode 100644 index 000000000000..82c1872cfe80 --- /dev/null +++ b/arch/s390/kernel/processor.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/processor.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
6 | */ | ||
7 | |||
8 | #define KMSG_COMPONENT "cpu" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/seq_file.h> | ||
15 | #include <linux/delay.h> | ||
16 | |||
17 | #include <asm/elf.h> | ||
18 | #include <asm/lowcore.h> | ||
19 | #include <asm/param.h> | ||
20 | |||
21 | void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) | ||
22 | { | ||
23 | pr_info("Processor %d started, address %d, identification %06X\n", | ||
24 | cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident); | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * show_cpuinfo - Get information on one CPU for use by procfs. | ||
29 | */ | ||
30 | |||
31 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
32 | { | ||
33 | static const char *hwcap_str[8] = { | ||
34 | "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", | ||
35 | "edat" | ||
36 | }; | ||
37 | struct cpuinfo_S390 *cpuinfo; | ||
38 | unsigned long n = (unsigned long) v - 1; | ||
39 | int i; | ||
40 | |||
41 | s390_adjust_jiffies(); | ||
42 | preempt_disable(); | ||
43 | if (!n) { | ||
44 | seq_printf(m, "vendor_id : IBM/S390\n" | ||
45 | "# processors : %i\n" | ||
46 | "bogomips per cpu: %lu.%02lu\n", | ||
47 | num_online_cpus(), loops_per_jiffy/(500000/HZ), | ||
48 | (loops_per_jiffy/(5000/HZ))%100); | ||
49 | seq_puts(m, "features\t: "); | ||
50 | for (i = 0; i < 8; i++) | ||
51 | if (hwcap_str[i] && (elf_hwcap & (1UL << i))) | ||
52 | seq_printf(m, "%s ", hwcap_str[i]); | ||
53 | seq_puts(m, "\n"); | ||
54 | } | ||
55 | |||
56 | if (cpu_online(n)) { | ||
57 | #ifdef CONFIG_SMP | ||
58 | if (smp_processor_id() == n) | ||
59 | cpuinfo = &S390_lowcore.cpu_data; | ||
60 | else | ||
61 | cpuinfo = &lowcore_ptr[n]->cpu_data; | ||
62 | #else | ||
63 | cpuinfo = &S390_lowcore.cpu_data; | ||
64 | #endif | ||
65 | seq_printf(m, "processor %li: " | ||
66 | "version = %02X, " | ||
67 | "identification = %06X, " | ||
68 | "machine = %04X\n", | ||
69 | n, cpuinfo->cpu_id.version, | ||
70 | cpuinfo->cpu_id.ident, | ||
71 | cpuinfo->cpu_id.machine); | ||
72 | } | ||
73 | preempt_enable(); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
78 | { | ||
79 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | ||
80 | } | ||
81 | |||
82 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
83 | { | ||
84 | ++*pos; | ||
85 | return c_start(m, pos); | ||
86 | } | ||
87 | |||
88 | static void c_stop(struct seq_file *m, void *v) | ||
89 | { | ||
90 | } | ||
91 | |||
92 | const struct seq_operations cpuinfo_op = { | ||
93 | .start = c_start, | ||
94 | .next = c_next, | ||
95 | .stop = c_stop, | ||
96 | .show = show_cpuinfo, | ||
97 | }; | ||
98 | |||
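Note: the new processor.c only supplies the seq_operations; the /proc/cpuinfo file itself is created by the generic proc code, which consumes the exported cpuinfo_op roughly as sketched below (generic side, not part of this patch):

    #include <linux/seq_file.h>
    #include <linux/proc_fs.h>

    extern const struct seq_operations cpuinfo_op;   /* provided per architecture */

    static int cpuinfo_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &cpuinfo_op);
    }

    static const struct file_operations proc_cpuinfo_operations = {
            .open    = cpuinfo_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release,
    };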
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 38ff2bce1203..75c496f4f16d 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
204 | static int | 204 | static int |
205 | peek_user(struct task_struct *child, addr_t addr, addr_t data) | 205 | peek_user(struct task_struct *child, addr_t addr, addr_t data) |
206 | { | 206 | { |
207 | struct user *dummy = NULL; | ||
208 | addr_t tmp, mask; | 207 | addr_t tmp, mask; |
209 | 208 | ||
210 | /* | 209 | /* |
@@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) | |||
213 | */ | 212 | */ |
214 | mask = __ADDR_MASK; | 213 | mask = __ADDR_MASK; |
215 | #ifdef CONFIG_64BIT | 214 | #ifdef CONFIG_64BIT |
216 | if (addr >= (addr_t) &dummy->regs.acrs && | 215 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
217 | addr < (addr_t) &dummy->regs.orig_gpr2) | 216 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
218 | mask = 3; | 217 | mask = 3; |
219 | #endif | 218 | #endif |
220 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 219 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
@@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
312 | static int | 311 | static int |
313 | poke_user(struct task_struct *child, addr_t addr, addr_t data) | 312 | poke_user(struct task_struct *child, addr_t addr, addr_t data) |
314 | { | 313 | { |
315 | struct user *dummy = NULL; | ||
316 | addr_t mask; | 314 | addr_t mask; |
317 | 315 | ||
318 | /* | 316 | /* |
@@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
321 | */ | 319 | */ |
322 | mask = __ADDR_MASK; | 320 | mask = __ADDR_MASK; |
323 | #ifdef CONFIG_64BIT | 321 | #ifdef CONFIG_64BIT |
324 | if (addr >= (addr_t) &dummy->regs.acrs && | 322 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
325 | addr < (addr_t) &dummy->regs.orig_gpr2) | 323 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
326 | mask = 3; | 324 | mask = 3; |
327 | #endif | 325 | #endif |
328 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 326 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
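Note: the ptrace.c change replaces the dummy NULL pointer with an explicit cast; both forms only compute member offsets within struct user. Written with offsetof() the same bounds check would read as below (illustrative only, reusing __ADDR_MASK from ptrace.c itself; the patch keeps the cast form):

    #include <linux/stddef.h>
    #include <linux/user.h>

    /* Sketch: the acrs..orig_gpr2 range check expressed with offsetof(). */
    static unsigned long peek_poke_mask(unsigned long addr)
    {
            unsigned long mask = __ADDR_MASK;
    #ifdef CONFIG_64BIT
            if (addr >= offsetof(struct user, regs.acrs) &&
                addr <  offsetof(struct user, regs.orig_gpr2))
                    mask = 3;
    #endif
            return mask;
    }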
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 48238a114ce9..46b90cb03707 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/delay.h> | 14 | #include <asm/delay.h> |
15 | #include <asm/pgalloc.h> | 15 | #include <asm/pgalloc.h> |
16 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
17 | #include <asm/ftrace.h> | ||
17 | #ifdef CONFIG_IP_MULTICAST | 18 | #ifdef CONFIG_IP_MULTICAST |
18 | #include <net/arp.h> | 19 | #include <net/arp.h> |
19 | #endif | 20 | #endif |
@@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold); | |||
43 | EXPORT_SYMBOL(console_mode); | 44 | EXPORT_SYMBOL(console_mode); |
44 | EXPORT_SYMBOL(console_devno); | 45 | EXPORT_SYMBOL(console_devno); |
45 | EXPORT_SYMBOL(console_irq); | 46 | EXPORT_SYMBOL(console_irq); |
47 | |||
48 | #ifdef CONFIG_FUNCTION_TRACER | ||
49 | EXPORT_SYMBOL(_mcount); | ||
50 | #endif | ||
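Note: exporting _mcount matters because, with CONFIG_FUNCTION_TRACER enabled, modules are also built with -pg, so every module function begins with a call to _mcount and the module loader must be able to resolve that symbol. Illustrative example:

    /* Any function in a module built with -pg, e.g. */
    static int demo(int x)
    {
            return x + 1;
    }
    /* is compiled with a call to _mcount in its prologue; without the
     * EXPORT_SYMBOL(_mcount) above, loading such a module would fail with
     * an unresolved symbol. */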
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 400b040df7fa..b7a1efd5522c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -14,6 +14,9 @@ | |||
14 | * This file handles the architecture-dependent parts of initialization | 14 | * This file handles the architecture-dependent parts of initialization |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define KMSG_COMPONENT "setup" | ||
18 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
19 | |||
17 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
@@ -32,7 +35,6 @@ | |||
32 | #include <linux/bootmem.h> | 35 | #include <linux/bootmem.h> |
33 | #include <linux/root_dev.h> | 36 | #include <linux/root_dev.h> |
34 | #include <linux/console.h> | 37 | #include <linux/console.h> |
35 | #include <linux/seq_file.h> | ||
36 | #include <linux/kernel_stat.h> | 38 | #include <linux/kernel_stat.h> |
37 | #include <linux/device.h> | 39 | #include <linux/device.h> |
38 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
@@ -291,8 +293,8 @@ unsigned int switch_amode = 0; | |||
291 | #endif | 293 | #endif |
292 | EXPORT_SYMBOL_GPL(switch_amode); | 294 | EXPORT_SYMBOL_GPL(switch_amode); |
293 | 295 | ||
294 | static void set_amode_and_uaccess(unsigned long user_amode, | 296 | static int set_amode_and_uaccess(unsigned long user_amode, |
295 | unsigned long user32_amode) | 297 | unsigned long user32_amode) |
296 | { | 298 | { |
297 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | 299 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | |
298 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 300 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | |
@@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode, | |||
309 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | 311 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; |
310 | 312 | ||
311 | if (MACHINE_HAS_MVCOS) { | 313 | if (MACHINE_HAS_MVCOS) { |
312 | printk("mvcos available.\n"); | ||
313 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | 314 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); |
315 | return 1; | ||
314 | } else { | 316 | } else { |
315 | printk("mvcos not available.\n"); | ||
316 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); | 317 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); |
318 | return 0; | ||
317 | } | 319 | } |
318 | } | 320 | } |
319 | 321 | ||
@@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p) | |||
328 | early_param("switch_amode", early_parse_switch_amode); | 330 | early_param("switch_amode", early_parse_switch_amode); |
329 | 331 | ||
330 | #else /* CONFIG_S390_SWITCH_AMODE */ | 332 | #else /* CONFIG_S390_SWITCH_AMODE */ |
331 | static inline void set_amode_and_uaccess(unsigned long user_amode, | 333 | static inline int set_amode_and_uaccess(unsigned long user_amode, |
332 | unsigned long user32_amode) | 334 | unsigned long user32_amode) |
333 | { | 335 | { |
336 | return 0; | ||
334 | } | 337 | } |
335 | #endif /* CONFIG_S390_SWITCH_AMODE */ | 338 | #endif /* CONFIG_S390_SWITCH_AMODE */ |
336 | 339 | ||
@@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec); | |||
355 | static void setup_addressing_mode(void) | 358 | static void setup_addressing_mode(void) |
356 | { | 359 | { |
357 | if (s390_noexec) { | 360 | if (s390_noexec) { |
358 | printk("S390 execute protection active, "); | 361 | if (set_amode_and_uaccess(PSW_ASC_SECONDARY, |
359 | set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY); | 362 | PSW32_ASC_SECONDARY)) |
363 | pr_info("Execute protection active, " | ||
364 | "mvcos available\n"); | ||
365 | else | ||
366 | pr_info("Execute protection active, " | ||
367 | "mvcos not available\n"); | ||
360 | } else if (switch_amode) { | 368 | } else if (switch_amode) { |
361 | printk("S390 address spaces switched, "); | 369 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) |
362 | set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY); | 370 | pr_info("Address spaces switched, " |
371 | "mvcos available\n"); | ||
372 | else | ||
373 | pr_info("Address spaces switched, " | ||
374 | "mvcos not available\n"); | ||
363 | } | 375 | } |
364 | #ifdef CONFIG_TRACE_IRQFLAGS | 376 | #ifdef CONFIG_TRACE_IRQFLAGS |
365 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 377 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
@@ -572,15 +584,15 @@ setup_memory(void) | |||
572 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | 584 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; |
573 | 585 | ||
574 | if (start + INITRD_SIZE > memory_end) { | 586 | if (start + INITRD_SIZE > memory_end) { |
575 | printk("initrd extends beyond end of memory " | 587 | pr_err("initrd extends beyond end of " |
576 | "(0x%08lx > 0x%08lx)\n" | 588 | "memory (0x%08lx > 0x%08lx) " |
577 | "disabling initrd\n", | 589 | "disabling initrd\n", |
578 | start + INITRD_SIZE, memory_end); | 590 | start + INITRD_SIZE, memory_end); |
579 | INITRD_START = INITRD_SIZE = 0; | 591 | INITRD_START = INITRD_SIZE = 0; |
580 | } else { | 592 | } else { |
581 | printk("Moving initrd (0x%08lx -> 0x%08lx, " | 593 | pr_info("Moving initrd (0x%08lx -> " |
582 | "size: %ld)\n", | 594 | "0x%08lx, size: %ld)\n", |
583 | INITRD_START, start, INITRD_SIZE); | 595 | INITRD_START, start, INITRD_SIZE); |
584 | memmove((void *) start, (void *) INITRD_START, | 596 | memmove((void *) start, (void *) INITRD_START, |
585 | INITRD_SIZE); | 597 | INITRD_SIZE); |
586 | INITRD_START = start; | 598 | INITRD_START = start; |
@@ -642,8 +654,9 @@ setup_memory(void) | |||
642 | initrd_start = INITRD_START; | 654 | initrd_start = INITRD_START; |
643 | initrd_end = initrd_start + INITRD_SIZE; | 655 | initrd_end = initrd_start + INITRD_SIZE; |
644 | } else { | 656 | } else { |
645 | printk("initrd extends beyond end of memory " | 657 | pr_err("initrd extends beyond end of " |
646 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | 658 | "memory (0x%08lx > 0x%08lx) " |
659 | "disabling initrd\n", | ||
647 | initrd_start + INITRD_SIZE, memory_end); | 660 | initrd_start + INITRD_SIZE, memory_end); |
648 | initrd_start = initrd_end = 0; | 661 | initrd_start = initrd_end = 0; |
649 | } | 662 | } |
@@ -651,23 +664,6 @@ setup_memory(void) | |||
651 | #endif | 664 | #endif |
652 | } | 665 | } |
653 | 666 | ||
654 | static int __init __stfle(unsigned long long *list, int doublewords) | ||
655 | { | ||
656 | typedef struct { unsigned long long _[doublewords]; } addrtype; | ||
657 | register unsigned long __nr asm("0") = doublewords - 1; | ||
658 | |||
659 | asm volatile(".insn s,0xb2b00000,%0" /* stfle */ | ||
660 | : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); | ||
661 | return __nr + 1; | ||
662 | } | ||
663 | |||
664 | int __init stfle(unsigned long long *list, int doublewords) | ||
665 | { | ||
666 | if (!(stfl() & (1UL << 24))) | ||
667 | return -EOPNOTSUPP; | ||
668 | return __stfle(list, doublewords); | ||
669 | } | ||
670 | |||
671 | /* | 667 | /* |
672 | * Setup hardware capabilities. | 668 | * Setup hardware capabilities. |
673 | */ | 669 | */ |
@@ -739,8 +735,13 @@ static void __init setup_hwcaps(void) | |||
739 | strcpy(elf_platform, "z990"); | 735 | strcpy(elf_platform, "z990"); |
740 | break; | 736 | break; |
741 | case 0x2094: | 737 | case 0x2094: |
738 | case 0x2096: | ||
742 | strcpy(elf_platform, "z9-109"); | 739 | strcpy(elf_platform, "z9-109"); |
743 | break; | 740 | break; |
741 | case 0x2097: | ||
742 | case 0x2098: | ||
743 | strcpy(elf_platform, "z10"); | ||
744 | break; | ||
744 | } | 745 | } |
745 | } | 746 | } |
746 | 747 | ||
@@ -752,25 +753,34 @@ static void __init setup_hwcaps(void) | |||
752 | void __init | 753 | void __init |
753 | setup_arch(char **cmdline_p) | 754 | setup_arch(char **cmdline_p) |
754 | { | 755 | { |
756 | /* set up preferred console */ | ||
757 | add_preferred_console("ttyS", 0, NULL); | ||
758 | |||
755 | /* | 759 | /* |
756 | * print what head.S has found out about the machine | 760 | * print what head.S has found out about the machine |
757 | */ | 761 | */ |
758 | #ifndef CONFIG_64BIT | 762 | #ifndef CONFIG_64BIT |
759 | printk((MACHINE_IS_VM) ? | 763 | if (MACHINE_IS_VM) |
760 | "We are running under VM (31 bit mode)\n" : | 764 | pr_info("Linux is running as a z/VM " |
761 | "We are running native (31 bit mode)\n"); | 765 | "guest operating system in 31-bit mode\n"); |
762 | printk((MACHINE_HAS_IEEE) ? | 766 | else |
763 | "This machine has an IEEE fpu\n" : | 767 | pr_info("Linux is running natively in 31-bit mode\n"); |
764 | "This machine has no IEEE fpu\n"); | 768 | if (MACHINE_HAS_IEEE) |
769 | pr_info("The hardware system has IEEE compatible " | ||
770 | "floating point units\n"); | ||
771 | else | ||
772 | pr_info("The hardware system has no IEEE compatible " | ||
773 | "floating point units\n"); | ||
765 | #else /* CONFIG_64BIT */ | 774 | #else /* CONFIG_64BIT */ |
766 | if (MACHINE_IS_VM) | 775 | if (MACHINE_IS_VM) |
767 | printk("We are running under VM (64 bit mode)\n"); | 776 | pr_info("Linux is running as a z/VM " |
777 | "guest operating system in 64-bit mode\n"); | ||
768 | else if (MACHINE_IS_KVM) { | 778 | else if (MACHINE_IS_KVM) { |
769 | printk("We are running under KVM (64 bit mode)\n"); | 779 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
770 | add_preferred_console("hvc", 0, NULL); | 780 | add_preferred_console("hvc", 0, NULL); |
771 | s390_virtio_console_init(); | 781 | s390_virtio_console_init(); |
772 | } else | 782 | } else |
773 | printk("We are running native (64 bit mode)\n"); | 783 | pr_info("Linux is running natively in 64-bit mode\n"); |
774 | #endif /* CONFIG_64BIT */ | 784 | #endif /* CONFIG_64BIT */ |
775 | 785 | ||
776 | /* Have one command line that is parsed and saved in /proc/cmdline */ | 786 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
@@ -818,90 +828,3 @@ setup_arch(char **cmdline_p) | |||
818 | /* Setup zfcpdump support */ | 828 | /* Setup zfcpdump support */ |
819 | setup_zfcpdump(console_devno); | 829 | setup_zfcpdump(console_devno); |
820 | } | 830 | } |
821 | |||
822 | void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) | ||
823 | { | ||
824 | printk(KERN_INFO "cpu %d " | ||
825 | #ifdef CONFIG_SMP | ||
826 | "phys_idx=%d " | ||
827 | #endif | ||
828 | "vers=%02X ident=%06X machine=%04X unused=%04X\n", | ||
829 | cpuinfo->cpu_nr, | ||
830 | #ifdef CONFIG_SMP | ||
831 | cpuinfo->cpu_addr, | ||
832 | #endif | ||
833 | cpuinfo->cpu_id.version, | ||
834 | cpuinfo->cpu_id.ident, | ||
835 | cpuinfo->cpu_id.machine, | ||
836 | cpuinfo->cpu_id.unused); | ||
837 | } | ||
838 | |||
839 | /* | ||
840 | * show_cpuinfo - Get information on one CPU for use by procfs. | ||
841 | */ | ||
842 | |||
843 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
844 | { | ||
845 | static const char *hwcap_str[8] = { | ||
846 | "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", | ||
847 | "edat" | ||
848 | }; | ||
849 | struct cpuinfo_S390 *cpuinfo; | ||
850 | unsigned long n = (unsigned long) v - 1; | ||
851 | int i; | ||
852 | |||
853 | s390_adjust_jiffies(); | ||
854 | preempt_disable(); | ||
855 | if (!n) { | ||
856 | seq_printf(m, "vendor_id : IBM/S390\n" | ||
857 | "# processors : %i\n" | ||
858 | "bogomips per cpu: %lu.%02lu\n", | ||
859 | num_online_cpus(), loops_per_jiffy/(500000/HZ), | ||
860 | (loops_per_jiffy/(5000/HZ))%100); | ||
861 | seq_puts(m, "features\t: "); | ||
862 | for (i = 0; i < 8; i++) | ||
863 | if (hwcap_str[i] && (elf_hwcap & (1UL << i))) | ||
864 | seq_printf(m, "%s ", hwcap_str[i]); | ||
865 | seq_puts(m, "\n"); | ||
866 | } | ||
867 | |||
868 | if (cpu_online(n)) { | ||
869 | #ifdef CONFIG_SMP | ||
870 | if (smp_processor_id() == n) | ||
871 | cpuinfo = &S390_lowcore.cpu_data; | ||
872 | else | ||
873 | cpuinfo = &lowcore_ptr[n]->cpu_data; | ||
874 | #else | ||
875 | cpuinfo = &S390_lowcore.cpu_data; | ||
876 | #endif | ||
877 | seq_printf(m, "processor %li: " | ||
878 | "version = %02X, " | ||
879 | "identification = %06X, " | ||
880 | "machine = %04X\n", | ||
881 | n, cpuinfo->cpu_id.version, | ||
882 | cpuinfo->cpu_id.ident, | ||
883 | cpuinfo->cpu_id.machine); | ||
884 | } | ||
885 | preempt_enable(); | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
890 | { | ||
891 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | ||
892 | } | ||
893 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
894 | { | ||
895 | ++*pos; | ||
896 | return c_start(m, pos); | ||
897 | } | ||
898 | static void c_stop(struct seq_file *m, void *v) | ||
899 | { | ||
900 | } | ||
901 | const struct seq_operations cpuinfo_op = { | ||
902 | .start = c_start, | ||
903 | .next = c_next, | ||
904 | .stop = c_stop, | ||
905 | .show = show_cpuinfo, | ||
906 | }; | ||
907 | |||
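The setup.c changes above convert raw printk() calls to pr_info()/pr_err() with a fixed message prefix. The mechanism is that the pr_* macros expand pr_fmt() in front of every format string, so defining KMSG_COMPONENT and pr_fmt() before the first include prefixes all messages from that file. A minimal self-contained sketch of the pattern follows; the component name "demo" and the module are made up for illustration.

/* Must come before the first include that pulls in printk definitions. */
#define KMSG_COMPONENT "demo"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
	/* Emits "demo: hello from the demo component" without repeating the prefix. */
	pr_info("hello from the demo component\n");
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");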
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index b5595688a477..6fc78541dc57 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -20,6 +20,9 @@ | |||
20 | * cpu_number_map in other architectures. | 20 | * cpu_number_map in other architectures. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #define KMSG_COMPONENT "cpu" | ||
24 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
25 | |||
23 | #include <linux/module.h> | 26 | #include <linux/module.h> |
24 | #include <linux/init.h> | 27 | #include <linux/init.h> |
25 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
@@ -77,159 +80,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); | |||
77 | 80 | ||
78 | static void smp_ext_bitcall(int, ec_bit_sig); | 81 | static void smp_ext_bitcall(int, ec_bit_sig); |
79 | 82 | ||
80 | /* | ||
81 | * Structure and data for __smp_call_function_map(). This is designed to | ||
82 | * minimise static memory requirements. It also looks cleaner. | ||
83 | */ | ||
84 | static DEFINE_SPINLOCK(call_lock); | ||
85 | |||
86 | struct call_data_struct { | ||
87 | void (*func) (void *info); | ||
88 | void *info; | ||
89 | cpumask_t started; | ||
90 | cpumask_t finished; | ||
91 | int wait; | ||
92 | }; | ||
93 | |||
94 | static struct call_data_struct *call_data; | ||
95 | |||
96 | /* | ||
97 | * 'Call function' interrupt callback | ||
98 | */ | ||
99 | static void do_call_function(void) | ||
100 | { | ||
101 | void (*func) (void *info) = call_data->func; | ||
102 | void *info = call_data->info; | ||
103 | int wait = call_data->wait; | ||
104 | |||
105 | cpu_set(smp_processor_id(), call_data->started); | ||
106 | (*func)(info); | ||
107 | if (wait) | ||
108 | cpu_set(smp_processor_id(), call_data->finished);; | ||
109 | } | ||
110 | |||
111 | static void __smp_call_function_map(void (*func) (void *info), void *info, | ||
112 | int wait, cpumask_t map) | ||
113 | { | ||
114 | struct call_data_struct data; | ||
115 | int cpu, local = 0; | ||
116 | |||
117 | /* | ||
118 | * Can deadlock when interrupts are disabled or if in wrong context. | ||
119 | */ | ||
120 | WARN_ON(irqs_disabled() || in_irq()); | ||
121 | |||
122 | /* | ||
123 | * Check for local function call. We have to have the same call order | ||
124 | * as in on_each_cpu() because of machine_restart_smp(). | ||
125 | */ | ||
126 | if (cpu_isset(smp_processor_id(), map)) { | ||
127 | local = 1; | ||
128 | cpu_clear(smp_processor_id(), map); | ||
129 | } | ||
130 | |||
131 | cpus_and(map, map, cpu_online_map); | ||
132 | if (cpus_empty(map)) | ||
133 | goto out; | ||
134 | |||
135 | data.func = func; | ||
136 | data.info = info; | ||
137 | data.started = CPU_MASK_NONE; | ||
138 | data.wait = wait; | ||
139 | if (wait) | ||
140 | data.finished = CPU_MASK_NONE; | ||
141 | |||
142 | call_data = &data; | ||
143 | |||
144 | for_each_cpu_mask(cpu, map) | ||
145 | smp_ext_bitcall(cpu, ec_call_function); | ||
146 | |||
147 | /* Wait for response */ | ||
148 | while (!cpus_equal(map, data.started)) | ||
149 | cpu_relax(); | ||
150 | if (wait) | ||
151 | while (!cpus_equal(map, data.finished)) | ||
152 | cpu_relax(); | ||
153 | out: | ||
154 | if (local) { | ||
155 | local_irq_disable(); | ||
156 | func(info); | ||
157 | local_irq_enable(); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * smp_call_function: | ||
163 | * @func: the function to run; this must be fast and non-blocking | ||
164 | * @info: an arbitrary pointer to pass to the function | ||
165 | * @wait: if true, wait (atomically) until function has completed on other CPUs | ||
166 | * | ||
167 | * Run a function on all other CPUs. | ||
168 | * | ||
169 | * You must not call this function with disabled interrupts, from a | ||
170 | * hardware interrupt handler or from a bottom half. | ||
171 | */ | ||
172 | int smp_call_function(void (*func) (void *info), void *info, int wait) | ||
173 | { | ||
174 | cpumask_t map; | ||
175 | |||
176 | spin_lock(&call_lock); | ||
177 | map = cpu_online_map; | ||
178 | cpu_clear(smp_processor_id(), map); | ||
179 | __smp_call_function_map(func, info, wait, map); | ||
180 | spin_unlock(&call_lock); | ||
181 | return 0; | ||
182 | } | ||
183 | EXPORT_SYMBOL(smp_call_function); | ||
184 | |||
185 | /* | ||
186 | * smp_call_function_single: | ||
187 | * @cpu: the CPU where func should run | ||
188 | * @func: the function to run; this must be fast and non-blocking | ||
189 | * @info: an arbitrary pointer to pass to the function | ||
190 | * @wait: if true, wait (atomically) until function has completed on other CPUs | ||
191 | * | ||
192 | * Run a function on one processor. | ||
193 | * | ||
194 | * You must not call this function with disabled interrupts, from a | ||
195 | * hardware interrupt handler or from a bottom half. | ||
196 | */ | ||
197 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
198 | int wait) | ||
199 | { | ||
200 | spin_lock(&call_lock); | ||
201 | __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); | ||
202 | spin_unlock(&call_lock); | ||
203 | return 0; | ||
204 | } | ||
205 | EXPORT_SYMBOL(smp_call_function_single); | ||
206 | |||
207 | /** | ||
208 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
209 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
210 | * @func: The function to run. This must be fast and non-blocking. | ||
211 | * @info: An arbitrary pointer to pass to the function. | ||
212 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
213 | * | ||
214 | * Returns 0 on success, else a negative status code. | ||
215 | * | ||
216 | * If @wait is true, then returns once @func has returned; otherwise | ||
217 | * it returns just before the target cpu calls @func. | ||
218 | * | ||
219 | * You must not call this function with disabled interrupts or from a | ||
220 | * hardware interrupt handler or from a bottom half handler. | ||
221 | */ | ||
222 | int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | ||
223 | int wait) | ||
224 | { | ||
225 | spin_lock(&call_lock); | ||
226 | cpu_clear(smp_processor_id(), mask); | ||
227 | __smp_call_function_map(func, info, wait, mask); | ||
228 | spin_unlock(&call_lock); | ||
229 | return 0; | ||
230 | } | ||
231 | EXPORT_SYMBOL(smp_call_function_mask); | ||
232 | |||
233 | void smp_send_stop(void) | 83 | void smp_send_stop(void) |
234 | { | 84 | { |
235 | int cpu, rc; | 85 | int cpu, rc; |
@@ -271,7 +121,10 @@ static void do_ext_call_interrupt(__u16 code) | |||
271 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | 121 | bits = xchg(&S390_lowcore.ext_call_fast, 0); |
272 | 122 | ||
273 | if (test_bit(ec_call_function, &bits)) | 123 | if (test_bit(ec_call_function, &bits)) |
274 | do_call_function(); | 124 | generic_smp_call_function_interrupt(); |
125 | |||
126 | if (test_bit(ec_call_function_single, &bits)) | ||
127 | generic_smp_call_function_single_interrupt(); | ||
275 | } | 128 | } |
276 | 129 | ||
277 | /* | 130 | /* |
@@ -288,6 +141,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | |||
288 | udelay(10); | 141 | udelay(10); |
289 | } | 142 | } |
290 | 143 | ||
144 | void arch_send_call_function_ipi(cpumask_t mask) | ||
145 | { | ||
146 | int cpu; | ||
147 | |||
148 | for_each_cpu_mask(cpu, mask) | ||
149 | smp_ext_bitcall(cpu, ec_call_function); | ||
150 | } | ||
151 | |||
152 | void arch_send_call_function_single_ipi(int cpu) | ||
153 | { | ||
154 | smp_ext_bitcall(cpu, ec_call_function_single); | ||
155 | } | ||
156 | |||
291 | #ifndef CONFIG_64BIT | 157 | #ifndef CONFIG_64BIT |
292 | /* | 158 | /* |
293 | * this function sends a 'purge tlb' signal to another CPU. | 159 | * this function sends a 'purge tlb' signal to another CPU. |
@@ -388,8 +254,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | |||
388 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 254 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
389 | return; | 255 | return; |
390 | if (cpu >= NR_CPUS) { | 256 | if (cpu >= NR_CPUS) { |
391 | printk(KERN_WARNING "Registers for cpu %i not saved since dump " | 257 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
392 | "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); | 258 | "the dump\n", cpu, NR_CPUS - 1); |
393 | return; | 259 | return; |
394 | } | 260 | } |
395 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); | 261 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); |
@@ -562,7 +428,7 @@ static void __init smp_detect_cpus(void) | |||
562 | } | 428 | } |
563 | out: | 429 | out: |
564 | kfree(info); | 430 | kfree(info); |
565 | printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus); | 431 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); |
566 | get_online_cpus(); | 432 | get_online_cpus(); |
567 | __smp_rescan_cpus(); | 433 | __smp_rescan_cpus(); |
568 | put_online_cpus(); | 434 | put_online_cpus(); |
@@ -578,19 +444,17 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
578 | preempt_disable(); | 444 | preempt_disable(); |
579 | /* Enable TOD clock interrupts on the secondary cpu. */ | 445 | /* Enable TOD clock interrupts on the secondary cpu. */ |
580 | init_cpu_timer(); | 446 | init_cpu_timer(); |
581 | #ifdef CONFIG_VIRT_TIMER | ||
582 | /* Enable cpu timer interrupts on the secondary cpu. */ | 447 | /* Enable cpu timer interrupts on the secondary cpu. */ |
583 | init_cpu_vtimer(); | 448 | init_cpu_vtimer(); |
584 | #endif | ||
585 | /* Enable pfault pseudo page faults on this cpu. */ | 449 | /* Enable pfault pseudo page faults on this cpu. */ |
586 | pfault_init(); | 450 | pfault_init(); |
587 | 451 | ||
588 | /* call cpu notifiers */ | 452 | /* call cpu notifiers */ |
589 | notify_cpu_starting(smp_processor_id()); | 453 | notify_cpu_starting(smp_processor_id()); |
590 | /* Mark this cpu as online */ | 454 | /* Mark this cpu as online */ |
591 | spin_lock(&call_lock); | 455 | ipi_call_lock(); |
592 | cpu_set(smp_processor_id(), cpu_online_map); | 456 | cpu_set(smp_processor_id(), cpu_online_map); |
593 | spin_unlock(&call_lock); | 457 | ipi_call_unlock(); |
594 | /* Switch on interrupts */ | 458 | /* Switch on interrupts */ |
595 | local_irq_enable(); | 459 | local_irq_enable(); |
596 | /* Print info about this processor */ | 460 | /* Print info about this processor */ |
@@ -639,18 +503,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu) | |||
639 | 503 | ||
640 | save_area = get_zeroed_page(GFP_KERNEL); | 504 | save_area = get_zeroed_page(GFP_KERNEL); |
641 | if (!save_area) | 505 | if (!save_area) |
642 | goto out_save_area; | 506 | goto out; |
643 | lowcore->extended_save_area_addr = (u32) save_area; | 507 | lowcore->extended_save_area_addr = (u32) save_area; |
644 | } | 508 | } |
645 | #endif | 509 | #endif |
646 | lowcore_ptr[cpu] = lowcore; | 510 | lowcore_ptr[cpu] = lowcore; |
647 | return 0; | 511 | return 0; |
648 | 512 | ||
649 | #ifndef CONFIG_64BIT | ||
650 | out_save_area: | ||
651 | free_page(panic_stack); | ||
652 | #endif | ||
653 | out: | 513 | out: |
514 | free_page(panic_stack); | ||
654 | free_pages(async_stack, ASYNC_ORDER); | 515 | free_pages(async_stack, ASYNC_ORDER); |
655 | free_pages((unsigned long) lowcore, lc_order); | 516 | free_pages((unsigned long) lowcore, lc_order); |
656 | return -ENOMEM; | 517 | return -ENOMEM; |
@@ -690,12 +551,8 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
690 | 551 | ||
691 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), | 552 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), |
692 | cpu, sigp_set_prefix); | 553 | cpu, sigp_set_prefix); |
693 | if (ccode) { | 554 | if (ccode) |
694 | printk("sigp_set_prefix failed for cpu %d " | ||
695 | "with condition code %d\n", | ||
696 | (int) cpu, (int) ccode); | ||
697 | return -EIO; | 555 | return -EIO; |
698 | } | ||
699 | 556 | ||
700 | idle = current_set[cpu]; | 557 | idle = current_set[cpu]; |
701 | cpu_lowcore = lowcore_ptr[cpu]; | 558 | cpu_lowcore = lowcore_ptr[cpu]; |
@@ -778,7 +635,7 @@ void __cpu_die(unsigned int cpu) | |||
778 | while (!smp_cpu_not_running(cpu)) | 635 | while (!smp_cpu_not_running(cpu)) |
779 | cpu_relax(); | 636 | cpu_relax(); |
780 | smp_free_lowcore(cpu); | 637 | smp_free_lowcore(cpu); |
781 | printk(KERN_INFO "Processor %d spun down\n", cpu); | 638 | pr_info("Processor %d stopped\n", cpu); |
782 | } | 639 | } |
783 | 640 | ||
784 | void cpu_die(void) | 641 | void cpu_die(void) |
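The smp.c hunks above delete the home-grown __smp_call_function_map() machinery and hook the architecture into the generic kernel/smp.c implementation instead: the arch only has to deliver an IPI (arch_send_call_function_ipi() / arch_send_call_function_single_ipi()) and invoke the generic handlers when that IPI arrives. A condensed sketch of that contract is below; send_ipi() and the bit numbers are placeholders standing in for s390's smp_ext_bitcall() and its ec_bit_sig values.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>

/* Hypothetical low-level IPI primitive; on s390 this role is played by smp_ext_bitcall(). */
extern void send_ipi(int cpu, int bit);

#define IPI_CALL_FUNC		0
#define IPI_CALL_FUNC_SINGLE	1

/* Arch hooks used by the generic smp_call_function*() implementation. */
void arch_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		send_ipi(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi(cpu, IPI_CALL_FUNC_SINGLE);
}

/* Called from the arch's IPI / external interrupt handler. */
static void handle_ipi(unsigned long bits)
{
	if (test_bit(IPI_CALL_FUNC, &bits))
		generic_smp_call_function_interrupt();
	if (test_bit(IPI_CALL_FUNC_SINGLE, &bits))
		generic_smp_call_function_single_interrupt();
}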
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index eccefbbff887..5be981a36c3e 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -12,6 +12,9 @@ | |||
12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define KMSG_COMPONENT "time" | ||
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
17 | |||
15 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
@@ -20,6 +23,8 @@ | |||
20 | #include <linux/string.h> | 23 | #include <linux/string.h> |
21 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
22 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/cpu.h> | ||
27 | #include <linux/stop_machine.h> | ||
23 | #include <linux/time.h> | 28 | #include <linux/time.h> |
24 | #include <linux/sysdev.h> | 29 | #include <linux/sysdev.h> |
25 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
@@ -36,6 +41,7 @@ | |||
36 | #include <asm/delay.h> | 41 | #include <asm/delay.h> |
37 | #include <asm/s390_ext.h> | 42 | #include <asm/s390_ext.h> |
38 | #include <asm/div64.h> | 43 | #include <asm/div64.h> |
44 | #include <asm/vdso.h> | ||
39 | #include <asm/irq.h> | 45 | #include <asm/irq.h> |
40 | #include <asm/irq_regs.h> | 46 | #include <asm/irq_regs.h> |
41 | #include <asm/timer.h> | 47 | #include <asm/timer.h> |
@@ -223,6 +229,36 @@ static struct clocksource clocksource_tod = { | |||
223 | }; | 229 | }; |
224 | 230 | ||
225 | 231 | ||
232 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | ||
233 | { | ||
234 | if (clock != &clocksource_tod) | ||
235 | return; | ||
236 | |||
237 | /* Make userspace gettimeofday spin until we're done. */ | ||
238 | ++vdso_data->tb_update_count; | ||
239 | smp_wmb(); | ||
240 | vdso_data->xtime_tod_stamp = clock->cycle_last; | ||
241 | vdso_data->xtime_clock_sec = xtime.tv_sec; | ||
242 | vdso_data->xtime_clock_nsec = xtime.tv_nsec; | ||
243 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; | ||
244 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; | ||
245 | smp_wmb(); | ||
246 | ++vdso_data->tb_update_count; | ||
247 | } | ||
248 | |||
249 | extern struct timezone sys_tz; | ||
250 | |||
251 | void update_vsyscall_tz(void) | ||
252 | { | ||
253 | /* Make userspace gettimeofday spin until we're done. */ | ||
254 | ++vdso_data->tb_update_count; | ||
255 | smp_wmb(); | ||
256 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; | ||
257 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; | ||
258 | smp_wmb(); | ||
259 | ++vdso_data->tb_update_count; | ||
260 | } | ||
261 | |||
226 | /* | 262 | /* |
227 | * Initialize the TOD clock and the CPU timer of | 263 | * Initialize the TOD clock and the CPU timer of |
228 | * the boot cpu. | 264 | * the boot cpu. |
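The update_vsyscall() added above uses a hand-rolled seqlock: tb_update_count is bumped before and after the update with write barriers in between, so a userspace reader sees an odd or changed count while an update is in flight. The matching read side, which the vdso fast path has to implement, looks roughly like the sketch below; the struct and field names are modeled on the hunk, and this is commentary rather than the actual s390 vdso code.

#include <linux/types.h>
#include <asm/system.h>		/* smp_rmb() in this kernel generation */

struct vdso_data_sketch {
	u32 tb_update_count;
	u64 xtime_clock_sec;
	u64 xtime_clock_nsec;
	/* ... */
};

/* Reader side of the update_vsyscall() protocol (sketch only). */
static void read_xtime(const struct vdso_data_sketch *vd, u64 *sec, u64 *nsec)
{
	u32 seq;

	do {
		seq = vd->tb_update_count;
		smp_rmb();			/* pairs with smp_wmb() in the writer */
		*sec  = vd->xtime_clock_sec;
		*nsec = vd->xtime_clock_nsec;
		smp_rmb();
	} while ((seq & 1) || seq != vd->tb_update_count);	/* odd or changed: retry */
}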
@@ -253,10 +289,8 @@ void __init time_init(void) | |||
253 | 289 | ||
254 | /* Enable TOD clock interrupts on the boot cpu. */ | 290 | /* Enable TOD clock interrupts on the boot cpu. */ |
255 | init_cpu_timer(); | 291 | init_cpu_timer(); |
256 | 292 | /* Enable cpu timer interrupts on the boot cpu. */ | |
257 | #ifdef CONFIG_VIRT_TIMER | ||
258 | vtime_init(); | 293 | vtime_init(); |
259 | #endif | ||
260 | } | 294 | } |
261 | 295 | ||
262 | /* | 296 | /* |
@@ -288,8 +322,8 @@ static unsigned long long adjust_time(unsigned long long old, | |||
288 | } | 322 | } |
289 | sched_clock_base_cc += delta; | 323 | sched_clock_base_cc += delta; |
290 | if (adjust.offset != 0) { | 324 | if (adjust.offset != 0) { |
291 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | 325 | pr_notice("The ETR interface has adjusted the clock " |
292 | adjust.offset); | 326 | "by %li microseconds\n", adjust.offset); |
293 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | 327 | adjust.modes = ADJ_OFFSET_SINGLESHOT; |
294 | do_adjtimex(&adjust); | 328 | do_adjtimex(&adjust); |
295 | } | 329 | } |
@@ -360,6 +394,15 @@ static void enable_sync_clock(void) | |||
360 | atomic_set_mask(0x80000000, sw_ptr); | 394 | atomic_set_mask(0x80000000, sw_ptr); |
361 | } | 395 | } |
362 | 396 | ||
397 | /* Single threaded workqueue used for etr and stp sync events */ | ||
398 | static struct workqueue_struct *time_sync_wq; | ||
399 | |||
400 | static void __init time_init_wq(void) | ||
401 | { | ||
402 | if (!time_sync_wq) | ||
403 | time_sync_wq = create_singlethread_workqueue("timesync"); | ||
404 | } | ||
405 | |||
363 | /* | 406 | /* |
364 | * External Time Reference (ETR) code. | 407 | * External Time Reference (ETR) code. |
365 | */ | 408 | */ |
@@ -425,6 +468,7 @@ static struct timer_list etr_timer; | |||
425 | 468 | ||
426 | static void etr_timeout(unsigned long dummy); | 469 | static void etr_timeout(unsigned long dummy); |
427 | static void etr_work_fn(struct work_struct *work); | 470 | static void etr_work_fn(struct work_struct *work); |
471 | static DEFINE_MUTEX(etr_work_mutex); | ||
428 | static DECLARE_WORK(etr_work, etr_work_fn); | 472 | static DECLARE_WORK(etr_work, etr_work_fn); |
429 | 473 | ||
430 | /* | 474 | /* |
@@ -440,8 +484,8 @@ static void etr_reset(void) | |||
440 | etr_tolec = get_clock(); | 484 | etr_tolec = get_clock(); |
441 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); | 485 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); |
442 | } else if (etr_port0_online || etr_port1_online) { | 486 | } else if (etr_port0_online || etr_port1_online) { |
443 | printk(KERN_WARNING "Running on non ETR capable " | 487 | pr_warning("The real or virtual hardware system does " |
444 | "machine, only local mode available.\n"); | 488 | "not provide an ETR interface\n"); |
445 | etr_port0_online = etr_port1_online = 0; | 489 | etr_port0_online = etr_port1_online = 0; |
446 | } | 490 | } |
447 | } | 491 | } |
@@ -452,17 +496,18 @@ static int __init etr_init(void) | |||
452 | 496 | ||
453 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) | 497 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
454 | return 0; | 498 | return 0; |
499 | time_init_wq(); | ||
455 | /* Check if this machine has the steai instruction. */ | 500 | /* Check if this machine has the steai instruction. */ |
456 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | 501 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) |
457 | etr_steai_available = 1; | 502 | etr_steai_available = 1; |
458 | setup_timer(&etr_timer, etr_timeout, 0UL); | 503 | setup_timer(&etr_timer, etr_timeout, 0UL); |
459 | if (etr_port0_online) { | 504 | if (etr_port0_online) { |
460 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 505 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
461 | schedule_work(&etr_work); | 506 | queue_work(time_sync_wq, &etr_work); |
462 | } | 507 | } |
463 | if (etr_port1_online) { | 508 | if (etr_port1_online) { |
464 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 509 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
465 | schedule_work(&etr_work); | 510 | queue_work(time_sync_wq, &etr_work); |
466 | } | 511 | } |
467 | return 0; | 512 | return 0; |
468 | } | 513 | } |
@@ -489,7 +534,7 @@ void etr_switch_to_local(void) | |||
489 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | 534 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
490 | disable_sync_clock(NULL); | 535 | disable_sync_clock(NULL); |
491 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); | 536 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); |
492 | schedule_work(&etr_work); | 537 | queue_work(time_sync_wq, &etr_work); |
493 | } | 538 | } |
494 | 539 | ||
495 | /* | 540 | /* |
@@ -505,7 +550,7 @@ void etr_sync_check(void) | |||
505 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | 550 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
506 | disable_sync_clock(NULL); | 551 | disable_sync_clock(NULL); |
507 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); | 552 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); |
508 | schedule_work(&etr_work); | 553 | queue_work(time_sync_wq, &etr_work); |
509 | } | 554 | } |
510 | 555 | ||
511 | /* | 556 | /* |
@@ -529,13 +574,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm) | |||
529 | * Both ports are not up-to-date now. | 574 | * Both ports are not up-to-date now. |
530 | */ | 575 | */ |
531 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); | 576 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); |
532 | schedule_work(&etr_work); | 577 | queue_work(time_sync_wq, &etr_work); |
533 | } | 578 | } |
534 | 579 | ||
535 | static void etr_timeout(unsigned long dummy) | 580 | static void etr_timeout(unsigned long dummy) |
536 | { | 581 | { |
537 | set_bit(ETR_EVENT_UPDATE, &etr_events); | 582 | set_bit(ETR_EVENT_UPDATE, &etr_events); |
538 | schedule_work(&etr_work); | 583 | queue_work(time_sync_wq, &etr_work); |
539 | } | 584 | } |
540 | 585 | ||
541 | /* | 586 | /* |
@@ -642,14 +687,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | |||
642 | } | 687 | } |
643 | 688 | ||
644 | struct clock_sync_data { | 689 | struct clock_sync_data { |
690 | atomic_t cpus; | ||
645 | int in_sync; | 691 | int in_sync; |
646 | unsigned long long fixup_cc; | 692 | unsigned long long fixup_cc; |
693 | int etr_port; | ||
694 | struct etr_aib *etr_aib; | ||
647 | }; | 695 | }; |
648 | 696 | ||
649 | static void clock_sync_cpu_start(void *dummy) | 697 | static void clock_sync_cpu(struct clock_sync_data *sync) |
650 | { | 698 | { |
651 | struct clock_sync_data *sync = dummy; | 699 | atomic_dec(&sync->cpus); |
652 | |||
653 | enable_sync_clock(); | 700 | enable_sync_clock(); |
654 | /* | 701 | /* |
655 | * This looks like a busy wait loop but it isn't. etr_sync_cpus | 702 | * This looks like a busy wait loop but it isn't. etr_sync_cpus |
@@ -675,39 +722,35 @@ static void clock_sync_cpu_start(void *dummy) | |||
675 | fixup_clock_comparator(sync->fixup_cc); | 722 | fixup_clock_comparator(sync->fixup_cc); |
676 | } | 723 | } |
677 | 724 | ||
678 | static void clock_sync_cpu_end(void *dummy) | ||
679 | { | ||
680 | } | ||
681 | |||
682 | /* | 725 | /* |
683 | * Sync the TOD clock using the port referred to by aibp. This port | 726 | * Sync the TOD clock using the port referred to by aibp. This port |
684 | * has to be enabled and the other port has to be disabled. The | 727 | * has to be enabled and the other port has to be disabled. The |
685 | * last eacr update has to be more than 1.6 seconds in the past. | 728 | * last eacr update has to be more than 1.6 seconds in the past. |
686 | */ | 729 | */ |
687 | static int etr_sync_clock(struct etr_aib *aib, int port) | 730 | static int etr_sync_clock(void *data) |
688 | { | 731 | { |
689 | struct etr_aib *sync_port; | 732 | static int first; |
690 | struct clock_sync_data etr_sync; | ||
691 | unsigned long long clock, old_clock, delay, delta; | 733 | unsigned long long clock, old_clock, delay, delta; |
692 | int follows; | 734 | struct clock_sync_data *etr_sync; |
735 | struct etr_aib *sync_port, *aib; | ||
736 | int port; | ||
693 | int rc; | 737 | int rc; |
694 | 738 | ||
695 | /* Check if the current aib is adjacent to the sync port aib. */ | 739 | etr_sync = data; |
696 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
697 | follows = etr_aib_follows(sync_port, aib, port); | ||
698 | memcpy(sync_port, aib, sizeof(*aib)); | ||
699 | if (!follows) | ||
700 | return -EAGAIN; | ||
701 | 740 | ||
702 | /* | 741 | if (xchg(&first, 1) == 1) { |
703 | * Catch all other cpus and make them wait until we have | 742 | /* Slave */ |
704 | * successfully synced the clock. smp_call_function will | 743 | clock_sync_cpu(etr_sync); |
705 | * return after all other cpus are in etr_sync_cpu_start. | 744 | return 0; |
706 | */ | 745 | } |
707 | memset(&etr_sync, 0, sizeof(etr_sync)); | 746 | |
708 | preempt_disable(); | 747 | /* Wait until all other cpus entered the sync function. */ |
709 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0); | 748 | while (atomic_read(&etr_sync->cpus) != 0) |
710 | local_irq_disable(); | 749 | cpu_relax(); |
750 | |||
751 | port = etr_sync->etr_port; | ||
752 | aib = etr_sync->etr_aib; | ||
753 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
711 | enable_sync_clock(); | 754 | enable_sync_clock(); |
712 | 755 | ||
713 | /* Set clock to next OTE. */ | 756 | /* Set clock to next OTE. */ |
@@ -724,16 +767,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
724 | delay = (unsigned long long) | 767 | delay = (unsigned long long) |
725 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 768 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
726 | delta = adjust_time(old_clock, clock, delay); | 769 | delta = adjust_time(old_clock, clock, delay); |
727 | etr_sync.fixup_cc = delta; | 770 | etr_sync->fixup_cc = delta; |
728 | fixup_clock_comparator(delta); | 771 | fixup_clock_comparator(delta); |
729 | /* Verify that the clock is properly set. */ | 772 | /* Verify that the clock is properly set. */ |
730 | if (!etr_aib_follows(sync_port, aib, port)) { | 773 | if (!etr_aib_follows(sync_port, aib, port)) { |
731 | /* Didn't work. */ | 774 | /* Didn't work. */ |
732 | disable_sync_clock(NULL); | 775 | disable_sync_clock(NULL); |
733 | etr_sync.in_sync = -EAGAIN; | 776 | etr_sync->in_sync = -EAGAIN; |
734 | rc = -EAGAIN; | 777 | rc = -EAGAIN; |
735 | } else { | 778 | } else { |
736 | etr_sync.in_sync = 1; | 779 | etr_sync->in_sync = 1; |
737 | rc = 0; | 780 | rc = 0; |
738 | } | 781 | } |
739 | } else { | 782 | } else { |
@@ -741,12 +784,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
741 | __ctl_clear_bit(0, 29); | 784 | __ctl_clear_bit(0, 29); |
742 | __ctl_clear_bit(14, 21); | 785 | __ctl_clear_bit(14, 21); |
743 | disable_sync_clock(NULL); | 786 | disable_sync_clock(NULL); |
744 | etr_sync.in_sync = -EAGAIN; | 787 | etr_sync->in_sync = -EAGAIN; |
745 | rc = -EAGAIN; | 788 | rc = -EAGAIN; |
746 | } | 789 | } |
747 | local_irq_enable(); | 790 | xchg(&first, 0); |
748 | smp_call_function(clock_sync_cpu_end, NULL, 0); | 791 | return rc; |
749 | preempt_enable(); | 792 | } |
793 | |||
794 | static int etr_sync_clock_stop(struct etr_aib *aib, int port) | ||
795 | { | ||
796 | struct clock_sync_data etr_sync; | ||
797 | struct etr_aib *sync_port; | ||
798 | int follows; | ||
799 | int rc; | ||
800 | |||
801 | /* Check if the current aib is adjacent to the sync port aib. */ | ||
802 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
803 | follows = etr_aib_follows(sync_port, aib, port); | ||
804 | memcpy(sync_port, aib, sizeof(*aib)); | ||
805 | if (!follows) | ||
806 | return -EAGAIN; | ||
807 | memset(&etr_sync, 0, sizeof(etr_sync)); | ||
808 | etr_sync.etr_aib = aib; | ||
809 | etr_sync.etr_port = port; | ||
810 | get_online_cpus(); | ||
811 | atomic_set(&etr_sync.cpus, num_online_cpus() - 1); | ||
812 | rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map); | ||
813 | put_online_cpus(); | ||
750 | return rc; | 814 | return rc; |
751 | } | 815 | } |
752 | 816 | ||
@@ -903,7 +967,7 @@ static void etr_update_eacr(struct etr_eacr eacr) | |||
903 | } | 967 | } |
904 | 968 | ||
905 | /* | 969 | /* |
906 | * ETR tasklet. In this function you'll find the main logic. In | 970 | * ETR work. In this function you'll find the main logic. In |
907 | * particular this is the only function that calls etr_update_eacr(), | 971 | * particular this is the only function that calls etr_update_eacr(), |
908 | * it "controls" the etr control register. | 972 | * it "controls" the etr control register. |
909 | */ | 973 | */ |
@@ -914,6 +978,9 @@ static void etr_work_fn(struct work_struct *work) | |||
914 | struct etr_aib aib; | 978 | struct etr_aib aib; |
915 | int sync_port; | 979 | int sync_port; |
916 | 980 | ||
981 | /* prevent multiple execution. */ | ||
982 | mutex_lock(&etr_work_mutex); | ||
983 | |||
917 | /* Create working copy of etr_eacr. */ | 984 | /* Create working copy of etr_eacr. */ |
918 | eacr = etr_eacr; | 985 | eacr = etr_eacr; |
919 | 986 | ||
@@ -929,7 +996,7 @@ static void etr_work_fn(struct work_struct *work) | |||
929 | del_timer_sync(&etr_timer); | 996 | del_timer_sync(&etr_timer); |
930 | etr_update_eacr(eacr); | 997 | etr_update_eacr(eacr); |
931 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 998 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
932 | return; | 999 | goto out_unlock; |
933 | } | 1000 | } |
934 | 1001 | ||
935 | /* Store aib to get the current ETR status word. */ | 1002 | /* Store aib to get the current ETR status word. */ |
@@ -1016,7 +1083,7 @@ static void etr_work_fn(struct work_struct *work) | |||
1016 | eacr.es || sync_port < 0) { | 1083 | eacr.es || sync_port < 0) { |
1017 | etr_update_eacr(eacr); | 1084 | etr_update_eacr(eacr); |
1018 | etr_set_tolec_timeout(now); | 1085 | etr_set_tolec_timeout(now); |
1019 | return; | 1086 | goto out_unlock; |
1020 | } | 1087 | } |
1021 | 1088 | ||
1022 | /* | 1089 | /* |
@@ -1036,7 +1103,7 @@ static void etr_work_fn(struct work_struct *work) | |||
1036 | etr_update_eacr(eacr); | 1103 | etr_update_eacr(eacr); |
1037 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 1104 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
1038 | if (now < etr_tolec + (1600000 << 12) || | 1105 | if (now < etr_tolec + (1600000 << 12) || |
1039 | etr_sync_clock(&aib, sync_port) != 0) { | 1106 | etr_sync_clock_stop(&aib, sync_port) != 0) { |
1040 | /* Sync failed. Try again in 1/2 second. */ | 1107 | /* Sync failed. Try again in 1/2 second. */ |
1041 | eacr.es = 0; | 1108 | eacr.es = 0; |
1042 | etr_update_eacr(eacr); | 1109 | etr_update_eacr(eacr); |
@@ -1044,6 +1111,8 @@ static void etr_work_fn(struct work_struct *work) | |||
1044 | etr_set_sync_timeout(); | 1111 | etr_set_sync_timeout(); |
1045 | } else | 1112 | } else |
1046 | etr_set_tolec_timeout(now); | 1113 | etr_set_tolec_timeout(now); |
1114 | out_unlock: | ||
1115 | mutex_unlock(&etr_work_mutex); | ||
1047 | } | 1116 | } |
1048 | 1117 | ||
1049 | /* | 1118 | /* |
@@ -1125,13 +1194,13 @@ static ssize_t etr_online_store(struct sys_device *dev, | |||
1125 | return count; /* Nothing to do. */ | 1194 | return count; /* Nothing to do. */ |
1126 | etr_port0_online = value; | 1195 | etr_port0_online = value; |
1127 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 1196 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
1128 | schedule_work(&etr_work); | 1197 | queue_work(time_sync_wq, &etr_work); |
1129 | } else { | 1198 | } else { |
1130 | if (etr_port1_online == value) | 1199 | if (etr_port1_online == value) |
1131 | return count; /* Nothing to do. */ | 1200 | return count; /* Nothing to do. */ |
1132 | etr_port1_online = value; | 1201 | etr_port1_online = value; |
1133 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 1202 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
1134 | schedule_work(&etr_work); | 1203 | queue_work(time_sync_wq, &etr_work); |
1135 | } | 1204 | } |
1136 | return count; | 1205 | return count; |
1137 | } | 1206 | } |
@@ -1332,6 +1401,7 @@ static struct stp_sstpi stp_info; | |||
1332 | static void *stp_page; | 1401 | static void *stp_page; |
1333 | 1402 | ||
1334 | static void stp_work_fn(struct work_struct *work); | 1403 | static void stp_work_fn(struct work_struct *work); |
1404 | static DEFINE_MUTEX(stp_work_mutex); | ||
1335 | static DECLARE_WORK(stp_work, stp_work_fn); | 1405 | static DECLARE_WORK(stp_work, stp_work_fn); |
1336 | 1406 | ||
1337 | static int __init early_parse_stp(char *p) | 1407 | static int __init early_parse_stp(char *p) |
@@ -1356,7 +1426,8 @@ static void __init stp_reset(void) | |||
1356 | if (rc == 0) | 1426 | if (rc == 0) |
1357 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); | 1427 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); |
1358 | else if (stp_online) { | 1428 | else if (stp_online) { |
1359 | printk(KERN_WARNING "Running on non STP capable machine.\n"); | 1429 | pr_warning("The real or virtual hardware system does " |
1430 | "not provide an STP interface\n"); | ||
1360 | free_bootmem((unsigned long) stp_page, PAGE_SIZE); | 1431 | free_bootmem((unsigned long) stp_page, PAGE_SIZE); |
1361 | stp_page = NULL; | 1432 | stp_page = NULL; |
1362 | stp_online = 0; | 1433 | stp_online = 0; |
@@ -1365,8 +1436,12 @@ static void __init stp_reset(void) | |||
1365 | 1436 | ||
1366 | static int __init stp_init(void) | 1437 | static int __init stp_init(void) |
1367 | { | 1438 | { |
1368 | if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online) | 1439 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
1369 | schedule_work(&stp_work); | 1440 | return 0; |
1441 | time_init_wq(); | ||
1442 | if (!stp_online) | ||
1443 | return 0; | ||
1444 | queue_work(time_sync_wq, &stp_work); | ||
1370 | return 0; | 1445 | return 0; |
1371 | } | 1446 | } |
1372 | 1447 | ||
@@ -1383,7 +1458,7 @@ arch_initcall(stp_init); | |||
1383 | static void stp_timing_alert(struct stp_irq_parm *intparm) | 1458 | static void stp_timing_alert(struct stp_irq_parm *intparm) |
1384 | { | 1459 | { |
1385 | if (intparm->tsc || intparm->lac || intparm->tcpc) | 1460 | if (intparm->tsc || intparm->lac || intparm->tcpc) |
1386 | schedule_work(&stp_work); | 1461 | queue_work(time_sync_wq, &stp_work); |
1387 | } | 1462 | } |
1388 | 1463 | ||
1389 | /* | 1464 | /* |
@@ -1397,7 +1472,7 @@ void stp_sync_check(void) | |||
1397 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | 1472 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) |
1398 | return; | 1473 | return; |
1399 | disable_sync_clock(NULL); | 1474 | disable_sync_clock(NULL); |
1400 | schedule_work(&stp_work); | 1475 | queue_work(time_sync_wq, &stp_work); |
1401 | } | 1476 | } |
1402 | 1477 | ||
1403 | /* | 1478 | /* |
@@ -1411,46 +1486,34 @@ void stp_island_check(void) | |||
1411 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | 1486 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) |
1412 | return; | 1487 | return; |
1413 | disable_sync_clock(NULL); | 1488 | disable_sync_clock(NULL); |
1414 | schedule_work(&stp_work); | 1489 | queue_work(time_sync_wq, &stp_work); |
1415 | } | 1490 | } |
1416 | 1491 | ||
1417 | /* | 1492 | |
1418 | * STP tasklet. Check for the STP state and take over the clock | 1493 | static int stp_sync_clock(void *data) |
1419 | * synchronization if the STP clock source is usable. | ||
1420 | */ | ||
1421 | static void stp_work_fn(struct work_struct *work) | ||
1422 | { | 1494 | { |
1423 | struct clock_sync_data stp_sync; | 1495 | static int first; |
1424 | unsigned long long old_clock, delta; | 1496 | unsigned long long old_clock, delta; |
1497 | struct clock_sync_data *stp_sync; | ||
1425 | int rc; | 1498 | int rc; |
1426 | 1499 | ||
1427 | if (!stp_online) { | 1500 | stp_sync = data; |
1428 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
1429 | return; | ||
1430 | } | ||
1431 | 1501 | ||
1432 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | 1502 | if (xchg(&first, 1) == 1) { |
1433 | if (rc) | 1503 | /* Slave */ |
1434 | return; | 1504 | clock_sync_cpu(stp_sync); |
1505 | return 0; | ||
1506 | } | ||
1435 | 1507 | ||
1436 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | 1508 | /* Wait until all other cpus entered the sync function. */ |
1437 | if (rc || stp_info.c == 0) | 1509 | while (atomic_read(&stp_sync->cpus) != 0) |
1438 | return; | 1510 | cpu_relax(); |
1439 | 1511 | ||
1440 | /* | ||
1441 | * Catch all other cpus and make them wait until we have | ||
1442 | * successfully synced the clock. smp_call_function will | ||
1443 | * return after all other cpus are in clock_sync_cpu_start. | ||
1444 | */ | ||
1445 | memset(&stp_sync, 0, sizeof(stp_sync)); | ||
1446 | preempt_disable(); | ||
1447 | smp_call_function(clock_sync_cpu_start, &stp_sync, 0); | ||
1448 | local_irq_disable(); | ||
1449 | enable_sync_clock(); | 1512 | enable_sync_clock(); |
1450 | 1513 | ||
1451 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); | 1514 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); |
1452 | if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | 1515 | if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
1453 | schedule_work(&etr_work); | 1516 | queue_work(time_sync_wq, &etr_work); |
1454 | 1517 | ||
1455 | rc = 0; | 1518 | rc = 0; |
1456 | if (stp_info.todoff[0] || stp_info.todoff[1] || | 1519 | if (stp_info.todoff[0] || stp_info.todoff[1] || |
@@ -1469,16 +1532,49 @@ static void stp_work_fn(struct work_struct *work) | |||
1469 | } | 1532 | } |
1470 | if (rc) { | 1533 | if (rc) { |
1471 | disable_sync_clock(NULL); | 1534 | disable_sync_clock(NULL); |
1472 | stp_sync.in_sync = -EAGAIN; | 1535 | stp_sync->in_sync = -EAGAIN; |
1473 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); | 1536 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); |
1474 | if (etr_port0_online || etr_port1_online) | 1537 | if (etr_port0_online || etr_port1_online) |
1475 | schedule_work(&etr_work); | 1538 | queue_work(time_sync_wq, &etr_work); |
1476 | } else | 1539 | } else |
1477 | stp_sync.in_sync = 1; | 1540 | stp_sync->in_sync = 1; |
1541 | xchg(&first, 0); | ||
1542 | return 0; | ||
1543 | } | ||
1544 | |||
1545 | /* | ||
1546 | * STP work. Check for the STP state and take over the clock | ||
1547 | * synchronization if the STP clock source is usable. | ||
1548 | */ | ||
1549 | static void stp_work_fn(struct work_struct *work) | ||
1550 | { | ||
1551 | struct clock_sync_data stp_sync; | ||
1552 | int rc; | ||
1553 | |||
1554 | /* prevent multiple execution. */ | ||
1555 | mutex_lock(&stp_work_mutex); | ||
1556 | |||
1557 | if (!stp_online) { | ||
1558 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
1559 | goto out_unlock; | ||
1560 | } | ||
1561 | |||
1562 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | ||
1563 | if (rc) | ||
1564 | goto out_unlock; | ||
1565 | |||
1566 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | ||
1567 | if (rc || stp_info.c == 0) | ||
1568 | goto out_unlock; | ||
1569 | |||
1570 | memset(&stp_sync, 0, sizeof(stp_sync)); | ||
1571 | get_online_cpus(); | ||
1572 | atomic_set(&stp_sync.cpus, num_online_cpus() - 1); | ||
1573 | stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); | ||
1574 | put_online_cpus(); | ||
1478 | 1575 | ||
1479 | local_irq_enable(); | 1576 | out_unlock: |
1480 | smp_call_function(clock_sync_cpu_end, NULL, 0); | 1577 | mutex_unlock(&stp_work_mutex); |
1481 | preempt_enable(); | ||
1482 | } | 1578 | } |
1483 | 1579 | ||
1484 | /* | 1580 | /* |
@@ -1587,7 +1683,7 @@ static ssize_t stp_online_store(struct sysdev_class *class, | |||
1587 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | 1683 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
1588 | return -EOPNOTSUPP; | 1684 | return -EOPNOTSUPP; |
1589 | stp_online = value; | 1685 | stp_online = value; |
1590 | schedule_work(&stp_work); | 1686 | queue_work(time_sync_wq, &stp_work); |
1591 | return count; | 1687 | return count; |
1592 | } | 1688 | } |
1593 | 1689 | ||
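Both etr_sync_clock() and stp_sync_clock() now run under stop_machine(), which executes the same callback on every online CPU with interrupts disabled. A static "first" flag claimed with xchg() elects one CPU as the master that actually steps the clock; the others just spin in clock_sync_cpu() until the master reports success or failure. A stripped-down sketch of that election pattern follows; the struct fields are modeled on the clock_sync_data used in the hunks, and the actual clock stepping is elided.

#include <linux/stop_machine.h>
#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/system.h>		/* xchg() in this kernel generation */

struct sync_data {
	atomic_t cpus;		/* CPUs still on their way into the callback */
	int in_sync;		/* set by the master: 1 = done, -EAGAIN = failed */
};

static int sync_fn(void *data)
{
	static int first;
	struct sync_data *sync = data;

	if (xchg(&first, 1) == 1) {
		/* Slave: signal arrival, then wait for the master's verdict. */
		atomic_dec(&sync->cpus);
		while (sync->in_sync == 0)
			cpu_relax();
		return 0;
	}
	/* Master: wait until every other CPU has entered the callback. */
	while (atomic_read(&sync->cpus) != 0)
		cpu_relax();
	/* ... step the clock here ... */
	sync->in_sync = 1;
	xchg(&first, 0);
	return 0;
}

static int run_sync(void)
{
	struct sync_data sync = { .in_sync = 0 };
	int rc;

	get_online_cpus();
	atomic_set(&sync.cpus, num_online_cpus() - 1);
	rc = stop_machine(sync_fn, &sync, &cpu_online_map);
	put_online_cpus();
	return rc;
}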
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index bf96f1b5c6ec..90e9ba11eba1 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -3,6 +3,9 @@ | |||
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define KMSG_COMPONENT "cpu" | ||
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
8 | |||
6 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
7 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
8 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -12,6 +15,7 @@ | |||
12 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
13 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
14 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/cpuset.h> | ||
15 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
16 | #include <asm/s390_ext.h> | 20 | #include <asm/s390_ext.h> |
17 | #include <asm/sysinfo.h> | 21 | #include <asm/sysinfo.h> |
@@ -57,11 +61,11 @@ struct core_info { | |||
57 | cpumask_t mask; | 61 | cpumask_t mask; |
58 | }; | 62 | }; |
59 | 63 | ||
64 | static int topology_enabled; | ||
60 | static void topology_work_fn(struct work_struct *work); | 65 | static void topology_work_fn(struct work_struct *work); |
61 | static struct tl_info *tl_info; | 66 | static struct tl_info *tl_info; |
62 | static struct core_info core_info; | 67 | static struct core_info core_info; |
63 | static int machine_has_topology; | 68 | static int machine_has_topology; |
64 | static int machine_has_topology_irq; | ||
65 | static struct timer_list topology_timer; | 69 | static struct timer_list topology_timer; |
66 | static void set_topology_timer(void); | 70 | static void set_topology_timer(void); |
67 | static DECLARE_WORK(topology_work, topology_work_fn); | 71 | static DECLARE_WORK(topology_work, topology_work_fn); |
@@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu) | |||
77 | cpumask_t mask; | 81 | cpumask_t mask; |
78 | 82 | ||
79 | cpus_clear(mask); | 83 | cpus_clear(mask); |
80 | if (!machine_has_topology) | 84 | if (!topology_enabled || !machine_has_topology) |
81 | return cpu_present_map; | 85 | return cpu_possible_map; |
82 | spin_lock_irqsave(&topology_lock, flags); | 86 | spin_lock_irqsave(&topology_lock, flags); |
83 | while (core) { | 87 | while (core) { |
84 | if (cpu_isset(cpu, core->mask)) { | 88 | if (cpu_isset(cpu, core->mask)) { |
@@ -168,7 +172,7 @@ static void topology_update_polarization_simple(void) | |||
168 | int cpu; | 172 | int cpu; |
169 | 173 | ||
170 | mutex_lock(&smp_cpu_state_mutex); | 174 | mutex_lock(&smp_cpu_state_mutex); |
171 | for_each_present_cpu(cpu) | 175 | for_each_possible_cpu(cpu) |
172 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | 176 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; |
173 | mutex_unlock(&smp_cpu_state_mutex); | 177 | mutex_unlock(&smp_cpu_state_mutex); |
174 | } | 178 | } |
@@ -199,7 +203,7 @@ int topology_set_cpu_management(int fc) | |||
199 | rc = ptf(PTF_HORIZONTAL); | 203 | rc = ptf(PTF_HORIZONTAL); |
200 | if (rc) | 204 | if (rc) |
201 | return -EBUSY; | 205 | return -EBUSY; |
202 | for_each_present_cpu(cpu) | 206 | for_each_possible_cpu(cpu) |
203 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 207 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
204 | return rc; | 208 | return rc; |
205 | } | 209 | } |
@@ -208,7 +212,7 @@ static void update_cpu_core_map(void) | |||
208 | { | 212 | { |
209 | int cpu; | 213 | int cpu; |
210 | 214 | ||
211 | for_each_present_cpu(cpu) | 215 | for_each_possible_cpu(cpu) |
212 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | 216 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); |
213 | } | 217 | } |
214 | 218 | ||
@@ -235,7 +239,7 @@ int arch_update_cpu_topology(void) | |||
235 | 239 | ||
236 | static void topology_work_fn(struct work_struct *work) | 240 | static void topology_work_fn(struct work_struct *work) |
237 | { | 241 | { |
238 | arch_reinit_sched_domains(); | 242 | rebuild_sched_domains(); |
239 | } | 243 | } |
240 | 244 | ||
241 | void topology_schedule_update(void) | 245 | void topology_schedule_update(void) |
@@ -258,10 +262,14 @@ static void set_topology_timer(void) | |||
258 | add_timer(&topology_timer); | 262 | add_timer(&topology_timer); |
259 | } | 263 | } |
260 | 264 | ||
261 | static void topology_interrupt(__u16 code) | 265 | static int __init early_parse_topology(char *p) |
262 | { | 266 | { |
263 | schedule_work(&topology_work); | 267 | if (strncmp(p, "on", 2)) |
268 | return 0; | ||
269 | topology_enabled = 1; | ||
270 | return 0; | ||
264 | } | 271 | } |
272 | early_param("topology", early_parse_topology); | ||
265 | 273 | ||
266 | static int __init init_topology_update(void) | 274 | static int __init init_topology_update(void) |
267 | { | 275 | { |
@@ -273,14 +281,7 @@ static int __init init_topology_update(void) | |||
273 | goto out; | 281 | goto out; |
274 | } | 282 | } |
275 | init_timer_deferrable(&topology_timer); | 283 | init_timer_deferrable(&topology_timer); |
276 | if (machine_has_topology_irq) { | 284 | set_topology_timer(); |
277 | rc = register_external_interrupt(0x2005, topology_interrupt); | ||
278 | if (rc) | ||
279 | goto out; | ||
280 | ctl_set_bit(0, 8); | ||
281 | } | ||
282 | else | ||
283 | set_topology_timer(); | ||
284 | out: | 285 | out: |
285 | update_cpu_core_map(); | 286 | update_cpu_core_map(); |
286 | return rc; | 287 | return rc; |
@@ -301,9 +302,6 @@ void __init s390_init_cpu_topology(void) | |||
301 | return; | 302 | return; |
302 | machine_has_topology = 1; | 303 | machine_has_topology = 1; |
303 | 304 | ||
304 | if (facility_bits & (1ULL << 51)) | ||
305 | machine_has_topology_irq = 1; | ||
306 | |||
307 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 305 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
308 | info = tl_info; | 306 | info = tl_info; |
309 | stsi(info, 15, 1, 2); | 307 | stsi(info, 15, 1, 2); |
@@ -312,7 +310,7 @@ void __init s390_init_cpu_topology(void) | |||
312 | for (i = 0; i < info->mnest - 2; i++) | 310 | for (i = 0; i < info->mnest - 2; i++) |
313 | nr_cores *= info->mag[NR_MAG - 3 - i]; | 311 | nr_cores *= info->mag[NR_MAG - 3 - i]; |
314 | 312 | ||
315 | printk(KERN_INFO "CPU topology:"); | 313 | pr_info("The CPU configuration topology of the machine is:"); |
316 | for (i = 0; i < NR_MAG; i++) | 314 | for (i = 0; i < NR_MAG; i++) |
317 | printk(" %d", info->mag[i]); | 315 | printk(" %d", info->mag[i]); |
318 | printk(" / %d\n", info->mnest); | 316 | printk(" / %d\n", info->mnest); |
@@ -327,5 +325,4 @@ void __init s390_init_cpu_topology(void) | |||
327 | return; | 325 | return; |
328 | error: | 326 | error: |
329 | machine_has_topology = 0; | 327 | machine_has_topology = 0; |
330 | machine_has_topology_irq = 0; | ||
331 | } | 328 | } |
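The topology.c hunks above move the polarization bookkeeping from present to possible CPUs, drop the 0x2005 external-interrupt update path in favour of the deferrable timer, and add the early "topology=" parameter that the kernel-parameters entry documents as defaulting to off. The parsing is easy to read backwards because strncmp() returns zero on a match; a minimal userspace harness (main() and the printf output are illustrative additions, the comparison itself is copied from the hunk) shows the effect of the three obvious values:

/*
 * Minimal userspace sketch of the "topology=" parsing added above.
 * early_parse_topology() only sets topology_enabled when the value
 * starts with "on"; anything else, including "off", leaves it at 0.
 * In the kernel the function is wired up via early_param("topology", ...).
 */
#include <stdio.h>
#include <string.h>

static int topology_enabled;	/* defaults to off, as documented */

static int early_parse_topology(char *p)
{
	if (strncmp(p, "on", 2))	/* non-zero means "does not match" */
		return 0;
	topology_enabled = 1;
	return 0;
}

int main(void)
{
	const char *values[] = { "on", "off", "bogus" };
	unsigned int i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		topology_enabled = 0;
		early_parse_topology((char *) values[i]);
		printf("topology=%s -> enabled=%d\n", values[i], topology_enabled);
	}
	return 0;
}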
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c new file mode 100644 index 000000000000..10a6ccef4412 --- /dev/null +++ b/arch/s390/kernel/vdso.c | |||
@@ -0,0 +1,234 @@ | |||
1 | /* | ||
2 | * vdso setup for s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License (version 2 only) | ||
9 | * as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/unistd.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/user.h> | ||
22 | #include <linux/elf.h> | ||
23 | #include <linux/security.h> | ||
24 | #include <linux/bootmem.h> | ||
25 | |||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/mmu.h> | ||
30 | #include <asm/mmu_context.h> | ||
31 | #include <asm/sections.h> | ||
32 | #include <asm/vdso.h> | ||
33 | |||
34 | /* Max supported size for symbol names */ | ||
35 | #define MAX_SYMNAME 64 | ||
36 | |||
37 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | ||
38 | extern char vdso32_start, vdso32_end; | ||
39 | static void *vdso32_kbase = &vdso32_start; | ||
40 | static unsigned int vdso32_pages; | ||
41 | static struct page **vdso32_pagelist; | ||
42 | #endif | ||
43 | |||
44 | #ifdef CONFIG_64BIT | ||
45 | extern char vdso64_start, vdso64_end; | ||
46 | static void *vdso64_kbase = &vdso64_start; | ||
47 | static unsigned int vdso64_pages; | ||
48 | static struct page **vdso64_pagelist; | ||
49 | #endif /* CONFIG_64BIT */ | ||
50 | |||
51 | /* | ||
52 | * Should the kernel map a VDSO page into processes and pass its | ||
53 | * address down to glibc upon exec()? | ||
54 | */ | ||
55 | unsigned int __read_mostly vdso_enabled = 1; | ||
56 | |||
57 | static int __init vdso_setup(char *s) | ||
58 | { | ||
59 | vdso_enabled = simple_strtoul(s, NULL, 0); | ||
60 | return 1; | ||
61 | } | ||
62 | __setup("vdso=", vdso_setup); | ||
63 | |||
64 | /* | ||
65 | * The vdso data page | ||
66 | */ | ||
67 | static union { | ||
68 | struct vdso_data data; | ||
69 | u8 page[PAGE_SIZE]; | ||
70 | } vdso_data_store __attribute__((__section__(".data.page_aligned"))); | ||
71 | struct vdso_data *vdso_data = &vdso_data_store.data; | ||
72 | |||
73 | /* | ||
74 | * This is called from binfmt_elf, we create the special vma for the | ||
75 | * vDSO and insert it into the mm struct tree | ||
76 | */ | ||
77 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
78 | { | ||
79 | struct mm_struct *mm = current->mm; | ||
80 | struct page **vdso_pagelist; | ||
81 | unsigned long vdso_pages; | ||
82 | unsigned long vdso_base; | ||
83 | int rc; | ||
84 | |||
85 | if (!vdso_enabled) | ||
86 | return 0; | ||
87 | /* | ||
88 | * Only map the vdso for dynamically linked elf binaries. | ||
89 | */ | ||
90 | if (!uses_interp) | ||
91 | return 0; | ||
92 | |||
93 | vdso_base = mm->mmap_base; | ||
94 | #ifdef CONFIG_64BIT | ||
95 | vdso_pagelist = vdso64_pagelist; | ||
96 | vdso_pages = vdso64_pages; | ||
97 | #ifdef CONFIG_COMPAT | ||
98 | if (test_thread_flag(TIF_31BIT)) { | ||
99 | vdso_pagelist = vdso32_pagelist; | ||
100 | vdso_pages = vdso32_pages; | ||
101 | } | ||
102 | #endif | ||
103 | #else | ||
104 | vdso_pagelist = vdso32_pagelist; | ||
105 | vdso_pages = vdso32_pages; | ||
106 | #endif | ||
107 | |||
108 | /* | ||
109 | * vDSO has a problem and was disabled, just don't "enable" it for | ||
110 | * the process | ||
111 | */ | ||
112 | if (vdso_pages == 0) | ||
113 | return 0; | ||
114 | |||
115 | current->mm->context.vdso_base = 0; | ||
116 | |||
117 | /* | ||
118 | * pick a base address for the vDSO in process space. We try to put | ||
119 | * it at vdso_base which is the "natural" base for it, but we might | ||
120 | * fail and end up putting it elsewhere. | ||
121 | */ | ||
122 | down_write(&mm->mmap_sem); | ||
123 | vdso_base = get_unmapped_area(NULL, vdso_base, | ||
124 | vdso_pages << PAGE_SHIFT, 0, 0); | ||
125 | if (IS_ERR_VALUE(vdso_base)) { | ||
126 | rc = vdso_base; | ||
127 | goto out_up; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * our vma flags don't have VM_WRITE so by default, the process | ||
132 | * isn't allowed to write those pages. | ||
133 | * gdb can break that with ptrace interface, and thus trigger COW | ||
134 | * on those pages but it's then your responsibility to never do that | ||
135 | * on the "data" page of the vDSO or you'll stop getting kernel | ||
136 | * updates and your nice userland gettimeofday will be totally dead. | ||
137 | * It's fine to use that for setting breakpoints in the vDSO code | ||
138 | * pages though | ||
139 | * | ||
140 | * Make sure the vDSO gets into every core dump. | ||
141 | * Dumping its contents makes post-mortem fully interpretable later | ||
142 | * without matching up the same kernel and hardware config to see | ||
143 | * what PC values meant. | ||
144 | */ | ||
145 | rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, | ||
146 | VM_READ|VM_EXEC| | ||
147 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | ||
148 | VM_ALWAYSDUMP, | ||
149 | vdso_pagelist); | ||
150 | if (rc) | ||
151 | goto out_up; | ||
152 | |||
153 | /* Put vDSO base into mm struct */ | ||
154 | current->mm->context.vdso_base = vdso_base; | ||
155 | |||
156 | up_write(&mm->mmap_sem); | ||
157 | return 0; | ||
158 | |||
159 | out_up: | ||
160 | up_write(&mm->mmap_sem); | ||
161 | return rc; | ||
162 | } | ||
163 | |||
164 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
165 | { | ||
166 | if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) | ||
167 | return "[vdso]"; | ||
168 | return NULL; | ||
169 | } | ||
170 | |||
171 | static int __init vdso_init(void) | ||
172 | { | ||
173 | int i; | ||
174 | |||
175 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | ||
176 | /* Calculate the size of the 32 bit vDSO */ | ||
177 | vdso32_pages = ((&vdso32_end - &vdso32_start | ||
178 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | ||
179 | |||
180 | /* Make sure pages are in the correct state */ | ||
181 | vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1), | ||
182 | GFP_KERNEL); | ||
183 | BUG_ON(vdso32_pagelist == NULL); | ||
184 | for (i = 0; i < vdso32_pages - 1; i++) { | ||
185 | struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); | ||
186 | ClearPageReserved(pg); | ||
187 | get_page(pg); | ||
188 | vdso32_pagelist[i] = pg; | ||
189 | } | ||
190 | vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data); | ||
191 | vdso32_pagelist[vdso32_pages] = NULL; | ||
192 | #endif | ||
193 | |||
194 | #ifdef CONFIG_64BIT | ||
195 | /* Calculate the size of the 64 bit vDSO */ | ||
196 | vdso64_pages = ((&vdso64_end - &vdso64_start | ||
197 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | ||
198 | |||
199 | /* Make sure pages are in the correct state */ | ||
200 | vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1), | ||
201 | GFP_KERNEL); | ||
202 | BUG_ON(vdso64_pagelist == NULL); | ||
203 | for (i = 0; i < vdso64_pages - 1; i++) { | ||
204 | struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); | ||
205 | ClearPageReserved(pg); | ||
206 | get_page(pg); | ||
207 | vdso64_pagelist[i] = pg; | ||
208 | } | ||
209 | vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); | ||
210 | vdso64_pagelist[vdso64_pages] = NULL; | ||
211 | #endif /* CONFIG_64BIT */ | ||
212 | |||
213 | get_page(virt_to_page(vdso_data)); | ||
214 | |||
215 | smp_wmb(); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | arch_initcall(vdso_init); | ||
220 | |||
221 | int in_gate_area_no_task(unsigned long addr) | ||
222 | { | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | int in_gate_area(struct task_struct *task, unsigned long addr) | ||
227 | { | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | ||
232 | { | ||
233 | return NULL; | ||
234 | } | ||
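The new vdso.c decides per exec() whether to map the vDSO at all (vdso_enabled and uses_interp both have to be non-zero), selects the 31-bit or 64-bit page list, maps it read-execute with install_special_mapping(), and records the base address in mm->context.vdso_base so that arch_vma_name() can label the region. A quick way to see the result from userspace, assuming nothing beyond what the patch itself provides, is to look for that label in /proc/self/maps; statically linked binaries and boots with vdso=0 print nothing:

/*
 * Illustrative check (not part of the patch): because arch_vma_name()
 * above returns "[vdso]" for the special mapping, a dynamically linked
 * process can spot the vDSO in /proc/self/maps.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps)
		return 1;
	while (fgets(line, sizeof(line), maps))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);	/* address range of the vDSO */
	fclose(maps);
	return 0;
}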
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile new file mode 100644 index 000000000000..ca78ad60ba24 --- /dev/null +++ b/arch/s390/kernel/vdso32/Makefile | |||
@@ -0,0 +1,55 @@ | |||
1 | # List of files in the vdso, has to be asm only for now | ||
2 | |||
3 | obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o | ||
4 | |||
5 | # Build rules | ||
6 | |||
7 | targets := $(obj-vdso32) vdso32.so vdso32.so.dbg | ||
8 | obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) | ||
9 | |||
10 | KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS)) | ||
11 | KBUILD_AFLAGS_31 += -m31 -s | ||
12 | |||
13 | KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) | ||
14 | KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin | ||
15 | KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ | ||
16 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
17 | |||
18 | $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) | ||
19 | $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) | ||
20 | |||
21 | obj-y += vdso32_wrapper.o | ||
22 | extra-y += vdso32.lds | ||
23 | CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) | ||
24 | |||
25 | # Force dependency (incbin is bad) | ||
26 | $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so | ||
27 | |||
28 | # link rule for the .so file, .lds has to be first | ||
29 | $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) | ||
30 | $(call if_changed,vdso32ld) | ||
31 | |||
32 | # strip rule for the .so file | ||
33 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
34 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
35 | $(call if_changed,objcopy) | ||
36 | |||
37 | # assembly rules for the .S files | ||
38 | $(obj-vdso32): %.o: %.S | ||
39 | $(call if_changed_dep,vdso32as) | ||
40 | |||
41 | # actual build commands | ||
42 | quiet_cmd_vdso32ld = VDSO32L $@ | ||
43 | cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | ||
44 | quiet_cmd_vdso32as = VDSO32A $@ | ||
45 | cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< | ||
46 | |||
47 | # install commands for the unstripped file | ||
48 | quiet_cmd_vdso_install = INSTALL $@ | ||
49 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
50 | |||
51 | vdso32.so: $(obj)/vdso32.so.dbg | ||
52 | @mkdir -p $(MODLIB)/vdso | ||
53 | $(call cmd,vdso_install) | ||
54 | |||
55 | vdso_install: vdso32.so | ||
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S new file mode 100644 index 000000000000..9532c4e6a9d2 --- /dev/null +++ b/arch/s390/kernel/vdso32/clock_getres.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_getres() for 32 bits processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_getres | ||
19 | .type __kernel_clock_getres,@function | ||
20 | __kernel_clock_getres: | ||
21 | .cfi_startproc | ||
22 | chi %r2,CLOCK_REALTIME | ||
23 | je 0f | ||
24 | chi %r2,CLOCK_MONOTONIC | ||
25 | jne 3f | ||
26 | 0: ltr %r3,%r3 | ||
27 | jz 2f /* res == NULL */ | ||
28 | basr %r1,0 | ||
29 | 1: l %r0,4f-1b(%r1) | ||
30 | xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ | ||
31 | st %r0,4(%r3) /* store tp->tv_usec */ | ||
32 | 2: lhi %r2,0 | ||
33 | br %r14 | ||
34 | 3: lhi %r1,__NR_clock_getres /* fallback to svc */ | ||
35 | svc 0 | ||
36 | br %r14 | ||
37 | 4: .long CLOCK_REALTIME_RES | ||
38 | .cfi_endproc | ||
39 | .size __kernel_clock_getres,.-__kernel_clock_getres | ||
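__kernel_clock_getres never enters the kernel for CLOCK_REALTIME and CLOCK_MONOTONIC: it clears tp->tv_sec with xc, stores the fixed CLOCK_REALTIME_RES value as the nanosecond resolution, and only issues svc 0 for other clock ids. A rough C rendering, with a placeholder value standing in for the kernel-provided CLOCK_REALTIME_RES constant and syscall() standing in for the svc fallback, is:

/*
 * Hedged C sketch of __kernel_clock_getres above; an illustration of
 * the control flow, not the kernel's implementation. The resolution
 * value below is a placeholder for the kernel's CLOCK_REALTIME_RES.
 */
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

#define CLOCK_REALTIME_RES 1	/* placeholder; the kernel supplies the real value */

long kernel_clock_getres(clockid_t which, struct timespec *res)
{
	if (which != CLOCK_REALTIME && which != CLOCK_MONOTONIC)
		return syscall(__NR_clock_getres, which, res);	/* the "svc 0" fallback */
	if (res) {					/* res == NULL skips the store */
		res->tv_sec = 0;			/* xc 0(4,%r3),0(%r3) */
		res->tv_nsec = CLOCK_REALTIME_RES;	/* st %r0,4(%r3) */
	}
	return 0;
}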
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S new file mode 100644 index 000000000000..4a98909a8310 --- /dev/null +++ b/arch/s390/kernel/vdso32/clock_gettime.S | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_gettime() for 32 bits processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_gettime | ||
19 | .type __kernel_clock_gettime,@function | ||
20 | __kernel_clock_gettime: | ||
21 | .cfi_startproc | ||
22 | basr %r5,0 | ||
23 | 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ | ||
24 | chi %r2,CLOCK_REALTIME | ||
25 | je 10f | ||
26 | chi %r2,CLOCK_MONOTONIC | ||
27 | jne 19f | ||
28 | |||
29 | /* CLOCK_MONOTONIC */ | ||
30 | ltr %r3,%r3 | ||
31 | jz 9f /* tp == NULL */ | ||
32 | 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | ||
33 | tml %r4,0x0001 /* pending update ? loop */ | ||
34 | jnz 1b | ||
35 | stck 24(%r15) /* Store TOD clock */ | ||
36 | lm %r0,%r1,24(%r15) | ||
37 | s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
38 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | ||
39 | brc 3,2f | ||
40 | ahi %r0,-1 | ||
41 | 2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | ||
42 | lr %r2,%r0 | ||
43 | lhi %r0,1000 | ||
44 | ltr %r1,%r1 | ||
45 | mr %r0,%r0 | ||
46 | jnm 3f | ||
47 | ahi %r0,1000 | ||
48 | 3: alr %r0,%r2 | ||
49 | srdl %r0,12 | ||
50 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
51 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
52 | brc 12,4f | ||
53 | ahi %r0,1 | ||
54 | 4: l %r2,__VDSO_XTIME_SEC+4(%r5) | ||
55 | al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ | ||
56 | al %r1,__VDSO_WTOM_NSEC+4(%r5) | ||
57 | brc 12,5f | ||
58 | ahi %r0,1 | ||
59 | 5: al %r2,__VDSO_WTOM_SEC+4(%r5) | ||
60 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | ||
61 | jne 1b | ||
62 | basr %r5,0 | ||
63 | 6: ltr %r0,%r0 | ||
64 | jnz 7f | ||
65 | cl %r1,20f-6b(%r5) | ||
66 | jl 8f | ||
67 | 7: ahi %r2,1 | ||
68 | sl %r1,20f-6b(%r5) | ||
69 | brc 3,6b | ||
70 | ahi %r0,-1 | ||
71 | j 6b | ||
72 | 8: st %r2,0(%r3) /* store tp->tv_sec */ | ||
73 | st %r1,4(%r3) /* store tp->tv_nsec */ | ||
74 | 9: lhi %r2,0 | ||
75 | br %r14 | ||
76 | |||
77 | /* CLOCK_REALTIME */ | ||
78 | 10: ltr %r3,%r3 /* tp == NULL */ | ||
79 | jz 18f | ||
80 | 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | ||
81 | tml %r4,0x0001 /* pending update ? loop */ | ||
82 | jnz 11b | ||
83 | stck 24(%r15) /* Store TOD clock */ | ||
84 | lm %r0,%r1,24(%r15) | ||
85 | s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
86 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | ||
87 | brc 3,12f | ||
88 | ahi %r0,-1 | ||
89 | 12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | ||
90 | lr %r2,%r0 | ||
91 | lhi %r0,1000 | ||
92 | ltr %r1,%r1 | ||
93 | mr %r0,%r0 | ||
94 | jnm 13f | ||
95 | ahi %r0,1000 | ||
96 | 13: alr %r0,%r2 | ||
97 | srdl %r0,12 | ||
98 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
99 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
100 | brc 12,14f | ||
101 | ahi %r0,1 | ||
102 | 14: l %r2,__VDSO_XTIME_SEC+4(%r5) | ||
103 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | ||
104 | jne 11b | ||
105 | basr %r5,0 | ||
106 | 15: ltr %r0,%r0 | ||
107 | jnz 16f | ||
108 | cl %r1,20f-15b(%r5) | ||
109 | jl 17f | ||
110 | 16: ahi %r2,1 | ||
111 | sl %r1,20f-15b(%r5) | ||
112 | brc 3,15b | ||
113 | ahi %r0,-1 | ||
114 | j 15b | ||
115 | 17: st %r2,0(%r3) /* store tp->tv_sec */ | ||
116 | st %r1,4(%r3) /* store tp->tv_nsec */ | ||
117 | 18: lhi %r2,0 | ||
118 | br %r14 | ||
119 | |||
120 | /* Fallback to system call */ | ||
121 | 19: lhi %r1,__NR_clock_gettime | ||
122 | svc 0 | ||
123 | br %r14 | ||
124 | |||
125 | 20: .long 1000000000 | ||
126 | 21: .long _vdso_data - 0b | ||
127 | .cfi_endproc | ||
128 | .size __kernel_clock_gettime,.-__kernel_clock_gettime | ||
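__kernel_clock_gettime is a lockless reader of the vdso data page: it spins while the low bit of the update counter is set (the kernel marks an in-progress update that way), takes a TOD timestamp with stck, converts the delta since cycle_last to nanoseconds (one TOD unit is 1/4096 microsecond, hence the multiply by 1000 and the 12-bit shift), adds xtime and, for CLOCK_MONOTONIC, wall_to_monotonic, then re-reads the counter and retries if an update slipped in; finally any nanosecond overflow is folded into seconds. The 31-bit code has to do all of this with register pairs; written with 64-bit arithmetic the algorithm is roughly the sketch below (the struct field names only mirror the __VDSO_* offsets, and get_tod_clock() stands for the stck instruction):

/*
 * Hedged C sketch of the lockless read loop in __kernel_clock_gettime.
 * Field names mirror the __VDSO_* offsets used above; get_tod_clock()
 * is a stand-in for STCK. Illustration of the algorithm only.
 */
#include <stdint.h>
#include <time.h>

struct vdso_data_sketch {
	uint64_t tb_update_count;	/* __VDSO_UPD_COUNT: odd while updating */
	uint64_t xtime_tod_stamp;	/* __VDSO_XTIME_STAMP: TOD at last update */
	uint64_t xtime_sec;		/* __VDSO_XTIME_SEC */
	uint64_t xtime_nsec;		/* __VDSO_XTIME_NSEC */
	uint64_t wtom_sec;		/* __VDSO_WTOM_SEC */
	uint64_t wtom_nsec;		/* __VDSO_WTOM_NSEC */
};

extern uint64_t get_tod_clock(void);	/* STCK stand-in */

static void do_monotonic(const struct vdso_data_sketch *vd, struct timespec *tp)
{
	uint64_t seq, sec, nsec;

	do {
		while ((seq = vd->tb_update_count) & 1)
			;				/* pending update: spin */
		/* one TOD unit = 1/4096 us, so delta * 1000 >> 12 is nanoseconds */
		nsec = ((get_tod_clock() - vd->xtime_tod_stamp) * 1000) >> 12;
		nsec += vd->xtime_nsec + vd->wtom_nsec;	/* xtime + wall_to_monotonic */
		sec = vd->xtime_sec + vd->wtom_sec;
	} while (vd->tb_update_count != seq);		/* raced with an update? retry */

	while (nsec >= 1000000000ULL) {			/* fold ns overflow into seconds */
		nsec -= 1000000000ULL;
		sec++;
	}
	tp->tv_sec = sec;
	tp->tv_nsec = nsec;
}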
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S new file mode 100644 index 000000000000..c32f29c3d70c --- /dev/null +++ b/arch/s390/kernel/vdso32/gettimeofday.S | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Userland implementation of gettimeofday() for 32 bits processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | #include <asm/vdso.h> | ||
17 | #include <asm/asm-offsets.h> | ||
18 | #include <asm/unistd.h> | ||
19 | |||
20 | .text | ||
21 | .align 4 | ||
22 | .globl __kernel_gettimeofday | ||
23 | .type __kernel_gettimeofday,@function | ||
24 | __kernel_gettimeofday: | ||
25 | .cfi_startproc | ||
26 | basr %r5,0 | ||
27 | 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ | ||
28 | 1: ltr %r3,%r3 /* check if tz is NULL */ | ||
29 | je 2f | ||
30 | mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) | ||
31 | 2: ltr %r2,%r2 /* check if tv is NULL */ | ||
32 | je 10f | ||
33 | l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | ||
34 | tml %r4,0x0001 /* pending update ? loop */ | ||
35 | jnz 1b | ||
36 | stck 24(%r15) /* Store TOD clock */ | ||
37 | lm %r0,%r1,24(%r15) | ||
38 | s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
39 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | ||
40 | brc 3,3f | ||
41 | ahi %r0,-1 | ||
42 | 3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | ||
43 | st %r0,24(%r15) | ||
44 | lhi %r0,1000 | ||
45 | ltr %r1,%r1 | ||
46 | mr %r0,%r0 | ||
47 | jnm 4f | ||
48 | ahi %r0,1000 | ||
49 | 4: al %r0,24(%r15) | ||
50 | srdl %r0,12 | ||
51 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
52 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
53 | brc 12,5f | ||
54 | ahi %r0,1 | ||
55 | 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) | ||
56 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | ||
57 | jne 1b | ||
58 | l %r4,24(%r15) /* get tv_sec from stack */ | ||
59 | basr %r5,0 | ||
60 | 6: ltr %r0,%r0 | ||
61 | jnz 7f | ||
62 | cl %r1,11f-6b(%r5) | ||
63 | jl 8f | ||
64 | 7: ahi %r4,1 | ||
65 | sl %r1,11f-6b(%r5) | ||
66 | brc 3,6b | ||
67 | ahi %r0,-1 | ||
68 | j 6b | ||
69 | 8: st %r4,0(%r2) /* store tv->tv_sec */ | ||
70 | ltr %r1,%r1 | ||
71 | m %r0,12f-6b(%r5) | ||
72 | jnm 9f | ||
73 | al %r0,12f-6b(%r5) | ||
74 | 9: srl %r0,6 | ||
75 | st %r0,4(%r2) /* store tv->tv_usec */ | ||
76 | 10: slr %r2,%r2 | ||
77 | br %r14 | ||
78 | 11: .long 1000000000 | ||
79 | 12: .long 274877907 | ||
80 | 13: .long _vdso_data - 0b | ||
81 | .cfi_endproc | ||
82 | .size __kernel_gettimeofday,.-__kernel_gettimeofday | ||
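__kernel_gettimeofday reuses the same read loop but must turn nanoseconds into microseconds without a divide: once the value is folded into the 0..999999999 range it is multiplied by 274877907, which is 2^38/1000 rounded up, and the m + srl 6 sequence keeps the high word of the 64-bit product shifted by a further 6 bits, i.e. a 38-bit right shift in total. Within that range the reciprocal multiply is exact, which the small check below confirms by brute force (illustrative only, not part of the patch):

/*
 * Check of the division-free ns -> us conversion used above:
 * 274877907 is 2^38 / 1000 rounded up, and the m + srl 6 sequence
 * amounts to a 38-bit right shift of the 64-bit product. The loop
 * verifies the trick is exact for every value the code can see.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ns;

	for (ns = 0; ns < 1000000000ULL; ns++) {
		uint64_t us = (ns * 274877907ULL) >> 38;

		if (us != ns / 1000) {
			printf("mismatch at ns=%llu\n", (unsigned long long) ns);
			return 1;
		}
	}
	puts("ns -> us reciprocal multiply is exact for 0..999999999");
	return 0;
}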
diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso32/note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/s390/kernel/vdso32/note.S | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | #include <linux/elfnote.h> | ||
9 | |||
10 | ELFNOTE_START(Linux, 0, "a") | ||
11 | .long LINUX_VERSION_CODE | ||
12 | ELFNOTE_END | ||
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S new file mode 100644 index 000000000000..a8c379fa1247 --- /dev/null +++ b/arch/s390/kernel/vdso32/vdso32.lds.S | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * This is the infamous ld script for the 32 bits vdso | ||
3 | * library | ||
4 | */ | ||
5 | #include <asm/vdso.h> | ||
6 | |||
7 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
8 | OUTPUT_ARCH(s390:31-bit) | ||
9 | ENTRY(_start) | ||
10 | |||
11 | SECTIONS | ||
12 | { | ||
13 | . = VDSO32_LBASE + SIZEOF_HEADERS; | ||
14 | |||
15 | .hash : { *(.hash) } :text | ||
16 | .gnu.hash : { *(.gnu.hash) } | ||
17 | .dynsym : { *(.dynsym) } | ||
18 | .dynstr : { *(.dynstr) } | ||
19 | .gnu.version : { *(.gnu.version) } | ||
20 | .gnu.version_d : { *(.gnu.version_d) } | ||
21 | .gnu.version_r : { *(.gnu.version_r) } | ||
22 | |||
23 | .note : { *(.note.*) } :text :note | ||
24 | |||
25 | . = ALIGN(16); | ||
26 | .text : { | ||
27 | *(.text .stub .text.* .gnu.linkonce.t.*) | ||
28 | } :text | ||
29 | PROVIDE(__etext = .); | ||
30 | PROVIDE(_etext = .); | ||
31 | PROVIDE(etext = .); | ||
32 | |||
33 | /* | ||
34 | * Other stuff is appended to the text segment: | ||
35 | */ | ||
36 | .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } | ||
37 | .rodata1 : { *(.rodata1) } | ||
38 | |||
39 | .dynamic : { *(.dynamic) } :text :dynamic | ||
40 | |||
41 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
42 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
43 | .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } | ||
44 | |||
45 | .rela.dyn ALIGN(8) : { *(.rela.dyn) } | ||
46 | .got ALIGN(8) : { *(.got .toc) } | ||
47 | |||
48 | _end = .; | ||
49 | PROVIDE(end = .); | ||
50 | |||
51 | /* | ||
52 | * Stabs debugging sections are here too. | ||
53 | */ | ||
54 | .stab 0 : { *(.stab) } | ||
55 | .stabstr 0 : { *(.stabstr) } | ||
56 | .stab.excl 0 : { *(.stab.excl) } | ||
57 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
58 | .stab.index 0 : { *(.stab.index) } | ||
59 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
60 | .comment 0 : { *(.comment) } | ||
61 | |||
62 | /* | ||
63 | * DWARF debug sections. | ||
64 | * Symbols in the DWARF debugging sections are relative to the | ||
65 | * beginning of the section so we begin them at 0. | ||
66 | */ | ||
67 | /* DWARF 1 */ | ||
68 | .debug 0 : { *(.debug) } | ||
69 | .line 0 : { *(.line) } | ||
70 | /* GNU DWARF 1 extensions */ | ||
71 | .debug_srcinfo 0 : { *(.debug_srcinfo) } | ||
72 | .debug_sfnames 0 : { *(.debug_sfnames) } | ||
73 | /* DWARF 1.1 and DWARF 2 */ | ||
74 | .debug_aranges 0 : { *(.debug_aranges) } | ||
75 | .debug_pubnames 0 : { *(.debug_pubnames) } | ||
76 | /* DWARF 2 */ | ||
77 | .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } | ||
78 | .debug_abbrev 0 : { *(.debug_abbrev) } | ||
79 | .debug_line 0 : { *(.debug_line) } | ||
80 | .debug_frame 0 : { *(.debug_frame) } | ||
81 | .debug_str 0 : { *(.debug_str) } | ||
82 | .debug_loc 0 : { *(.debug_loc) } | ||
83 | .debug_macinfo 0 : { *(.debug_macinfo) } | ||
84 | /* SGI/MIPS DWARF 2 extensions */ | ||
85 | .debug_weaknames 0 : { *(.debug_weaknames) } | ||
86 | .debug_funcnames 0 : { *(.debug_funcnames) } | ||
87 | .debug_typenames 0 : { *(.debug_typenames) } | ||
88 | .debug_varnames 0 : { *(.debug_varnames) } | ||
89 | /* DWARF 3 */ | ||
90 | .debug_pubtypes 0 : { *(.debug_pubtypes) } | ||
91 | .debug_ranges 0 : { *(.debug_ranges) } | ||
92 | .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } | ||
93 | |||
94 | . = ALIGN(4096); | ||
95 | PROVIDE(_vdso_data = .); | ||
96 | |||
97 | /DISCARD/ : { | ||
98 | *(.note.GNU-stack) | ||
99 | *(.branch_lt) | ||
100 | *(.data .data.* .gnu.linkonce.d.* .sdata*) | ||
101 | *(.bss .sbss .dynbss .dynsbss) | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Very old versions of ld do not recognize this name token; use the constant. | ||
107 | */ | ||
108 | #define PT_GNU_EH_FRAME 0x6474e550 | ||
109 | |||
110 | /* | ||
111 | * We must supply the ELF program headers explicitly to get just one | ||
112 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
113 | */ | ||
114 | PHDRS | ||
115 | { | ||
116 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | ||
117 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
118 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
119 | eh_frame_hdr PT_GNU_EH_FRAME; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * This controls what symbols we export from the DSO. | ||
124 | */ | ||
125 | VERSION | ||
126 | { | ||
127 | VDSO_VERSION_STRING { | ||
128 | global: | ||
129 | /* | ||
130 | * Has to be there for the kernel to find | ||
131 | */ | ||
132 | __kernel_gettimeofday; | ||
133 | __kernel_clock_gettime; | ||
134 | __kernel_clock_getres; | ||
135 | |||
136 | local: *; | ||
137 | }; | ||
138 | } | ||
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S new file mode 100644 index 000000000000..61639a89e70b --- /dev/null +++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <asm/page.h> | ||
3 | |||
4 | .section ".data.page_aligned" | ||
5 | |||
6 | .globl vdso32_start, vdso32_end | ||
7 | .balign PAGE_SIZE | ||
8 | vdso32_start: | ||
9 | .incbin "arch/s390/kernel/vdso32/vdso32.so" | ||
10 | .balign PAGE_SIZE | ||
11 | vdso32_end: | ||
12 | |||
13 | .previous | ||
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile new file mode 100644 index 000000000000..6fc8e829258c --- /dev/null +++ b/arch/s390/kernel/vdso64/Makefile | |||
@@ -0,0 +1,55 @@ | |||
1 | # List of files in the vdso, has to be asm only for now | ||
2 | |||
3 | obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o | ||
4 | |||
5 | # Build rules | ||
6 | |||
7 | targets := $(obj-vdso64) vdso64.so vdso64.so.dbg | ||
8 | obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) | ||
9 | |||
10 | KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) | ||
11 | KBUILD_AFLAGS_64 += -m64 -s | ||
12 | |||
13 | KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) | ||
14 | KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin | ||
15 | KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ | ||
16 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
17 | |||
18 | $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) | ||
19 | $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) | ||
20 | |||
21 | obj-y += vdso64_wrapper.o | ||
22 | extra-y += vdso64.lds | ||
23 | CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) | ||
24 | |||
25 | # Force dependency (incbin is bad) | ||
26 | $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so | ||
27 | |||
28 | # link rule for the .so file, .lds has to be first | ||
29 | $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) | ||
30 | $(call if_changed,vdso64ld) | ||
31 | |||
32 | # strip rule for the .so file | ||
33 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
34 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
35 | $(call if_changed,objcopy) | ||
36 | |||
37 | # assembly rules for the .S files | ||
38 | $(obj-vdso64): %.o: %.S | ||
39 | $(call if_changed_dep,vdso64as) | ||
40 | |||
41 | # actual build commands | ||
42 | quiet_cmd_vdso64ld = VDSO64L $@ | ||
43 | cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | ||
44 | quiet_cmd_vdso64as = VDSO64A $@ | ||
45 | cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< | ||
46 | |||
47 | # install commands for the unstripped file | ||
48 | quiet_cmd_vdso_install = INSTALL $@ | ||
49 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
50 | |||
51 | vdso64.so: $(obj)/vdso64.so.dbg | ||
52 | @mkdir -p $(MODLIB)/vdso | ||
53 | $(call cmd,vdso_install) | ||
54 | |||
55 | vdso_install: vdso64.so | ||
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S new file mode 100644 index 000000000000..488e31a3c0e7 --- /dev/null +++ b/arch/s390/kernel/vdso64/clock_getres.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_getres() for 64 bits processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_getres | ||
19 | .type __kernel_clock_getres,@function | ||
20 | __kernel_clock_getres: | ||
21 | .cfi_startproc | ||
22 | cghi %r2,CLOCK_REALTIME | ||
23 | je 0f | ||
24 | cghi %r2,CLOCK_MONOTONIC | ||
25 | jne 2f | ||
26 | 0: ltgr %r3,%r3 | ||
27 | jz 1f /* res == NULL */ | ||
28 | larl %r1,3f | ||
29 | lg %r0,0(%r1) | ||
30 | xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ | ||
31 | stg %r0,8(%r3) /* store tp->tv_usec */ | ||
32 | 1: lghi %r2,0 | ||
33 | br %r14 | ||
34 | 2: lghi %r1,__NR_clock_getres /* fallback to svc */ | ||
35 | svc 0 | ||
36 | br %r14 | ||
37 | 3: .quad CLOCK_REALTIME_RES | ||
38 | .cfi_endproc | ||
39 | .size __kernel_clock_getres,.-__kernel_clock_getres | ||
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S new file mode 100644 index 000000000000..738a410b7eb2 --- /dev/null +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_gettime() for 64 bits processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_gettime | ||
19 | .type __kernel_clock_gettime,@function | ||
20 | __kernel_clock_gettime: | ||
21 | .cfi_startproc | ||
22 | larl %r5,_vdso_data | ||
23 | cghi %r2,CLOCK_REALTIME | ||
24 | je 4f | ||
25 | cghi %r2,CLOCK_MONOTONIC | ||
26 | jne 9f | ||
27 | |||
28 | /* CLOCK_MONOTONIC */ | ||
29 | ltgr %r3,%r3 | ||
30 | jz 3f /* tp == NULL */ | ||
31 | 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | ||
32 | tmll %r4,0x0001 /* pending update ? loop */ | ||
33 | jnz 0b | ||
34 | stck 48(%r15) /* Store TOD clock */ | ||
35 | lg %r1,48(%r15) | ||
36 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
37 | mghi %r1,1000 | ||
38 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | ||
39 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
40 | lg %r0,__VDSO_XTIME_SEC(%r5) | ||
41 | alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ | ||
42 | alg %r0,__VDSO_WTOM_SEC(%r5) | ||
43 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | ||
44 | jne 0b | ||
45 | larl %r5,10f | ||
46 | 1: clg %r1,0(%r5) | ||
47 | jl 2f | ||
48 | slg %r1,0(%r5) | ||
49 | aghi %r0,1 | ||
50 | j 1b | ||
51 | 2: stg %r0,0(%r3) /* store tp->tv_sec */ | ||
52 | stg %r1,8(%r3) /* store tp->tv_nsec */ | ||
53 | 3: lghi %r2,0 | ||
54 | br %r14 | ||
55 | |||
56 | /* CLOCK_REALTIME */ | ||
57 | 4: ltr %r3,%r3 /* tp == NULL */ | ||
58 | jz 8f | ||
59 | 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | ||
60 | tmll %r4,0x0001 /* pending update ? loop */ | ||
61 | jnz 5b | ||
62 | stck 48(%r15) /* Store TOD clock */ | ||
63 | lg %r1,48(%r15) | ||
64 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
65 | mghi %r1,1000 | ||
66 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | ||
67 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
68 | lg %r0,__VDSO_XTIME_SEC(%r5) | ||
69 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | ||
70 | jne 5b | ||
71 | larl %r5,10f | ||
72 | 6: clg %r1,0(%r5) | ||
73 | jl 7f | ||
74 | slg %r1,0(%r5) | ||
75 | aghi %r0,1 | ||
76 | j 6b | ||
77 | 7: stg %r0,0(%r3) /* store tp->tv_sec */ | ||
78 | stg %r1,8(%r3) /* store tp->tv_nsec */ | ||
79 | 8: lghi %r2,0 | ||
80 | br %r14 | ||
81 | |||
82 | /* Fallback to system call */ | ||
83 | 9: lghi %r1,__NR_clock_gettime | ||
84 | svc 0 | ||
85 | br %r14 | ||
86 | |||
87 | 10: .quad 1000000000 | ||
88 | .cfi_endproc | ||
89 | .size __kernel_clock_gettime,.-__kernel_clock_gettime | ||
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S new file mode 100644 index 000000000000..f873e75634e1 --- /dev/null +++ b/arch/s390/kernel/vdso64/gettimeofday.S | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Userland implementation of gettimeofday() for 64 bits processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_gettimeofday | ||
19 | .type __kernel_gettimeofday,@function | ||
20 | __kernel_gettimeofday: | ||
21 | .cfi_startproc | ||
22 | larl %r5,_vdso_data | ||
23 | 0: ltgr %r3,%r3 /* check if tz is NULL */ | ||
24 | je 1f | ||
25 | mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) | ||
26 | 1: ltgr %r2,%r2 /* check if tv is NULL */ | ||
27 | je 4f | ||
28 | lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | ||
29 | tmll %r4,0x0001 /* pending update ? loop */ | ||
30 | jnz 0b | ||
31 | stck 48(%r15) /* Store TOD clock */ | ||
32 | lg %r1,48(%r15) | ||
33 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
34 | mghi %r1,1000 | ||
35 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | ||
36 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ | ||
37 | lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ | ||
38 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | ||
39 | jne 0b | ||
40 | larl %r5,5f | ||
41 | 2: clg %r1,0(%r5) | ||
42 | jl 3f | ||
43 | slg %r1,0(%r5) | ||
44 | aghi %r0,1 | ||
45 | j 2b | ||
46 | 3: stg %r0,0(%r2) /* store tv->tv_sec */ | ||
47 | slgr %r0,%r0 /* tv_nsec -> tv_usec */ | ||
48 | ml %r0,8(%r5) | ||
49 | srlg %r0,%r0,6 | ||
50 | stg %r0,8(%r2) /* store tv->tv_usec */ | ||
51 | 4: lghi %r2,0 | ||
52 | br %r14 | ||
53 | 5: .quad 1000000000 | ||
54 | .long 274877907 | ||
55 | .cfi_endproc | ||
56 | .size __kernel_gettimeofday,.-__kernel_gettimeofday | ||
diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/s390/kernel/vdso64/note.S | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | #include <linux/elfnote.h> | ||
9 | |||
10 | ELFNOTE_START(Linux, 0, "a") | ||
11 | .long LINUX_VERSION_CODE | ||
12 | ELFNOTE_END | ||
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S new file mode 100644 index 000000000000..9f5979d102a9 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64.lds.S | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * This is the infamous ld script for the 64 bits vdso | ||
3 | * library | ||
4 | */ | ||
5 | #include <asm/vdso.h> | ||
6 | |||
7 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | ||
8 | OUTPUT_ARCH(s390:64-bit) | ||
9 | ENTRY(_start) | ||
10 | |||
11 | SECTIONS | ||
12 | { | ||
13 | . = VDSO64_LBASE + SIZEOF_HEADERS; | ||
14 | |||
15 | .hash : { *(.hash) } :text | ||
16 | .gnu.hash : { *(.gnu.hash) } | ||
17 | .dynsym : { *(.dynsym) } | ||
18 | .dynstr : { *(.dynstr) } | ||
19 | .gnu.version : { *(.gnu.version) } | ||
20 | .gnu.version_d : { *(.gnu.version_d) } | ||
21 | .gnu.version_r : { *(.gnu.version_r) } | ||
22 | |||
23 | .note : { *(.note.*) } :text :note | ||
24 | |||
25 | . = ALIGN(16); | ||
26 | .text : { | ||
27 | *(.text .stub .text.* .gnu.linkonce.t.*) | ||
28 | } :text | ||
29 | PROVIDE(__etext = .); | ||
30 | PROVIDE(_etext = .); | ||
31 | PROVIDE(etext = .); | ||
32 | |||
33 | /* | ||
34 | * Other stuff is appended to the text segment: | ||
35 | */ | ||
36 | .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } | ||
37 | .rodata1 : { *(.rodata1) } | ||
38 | |||
39 | .dynamic : { *(.dynamic) } :text :dynamic | ||
40 | |||
41 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
42 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
43 | .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } | ||
44 | |||
45 | .rela.dyn ALIGN(8) : { *(.rela.dyn) } | ||
46 | .got ALIGN(8) : { *(.got .toc) } | ||
47 | |||
48 | _end = .; | ||
49 | PROVIDE(end = .); | ||
50 | |||
51 | /* | ||
52 | * Stabs debugging sections are here too. | ||
53 | */ | ||
54 | .stab 0 : { *(.stab) } | ||
55 | .stabstr 0 : { *(.stabstr) } | ||
56 | .stab.excl 0 : { *(.stab.excl) } | ||
57 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
58 | .stab.index 0 : { *(.stab.index) } | ||
59 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
60 | .comment 0 : { *(.comment) } | ||
61 | |||
62 | /* | ||
63 | * DWARF debug sections. | ||
64 | * Symbols in the DWARF debugging sections are relative to the | ||
65 | * beginning of the section so we begin them at 0. | ||
66 | */ | ||
67 | /* DWARF 1 */ | ||
68 | .debug 0 : { *(.debug) } | ||
69 | .line 0 : { *(.line) } | ||
70 | /* GNU DWARF 1 extensions */ | ||
71 | .debug_srcinfo 0 : { *(.debug_srcinfo) } | ||
72 | .debug_sfnames 0 : { *(.debug_sfnames) } | ||
73 | /* DWARF 1.1 and DWARF 2 */ | ||
74 | .debug_aranges 0 : { *(.debug_aranges) } | ||
75 | .debug_pubnames 0 : { *(.debug_pubnames) } | ||
76 | /* DWARF 2 */ | ||
77 | .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } | ||
78 | .debug_abbrev 0 : { *(.debug_abbrev) } | ||
79 | .debug_line 0 : { *(.debug_line) } | ||
80 | .debug_frame 0 : { *(.debug_frame) } | ||
81 | .debug_str 0 : { *(.debug_str) } | ||
82 | .debug_loc 0 : { *(.debug_loc) } | ||
83 | .debug_macinfo 0 : { *(.debug_macinfo) } | ||
84 | /* SGI/MIPS DWARF 2 extensions */ | ||
85 | .debug_weaknames 0 : { *(.debug_weaknames) } | ||
86 | .debug_funcnames 0 : { *(.debug_funcnames) } | ||
87 | .debug_typenames 0 : { *(.debug_typenames) } | ||
88 | .debug_varnames 0 : { *(.debug_varnames) } | ||
89 | /* DWARF 3 */ | ||
90 | .debug_pubtypes 0 : { *(.debug_pubtypes) } | ||
91 | .debug_ranges 0 : { *(.debug_ranges) } | ||
92 | .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } | ||
93 | |||
94 | . = ALIGN(4096); | ||
95 | PROVIDE(_vdso_data = .); | ||
96 | |||
97 | /DISCARD/ : { | ||
98 | *(.note.GNU-stack) | ||
99 | *(.branch_lt) | ||
100 | *(.data .data.* .gnu.linkonce.d.* .sdata*) | ||
101 | *(.bss .sbss .dynbss .dynsbss) | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Very old versions of ld do not recognize this name token; use the constant. | ||
107 | */ | ||
108 | #define PT_GNU_EH_FRAME 0x6474e550 | ||
109 | |||
110 | /* | ||
111 | * We must supply the ELF program headers explicitly to get just one | ||
112 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
113 | */ | ||
114 | PHDRS | ||
115 | { | ||
116 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | ||
117 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
118 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
119 | eh_frame_hdr PT_GNU_EH_FRAME; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * This controls what symbols we export from the DSO. | ||
124 | */ | ||
125 | VERSION | ||
126 | { | ||
127 | VDSO_VERSION_STRING { | ||
128 | global: | ||
129 | /* | ||
130 | * Has to be there for the kernel to find | ||
131 | */ | ||
132 | __kernel_gettimeofday; | ||
133 | __kernel_clock_gettime; | ||
134 | __kernel_clock_getres; | ||
135 | |||
136 | local: *; | ||
137 | }; | ||
138 | } | ||
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S new file mode 100644 index 000000000000..d8e2ac14d564 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <asm/page.h> | ||
3 | |||
4 | .section ".data.page_aligned" | ||
5 | |||
6 | .globl vdso64_start, vdso64_end | ||
7 | .balign PAGE_SIZE | ||
8 | vdso64_start: | ||
9 | .incbin "arch/s390/kernel/vdso64/vdso64.so" | ||
10 | .balign PAGE_SIZE | ||
11 | vdso64_end: | ||
12 | |||
13 | .previous | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 0fa5dc5d68e1..75a6e62ea973 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -27,7 +27,6 @@ | |||
27 | static ext_int_info_t ext_int_info_timer; | 27 | static ext_int_info_t ext_int_info_timer; |
28 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | 28 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); |
29 | 29 | ||
30 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
31 | /* | 30 | /* |
32 | * Update process times based on virtual cpu times stored by entry.S | 31 | * Update process times based on virtual cpu times stored by entry.S |
33 | * to the lowcore fields user_timer, system_timer & steal_clock. | 32 | * to the lowcore fields user_timer, system_timer & steal_clock. |
@@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires) | |||
125 | /* store expire time for this CPU timer */ | 124 | /* store expire time for this CPU timer */ |
126 | __get_cpu_var(virt_cpu_timer).to_expire = expires; | 125 | __get_cpu_var(virt_cpu_timer).to_expire = expires; |
127 | } | 126 | } |
128 | #else | ||
129 | static inline void set_vtimer(__u64 expires) | ||
130 | { | ||
131 | S390_lowcore.last_update_timer = expires; | ||
132 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); | ||
133 | |||
134 | /* store expire time for this CPU timer */ | ||
135 | __get_cpu_var(virt_cpu_timer).to_expire = expires; | ||
136 | } | ||
137 | #endif | ||
138 | 127 | ||
139 | void vtime_start_cpu_timer(void) | 128 | void vtime_start_cpu_timer(void) |
140 | { | 129 | { |
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 580fc64cc735..5c8457129603 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * (C) IBM Corporation 2002-2004 | 7 | * (C) IBM Corporation 2002-2004 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "extmem" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/string.h> | 14 | #include <linux/string.h> |
12 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
@@ -24,19 +27,6 @@ | |||
24 | #include <asm/cpcmd.h> | 27 | #include <asm/cpcmd.h> |
25 | #include <asm/setup.h> | 28 | #include <asm/setup.h> |
26 | 29 | ||
27 | #define DCSS_DEBUG /* Debug messages on/off */ | ||
28 | |||
29 | #define DCSS_NAME "extmem" | ||
30 | #ifdef DCSS_DEBUG | ||
31 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x) | ||
32 | #else | ||
33 | #define PRINT_DEBUG(x...) do {} while (0) | ||
34 | #endif | ||
35 | #define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x) | ||
36 | #define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x) | ||
37 | #define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x) | ||
38 | |||
39 | |||
40 | #define DCSS_LOADSHR 0x00 | 30 | #define DCSS_LOADSHR 0x00 |
41 | #define DCSS_LOADNSR 0x04 | 31 | #define DCSS_LOADNSR 0x04 |
42 | #define DCSS_PURGESEG 0x08 | 32 | #define DCSS_PURGESEG 0x08 |
@@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg) | |||
286 | goto out_free; | 276 | goto out_free; |
287 | } | 277 | } |
288 | if (diag_cc > 1) { | 278 | if (diag_cc > 1) { |
289 | PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc); | 279 | pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc); |
290 | rc = dcss_diag_translate_rc (vmrc); | 280 | rc = dcss_diag_translate_rc (vmrc); |
291 | goto out_free; | 281 | goto out_free; |
292 | } | 282 | } |
@@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg) | |||
368 | * -EIO : could not perform query diagnose | 358 | * -EIO : could not perform query diagnose |
369 | * -ENOENT : no such segment | 359 | * -ENOENT : no such segment |
370 | * -ENOTSUPP: multi-part segment cannot be used with linux | 360 | * -ENOTSUPP: multi-part segment cannot be used with linux |
371 | * -ENOSPC : segment cannot be used (overlaps with storage) | ||
372 | * -ENOMEM : out of memory | 361 | * -ENOMEM : out of memory |
373 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h | 362 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h |
374 | */ | 363 | */ |
@@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
480 | goto out_resource; | 469 | goto out_resource; |
481 | } | 470 | } |
482 | if (diag_cc > 1) { | 471 | if (diag_cc > 1) { |
483 | PRINT_WARN ("segment_load: could not load segment %s - " | 472 | pr_warning("Loading DCSS %s failed with rc=%ld\n", name, |
484 | "diag returned error (%ld)\n", | 473 | end_addr); |
485 | name, end_addr); | ||
486 | rc = dcss_diag_translate_rc(end_addr); | 474 | rc = dcss_diag_translate_rc(end_addr); |
487 | dcss_diag(&purgeseg_scode, seg->dcss_name, | 475 | dcss_diag(&purgeseg_scode, seg->dcss_name, |
488 | &dummy, &dummy); | 476 | &dummy, &dummy); |
@@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
496 | *addr = seg->start_addr; | 484 | *addr = seg->start_addr; |
497 | *end = seg->end; | 485 | *end = seg->end; |
498 | if (do_nonshared) | 486 | if (do_nonshared) |
499 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | 487 | pr_info("DCSS %s of range %p to %p and type %s loaded as " |
500 | "type %s in non-shared mode\n", name, | 488 | "exclusive-writable\n", name, (void*) seg->start_addr, |
501 | (void*)seg->start_addr, (void*)seg->end, | 489 | (void*) seg->end, segtype_string[seg->vm_segtype]); |
502 | segtype_string[seg->vm_segtype]); | ||
503 | else { | 490 | else { |
504 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | 491 | pr_info("DCSS %s of range %p to %p and type %s loaded in " |
505 | "type %s in shared mode\n", name, | 492 | "shared access mode\n", name, (void*) seg->start_addr, |
506 | (void*)seg->start_addr, (void*)seg->end, | 493 | (void*) seg->end, segtype_string[seg->vm_segtype]); |
507 | segtype_string[seg->vm_segtype]); | ||
508 | } | 494 | } |
509 | goto out; | 495 | goto out; |
510 | out_resource: | 496 | out_resource: |
@@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared) | |||
593 | goto out_unlock; | 579 | goto out_unlock; |
594 | } | 580 | } |
595 | if (do_nonshared == seg->do_nonshared) { | 581 | if (do_nonshared == seg->do_nonshared) { |
596 | PRINT_INFO ("segment_modify_shared: not reloading segment %s" | 582 | pr_info("DCSS %s is already in the requested access " |
597 | " - already in requested mode\n",name); | 583 | "mode\n", name); |
598 | rc = 0; | 584 | rc = 0; |
599 | goto out_unlock; | 585 | goto out_unlock; |
600 | } | 586 | } |
601 | if (atomic_read (&seg->ref_count) != 1) { | 587 | if (atomic_read (&seg->ref_count) != 1) { |
602 | PRINT_WARN ("segment_modify_shared: not reloading segment %s - " | 588 | pr_warning("DCSS %s is in use and cannot be reloaded\n", |
603 | "segment is in use by other driver(s)\n",name); | 589 | name); |
604 | rc = -EAGAIN; | 590 | rc = -EAGAIN; |
605 | goto out_unlock; | 591 | goto out_unlock; |
606 | } | 592 | } |
@@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared) | |||
613 | seg->res->flags |= IORESOURCE_READONLY; | 599 | seg->res->flags |= IORESOURCE_READONLY; |
614 | 600 | ||
615 | if (request_resource(&iomem_resource, seg->res)) { | 601 | if (request_resource(&iomem_resource, seg->res)) { |
616 | PRINT_WARN("segment_modify_shared: could not reload segment %s" | 602 | pr_warning("DCSS %s overlaps with used memory resources " |
617 | " - overlapping resources\n", name); | 603 | "and cannot be reloaded\n", name); |
618 | rc = -EBUSY; | 604 | rc = -EBUSY; |
619 | kfree(seg->res); | 605 | kfree(seg->res); |
620 | goto out_del_mem; | 606 | goto out_del_mem; |
@@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared) | |||
632 | goto out_del_res; | 618 | goto out_del_res; |
633 | } | 619 | } |
634 | if (diag_cc > 1) { | 620 | if (diag_cc > 1) { |
635 | PRINT_WARN ("segment_modify_shared: could not reload segment %s" | 621 | pr_warning("Reloading DCSS %s failed with rc=%ld\n", name, |
636 | " - diag returned error (%ld)\n", | 622 | end_addr); |
637 | name, end_addr); | ||
638 | rc = dcss_diag_translate_rc(end_addr); | 623 | rc = dcss_diag_translate_rc(end_addr); |
639 | goto out_del_res; | 624 | goto out_del_res; |
640 | } | 625 | } |
@@ -673,8 +658,7 @@ segment_unload(char *name) | |||
673 | mutex_lock(&dcss_lock); | 658 | mutex_lock(&dcss_lock); |
674 | seg = segment_by_name (name); | 659 | seg = segment_by_name (name); |
675 | if (seg == NULL) { | 660 | if (seg == NULL) { |
676 | PRINT_ERR ("could not find segment %s in segment_unload, " | 661 | pr_err("Unloading unknown DCSS %s failed\n", name); |
677 | "please report to linux390@de.ibm.com\n",name); | ||
678 | goto out_unlock; | 662 | goto out_unlock; |
679 | } | 663 | } |
680 | if (atomic_dec_return(&seg->ref_count) != 0) | 664 | if (atomic_dec_return(&seg->ref_count) != 0) |
@@ -709,8 +693,7 @@ segment_save(char *name) | |||
709 | seg = segment_by_name (name); | 693 | seg = segment_by_name (name); |
710 | 694 | ||
711 | if (seg == NULL) { | 695 | if (seg == NULL) { |
712 | PRINT_ERR("could not find segment %s in segment_save, please " | 696 | pr_err("Saving unknown DCSS %s failed\n", name); |
713 | "report to linux390@de.ibm.com\n", name); | ||
714 | goto out; | 697 | goto out; |
715 | } | 698 | } |
716 | 699 | ||
@@ -727,14 +710,14 @@ segment_save(char *name) | |||
727 | response = 0; | 710 | response = 0; |
728 | cpcmd(cmd1, NULL, 0, &response); | 711 | cpcmd(cmd1, NULL, 0, &response); |
729 | if (response) { | 712 | if (response) { |
730 | PRINT_ERR("segment_save: DEFSEG failed with response code %i\n", | 713 | pr_err("Saving a DCSS failed with DEFSEG response code " |
731 | response); | 714 | "%i\n", response); |
732 | goto out; | 715 | goto out; |
733 | } | 716 | } |
734 | cpcmd(cmd2, NULL, 0, &response); | 717 | cpcmd(cmd2, NULL, 0, &response); |
735 | if (response) { | 718 | if (response) { |
736 | PRINT_ERR("segment_save: SAVESEG failed with response code %i\n", | 719 | pr_err("Saving a DCSS failed with SAVESEG response code " |
737 | response); | 720 | "%i\n", response); |
738 | goto out; | 721 | goto out; |
739 | } | 722 | } |
740 | out: | 723 | out: |
@@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name) | |||
749 | { | 732 | { |
750 | switch (rc) { | 733 | switch (rc) { |
751 | case -ENOENT: | 734 | case -ENOENT: |
752 | PRINT_WARN("cannot load/query segment %s, " | 735 | pr_err("DCSS %s cannot be loaded or queried\n", seg_name); |
753 | "does not exist\n", seg_name); | ||
754 | break; | 736 | break; |
755 | case -ENOSYS: | 737 | case -ENOSYS: |
756 | PRINT_WARN("cannot load/query segment %s, " | 738 | pr_err("DCSS %s cannot be loaded or queried without " |
757 | "not running on VM\n", seg_name); | 739 | "z/VM\n", seg_name); |
758 | break; | 740 | break; |
759 | case -EIO: | 741 | case -EIO: |
760 | PRINT_WARN("cannot load/query segment %s, " | 742 | pr_err("Loading or querying DCSS %s resulted in a " |
761 | "hardware error\n", seg_name); | 743 | "hardware error\n", seg_name); |
762 | break; | 744 | break; |
763 | case -ENOTSUPP: | 745 | case -ENOTSUPP: |
764 | PRINT_WARN("cannot load/query segment %s, " | 746 | pr_err("DCSS %s has multiple page ranges and cannot be " |
765 | "is a multi-part segment\n", seg_name); | 747 | "loaded or queried\n", seg_name); |
766 | break; | 748 | break; |
767 | case -ENOSPC: | 749 | case -ENOSPC: |
768 | PRINT_WARN("cannot load/query segment %s, " | 750 | pr_err("DCSS %s overlaps with used storage and cannot " |
769 | "overlaps with storage\n", seg_name); | 751 | "be loaded\n", seg_name); |
770 | break; | 752 | break; |
771 | case -EBUSY: | 753 | case -EBUSY: |
772 | PRINT_WARN("cannot load/query segment %s, " | 754 | pr_err("%s needs used memory resources and cannot be " |
773 | "overlaps with already loaded dcss\n", seg_name); | 755 | "loaded or queried\n", seg_name); |
774 | break; | 756 | break; |
775 | case -EPERM: | 757 | case -EPERM: |
776 | PRINT_WARN("cannot load/query segment %s, " | 758 | pr_err("DCSS %s is already loaded in a different access " |
777 | "already loaded in incompatible mode\n", seg_name); | 759 | "mode\n", seg_name); |
778 | break; | 760 | break; |
779 | case -ENOMEM: | 761 | case -ENOMEM: |
780 | PRINT_WARN("cannot load/query segment %s, " | 762 | pr_err("There is not enough memory to load or query " |
781 | "out of memory\n", seg_name); | 763 | "DCSS %s\n", seg_name); |
782 | break; | 764 | break; |
783 | case -ERANGE: | 765 | case -ERANGE: |
784 | PRINT_WARN("cannot load/query segment %s, " | 766 | pr_err("DCSS %s exceeds the kernel mapping range (%lu) " |
785 | "exceeds kernel mapping range\n", seg_name); | 767 | "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS); |
786 | break; | 768 | break; |
787 | default: | 769 | default: |
788 | PRINT_WARN("cannot load/query segment %s, " | ||
789 | "return value %i\n", seg_name, rc); | ||
790 | break; | 770 | break; |
791 | } | 771 | } |
792 | } | 772 | } |
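The extmem.c conversion replaces the driver-private PRINT_* macros with the generic pr_err/pr_warning/pr_info helpers; defining KMSG_COMPONENT and pr_fmt() before the includes is what gives every message the "extmem: " prefix. The kernel's pr_* macros expand pr_fmt(fmt) around each format string before calling printk, which the following userspace stand-ins imitate (the wrapper macros and main() are illustrative additions, the component and prefix definitions are taken from the hunk):

/*
 * Userspace illustration of the pr_fmt() prefixing used above; the
 * pr_info/pr_err wrappers are stand-ins for the kernel macros, which
 * perform the same pr_fmt() expansion before handing off to printk.
 */
#include <stdio.h>

#define KMSG_COMPONENT "extmem"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: extmem: DCSS TESTSEG is already in the requested access mode */
	pr_info("DCSS %s is already in the requested access mode\n", "TESTSEG");
	return 0;
}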
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h index b809f22ea638..ccb1d93bb043 100644 --- a/arch/sh/include/asm/elf.h +++ b/arch/sh/include/asm/elf.h | |||
@@ -202,7 +202,7 @@ do { \ | |||
202 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | 202 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES |
203 | struct linux_binprm; | 203 | struct linux_binprm; |
204 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | 204 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, |
205 | int executable_stack); | 205 | int uses_interp); |
206 | 206 | ||
207 | extern unsigned int vdso_enabled; | 207 | extern unsigned int vdso_enabled; |
208 | extern void __kernel_vsyscall; | 208 | extern void __kernel_vsyscall; |
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c index 95f4de0800ec..3f7e415be86a 100644 --- a/arch/sh/kernel/vsyscall/vsyscall.c +++ b/arch/sh/kernel/vsyscall/vsyscall.c | |||
@@ -59,8 +59,7 @@ int __init vsyscall_init(void) | |||
59 | } | 59 | } |
60 | 60 | ||
61 | /* Setup a VMA at program startup for the vsyscall page */ | 61 | /* Setup a VMA at program startup for the vsyscall page */ |
62 | int arch_setup_additional_pages(struct linux_binprm *bprm, | 62 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) |
63 | int executable_stack) | ||
64 | { | 63 | { |
65 | struct mm_struct *mm = current->mm; | 64 | struct mm_struct *mm = current->mm; |
66 | unsigned long addr; | 65 | unsigned long addr; |
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index 40ca1bea7916..f51a3ddde01a 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h | |||
@@ -325,7 +325,7 @@ struct linux_binprm; | |||
325 | 325 | ||
326 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 | 326 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 |
327 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | 327 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, |
328 | int executable_stack); | 328 | int uses_interp); |
329 | 329 | ||
330 | extern int syscall32_setup_pages(struct linux_binprm *, int exstack); | 330 | extern int syscall32_setup_pages(struct linux_binprm *, int exstack); |
331 | #define compat_arch_setup_additional_pages syscall32_setup_pages | 331 | #define compat_arch_setup_additional_pages syscall32_setup_pages |
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 513f330c5832..1241f118ab56 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -310,7 +310,7 @@ int __init sysenter_setup(void) | |||
310 | } | 310 | } |
311 | 311 | ||
312 | /* Setup a VMA at program startup for the vsyscall page */ | 312 | /* Setup a VMA at program startup for the vsyscall page */ |
313 | int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) | 313 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) |
314 | { | 314 | { |
315 | struct mm_struct *mm = current->mm; | 315 | struct mm_struct *mm = current->mm; |
316 | unsigned long addr; | 316 | unsigned long addr; |
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 257ba4a10abf..9c98cc6ba978 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c | |||
@@ -98,7 +98,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) | |||
98 | 98 | ||
99 | /* Setup a VMA at program startup for the vsyscall page. | 99 | /* Setup a VMA at program startup for the vsyscall page. |
100 | Not called for compat tasks */ | 100 | Not called for compat tasks */ |
101 | int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack) | 101 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) |
102 | { | 102 | { |
103 | struct mm_struct *mm = current->mm; | 103 | struct mm_struct *mm = current->mm; |
104 | unsigned long addr; | 104 | unsigned long addr; |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 43d6ba83a191..8783457b93d3 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -622,6 +622,16 @@ config HVC_BEAT | |||
622 | help | 622 | help |
623 | Toshiba's Cell Reference Set Beat Console device driver | 623 | Toshiba's Cell Reference Set Beat Console device driver |
624 | 624 | ||
625 | config HVC_IUCV | ||
626 | bool "z/VM IUCV Hypervisor console support (VM only)" | ||
627 | depends on S390 | ||
628 | select HVC_DRIVER | ||
629 | select IUCV | ||
630 | default y | ||
631 | help | ||
632 | This driver provides a Hypervisor console (HVC) back-end to access | ||
633 | a Linux (console) terminal via a z/VM IUCV communication path. | ||
634 | |||
625 | config HVC_XEN | 635 | config HVC_XEN |
626 | bool "Xen Hypervisor Console support" | 636 | bool "Xen Hypervisor Console support" |
627 | depends on XEN | 637 | depends on XEN |
diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 438f71317c5c..36151bae0d72 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile | |||
@@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o | |||
50 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o | 50 | obj-$(CONFIG_HVC_DRIVER) += hvc_console.o |
51 | obj-$(CONFIG_HVC_IRQ) += hvc_irq.o | 51 | obj-$(CONFIG_HVC_IRQ) += hvc_irq.o |
52 | obj-$(CONFIG_HVC_XEN) += hvc_xen.o | 52 | obj-$(CONFIG_HVC_XEN) += hvc_xen.o |
53 | obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o | ||
53 | obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o | 54 | obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o |
54 | obj-$(CONFIG_RAW_DRIVER) += raw.o | 55 | obj-$(CONFIG_RAW_DRIVER) += raw.o |
55 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o | 56 | obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o |
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c new file mode 100644 index 000000000000..5ea7d7713fca --- /dev/null +++ b/drivers/char/hvc_iucv.c | |||
@@ -0,0 +1,850 @@ | |||
1 | /* | ||
2 | * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC) | ||
3 | * | ||
4 | * This back-end for HVC provides terminal access via | ||
5 | * z/VM IUCV communication paths. | ||
6 | * | ||
7 | * Copyright IBM Corp. 2008. | ||
8 | * | ||
9 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | ||
10 | */ | ||
11 | #define KMSG_COMPONENT "hvc_iucv" | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <asm/ebcdic.h> | ||
15 | #include <linux/mempool.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/tty.h> | ||
18 | #include <net/iucv/iucv.h> | ||
19 | |||
20 | #include "hvc_console.h" | ||
21 | |||
22 | |||
23 | /* HVC backend for z/VM IUCV */ | ||
24 | #define HVC_IUCV_MAGIC 0xc9e4c3e5 | ||
25 | #define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS | ||
26 | #define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4) | ||
27 | |||
28 | /* IUCV TTY message */ | ||
29 | #define MSG_VERSION 0x02 /* Message version */ | ||
30 | #define MSG_TYPE_ERROR 0x01 /* Error message */ | ||
31 | #define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */ | ||
32 | #define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */ | ||
33 | #define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */ | ||
34 | #define MSG_TYPE_DATA 0x10 /* Terminal data */ | ||
35 | |||
36 | #define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data)) | ||
37 | struct iucv_tty_msg { | ||
38 | u8 version; /* Message version */ | ||
39 | u8 type; /* Message type */ | ||
40 | #define MSG_MAX_DATALEN (~(u16)0) | ||
41 | u16 datalen; /* Payload length */ | ||
42 | u8 data[]; /* Payload buffer */ | ||
43 | } __attribute__((packed)); | ||
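As a quick sketch (not part of the patch), MSG_SIZE() turns a payload length into the total IUCV message length by adding the fixed header that precedes the flexible data[] array; with the packed layout above the header is 4 bytes:

	size_t payload = 80;
	size_t msglen = MSG_SIZE(payload);	/* 80 + offsetof(struct iucv_tty_msg, data) == 84 */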
44 | |||
45 | enum iucv_state_t { | ||
46 | IUCV_DISCONN = 0, | ||
47 | IUCV_CONNECTED = 1, | ||
48 | IUCV_SEVERED = 2, | ||
49 | }; | ||
50 | |||
51 | enum tty_state_t { | ||
52 | TTY_CLOSED = 0, | ||
53 | TTY_OPENED = 1, | ||
54 | }; | ||
55 | |||
56 | struct hvc_iucv_private { | ||
57 | struct hvc_struct *hvc; /* HVC console struct reference */ | ||
58 | u8 srv_name[8]; /* IUCV service name (ebcdic) */ | ||
59 | enum iucv_state_t iucv_state; /* IUCV connection status */ | ||
60 | enum tty_state_t tty_state; /* TTY status */ | ||
61 | struct iucv_path *path; /* IUCV path pointer */ | ||
62 | spinlock_t lock; /* hvc_iucv_private lock */ | ||
63 | struct list_head tty_outqueue; /* outgoing IUCV messages */ | ||
64 | struct list_head tty_inqueue; /* incoming IUCV messages */ | ||
65 | }; | ||
66 | |||
67 | struct iucv_tty_buffer { | ||
68 | struct list_head list; /* list pointer */ | ||
69 | struct iucv_message msg; /* store an incoming IUCV message */ | ||
70 | size_t offset; /* data buffer offset */ | ||
71 | struct iucv_tty_msg *mbuf; /* buffer to store input/output data */ | ||
72 | }; | ||
73 | |||
74 | /* IUCV callback handler */ | ||
75 | static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]); | ||
76 | static void hvc_iucv_path_severed(struct iucv_path *, u8[16]); | ||
77 | static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *); | ||
78 | static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *); | ||
79 | |||
80 | |||
81 | /* Kernel module parameters */ | ||
82 | static unsigned long hvc_iucv_devices; | ||
83 | |||
84 | /* Array of allocated hvc iucv tty lines... */ | ||
85 | static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES]; | ||
86 | |||
87 | /* Kmem cache and mempool for iucv_tty_buffer elements */ | ||
88 | static struct kmem_cache *hvc_iucv_buffer_cache; | ||
89 | static mempool_t *hvc_iucv_mempool; | ||
90 | |||
91 | /* IUCV handler callback functions */ | ||
92 | static struct iucv_handler hvc_iucv_handler = { | ||
93 | .path_pending = hvc_iucv_path_pending, | ||
94 | .path_severed = hvc_iucv_path_severed, | ||
95 | .message_complete = hvc_iucv_msg_complete, | ||
96 | .message_pending = hvc_iucv_msg_pending, | ||
97 | }; | ||
98 | |||
99 | |||
100 | /** | ||
101 | * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance. | ||
102 | * @num: The HVC virtual terminal number (vtermno) | ||
103 | * | ||
104 | * This function returns the struct hvc_iucv_private instance that corresponds | ||
105 | * to the HVC virtual terminal number specified as parameter @num. | ||
106 | */ | ||
107 | struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num) | ||
108 | { | ||
109 | if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices)) | ||
110 | return NULL; | ||
111 | return hvc_iucv_table[num - HVC_IUCV_MAGIC]; | ||
112 | } | ||
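In other words, terminal index i is registered under vtermno HVC_IUCV_MAGIC + i; a minimal lookup sketch with an illustrative index:

	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(HVC_IUCV_MAGIC + 2);
	/* priv == hvc_iucv_table[2], or NULL if fewer than three lines are configured */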
113 | |||
114 | /** | ||
115 | * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element. | ||
116 | * @size: Size of the internal buffer used to store data. | ||
117 | * @flags: Memory allocation flags passed to mempool. | ||
118 | * | ||
119 | * This function allocates a new struct iucv_tty_buffer element and, optionally, | ||
120 | * allocates an internal data buffer with the specified size @size. | ||
121 | * Note: The total message size arises from the internal buffer size and the | ||
122 | * members of the iucv_tty_msg structure. | ||
123 | * | ||
124 | * The function returns NULL if memory allocation has failed. | ||
125 | */ | ||
126 | static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags) | ||
127 | { | ||
128 | struct iucv_tty_buffer *bufp; | ||
129 | |||
130 | bufp = mempool_alloc(hvc_iucv_mempool, flags); | ||
131 | if (!bufp) | ||
132 | return NULL; | ||
133 | memset(bufp, 0, sizeof(struct iucv_tty_buffer)); | ||
134 | |||
135 | if (size > 0) { | ||
136 | bufp->msg.length = MSG_SIZE(size); | ||
137 | bufp->mbuf = kmalloc(bufp->msg.length, flags); | ||
138 | if (!bufp->mbuf) { | ||
139 | mempool_free(bufp, hvc_iucv_mempool); | ||
140 | return NULL; | ||
141 | } | ||
142 | bufp->mbuf->version = MSG_VERSION; | ||
143 | bufp->mbuf->type = MSG_TYPE_DATA; | ||
144 | bufp->mbuf->datalen = (u16) size; | ||
145 | } | ||
146 | return bufp; | ||
147 | } | ||
148 | |||
149 | /** | ||
150 | * destroy_tty_buffer() - destroy struct iucv_tty_buffer element. | ||
151 | * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL. | ||
152 | * | ||
153 | * The destroy_tty_buffer() function frees the internal data buffer and returns | ||
154 | * the struct iucv_tty_buffer element back to the mempool for freeing. | ||
155 | */ | ||
156 | static void destroy_tty_buffer(struct iucv_tty_buffer *bufp) | ||
157 | { | ||
158 | kfree(bufp->mbuf); | ||
159 | mempool_free(bufp, hvc_iucv_mempool); | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element. | ||
164 | * @list: List head pointer to a list containing struct iucv_tty_buffer | ||
165 | * elements. | ||
166 | * | ||
167 | * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the | ||
168 | * list @list. | ||
169 | */ | ||
170 | static void destroy_tty_buffer_list(struct list_head *list) | ||
171 | { | ||
172 | struct iucv_tty_buffer *ent, *next; | ||
173 | |||
174 | list_for_each_entry_safe(ent, next, list, list) { | ||
175 | list_del(&ent->list); | ||
176 | destroy_tty_buffer(ent); | ||
177 | } | ||
178 | } | ||
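A minimal usage sketch of these buffer helpers (payload and allocation flags chosen for illustration):

	struct iucv_tty_buffer *sb = alloc_tty_buffer(5, GFP_ATOMIC);

	if (sb) {
		memcpy(sb->mbuf->data, "hello", 5);	/* mbuf->datalen was preset to 5 */
		/* ... hand sb->msg and sb->mbuf to __iucv_message_send() ... */
		destroy_tty_buffer(sb);			/* kfree()s mbuf, returns sb to the mempool */
	}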
179 | |||
180 | /** | ||
181 | * hvc_iucv_write() - Receive an IUCV message and write data to the HVC console buffer. | ||
182 | * @priv: Pointer to hvc_iucv_private structure. | ||
183 | * @buf: HVC console buffer for writing received terminal data. | ||
184 | * @count: HVC console buffer size. | ||
185 | * @has_more_data: Pointer to an int variable. | ||
186 | * | ||
187 | * The function picks up pending messages from the input queue and receives | ||
188 | * the message data that is then written to the specified buffer @buf. | ||
189 | * If the buffer size @count is less than the data message size, then the | ||
190 | * message is kept on the input queue and @has_more_data is set to 1. | ||
191 | * If the message data has been entirely written, the message is removed from | ||
192 | * the input queue. | ||
193 | * | ||
194 | * The function returns the number of bytes written to the terminal, zero if | ||
195 | * there are no pending data messages available or if there is no established | ||
196 | * IUCV path. | ||
197 | * If the IUCV path has been severed, then -EPIPE is returned to cause a | ||
198 | * hang up (that is issued by the HVC console layer). | ||
199 | */ | ||
200 | static int hvc_iucv_write(struct hvc_iucv_private *priv, | ||
201 | char *buf, int count, int *has_more_data) | ||
202 | { | ||
203 | struct iucv_tty_buffer *rb; | ||
204 | int written; | ||
205 | int rc; | ||
206 | |||
207 | /* Immediately return if there is no IUCV connection */ | ||
208 | if (priv->iucv_state == IUCV_DISCONN) | ||
209 | return 0; | ||
210 | |||
211 | /* If the IUCV path has been severed, return -EPIPE to inform the | ||
212 | * hvc console layer to hang up the tty device. */ | ||
213 | if (priv->iucv_state == IUCV_SEVERED) | ||
214 | return -EPIPE; | ||
215 | |||
216 | /* check if there are pending messages */ | ||
217 | if (list_empty(&priv->tty_inqueue)) | ||
218 | return 0; | ||
219 | |||
220 | /* receive a iucv message and flip data to the tty (ldisc) */ | ||
221 | rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); | ||
222 | |||
223 | written = 0; | ||
224 | if (!rb->mbuf) { /* message not yet received ... */ | ||
225 | /* allocate mem to store msg data; if no memory is available | ||
226 | * then leave the buffer on the list and re-try later */ | ||
227 | rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC); | ||
228 | if (!rb->mbuf) | ||
229 | return -ENOMEM; | ||
230 | |||
231 | rc = __iucv_message_receive(priv->path, &rb->msg, 0, | ||
232 | rb->mbuf, rb->msg.length, NULL); | ||
233 | switch (rc) { | ||
234 | case 0: /* Successful */ | ||
235 | break; | ||
236 | case 2: /* No message found */ | ||
237 | case 9: /* Message purged */ | ||
238 | break; | ||
239 | default: | ||
240 | written = -EIO; | ||
241 | } | ||
242 | /* remove buffer if an error has occurred or received data | ||
243 | * is not correct */ | ||
244 | if (rc || (rb->mbuf->version != MSG_VERSION) || | ||
245 | (rb->msg.length != MSG_SIZE(rb->mbuf->datalen))) | ||
246 | goto out_remove_buffer; | ||
247 | } | ||
248 | |||
249 | switch (rb->mbuf->type) { | ||
250 | case MSG_TYPE_DATA: | ||
251 | written = min_t(int, rb->mbuf->datalen - rb->offset, count); | ||
252 | memcpy(buf, rb->mbuf->data + rb->offset, written); | ||
253 | if (written < (rb->mbuf->datalen - rb->offset)) { | ||
254 | rb->offset += written; | ||
255 | *has_more_data = 1; | ||
256 | goto out_written; | ||
257 | } | ||
258 | break; | ||
259 | |||
260 | case MSG_TYPE_WINSIZE: | ||
261 | if (rb->mbuf->datalen != sizeof(struct winsize)) | ||
262 | break; | ||
263 | hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data)); | ||
264 | break; | ||
265 | |||
266 | case MSG_TYPE_ERROR: /* ignored ... */ | ||
267 | case MSG_TYPE_TERMENV: /* ignored ... */ | ||
268 | case MSG_TYPE_TERMIOS: /* ignored ... */ | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | out_remove_buffer: | ||
273 | list_del(&rb->list); | ||
274 | destroy_tty_buffer(rb); | ||
275 | *has_more_data = !list_empty(&priv->tty_inqueue); | ||
276 | |||
277 | out_written: | ||
278 | return written; | ||
279 | } | ||
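To illustrate the partial-read bookkeeping with hypothetical sizes, a 2000-byte MSG_TYPE_DATA message drained through a 512-byte HVC buffer takes four get_chars() cycles:

	/* call 1: written = 512, rb->offset = 512,  *has_more_data = 1 (buffer kept)
	 * call 2: written = 512, rb->offset = 1024, *has_more_data = 1
	 * call 3: written = 512, rb->offset = 1536, *has_more_data = 1
	 * call 4: written = 464, message consumed, buffer removed from tty_inqueue */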
280 | |||
281 | /** | ||
282 | * hvc_iucv_get_chars() - HVC get_chars operation. | ||
283 | * @vtermno: HVC virtual terminal number. | ||
284 | * @buf: Pointer to a buffer to store data | ||
285 | * @count: Size of buffer available for writing | ||
286 | * | ||
287 | * The hvc_console thread calls this method to read characters from | ||
288 | * the terminal backend. If an IUCV communication path has been established, | ||
289 | * pending IUCV messages are received and data is copied into buffer @buf | ||
290 | * up to @count bytes. | ||
291 | * | ||
292 | * Locking: The routine gets called under an irqsave() spinlock; and | ||
293 | * the routine locks the struct hvc_iucv_private->lock to call | ||
294 | * helper functions. | ||
295 | */ | ||
296 | static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count) | ||
297 | { | ||
298 | struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); | ||
299 | int written; | ||
300 | int has_more_data; | ||
301 | |||
302 | if (count <= 0) | ||
303 | return 0; | ||
304 | |||
305 | if (!priv) | ||
306 | return -ENODEV; | ||
307 | |||
308 | spin_lock(&priv->lock); | ||
309 | has_more_data = 0; | ||
310 | written = hvc_iucv_write(priv, buf, count, &has_more_data); | ||
311 | spin_unlock(&priv->lock); | ||
312 | |||
313 | /* if there are still messages on the queue... schedule another run */ | ||
314 | if (has_more_data) | ||
315 | hvc_kick(); | ||
316 | |||
317 | return written; | ||
318 | } | ||
319 | |||
320 | /** | ||
321 | * hvc_iucv_send() - Send an IUCV message containing terminal data. | ||
322 | * @priv: Pointer to struct hvc_iucv_private instance. | ||
323 | * @buf: Buffer containing data to send. | ||
324 | * @size: Size of buffer and amount of data to send. | ||
325 | * | ||
326 | * If an IUCV communication path is established, the function copies the buffer | ||
327 | * data to a newly allocated struct iucv_tty_buffer element, sends the data and | ||
328 | * puts the element to the outqueue. | ||
329 | * | ||
330 | * If there is no IUCV communication path established, the function returns 0. | ||
331 | * If an existing IUCV communication path has been severed, the function returns | ||
332 | * -EPIPE (can be passed to HVC layer to cause a tty hangup). | ||
333 | */ | ||
334 | static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf, | ||
335 | int count) | ||
336 | { | ||
337 | struct iucv_tty_buffer *sb; | ||
338 | int rc; | ||
339 | u16 len; | ||
340 | |||
341 | if (priv->iucv_state == IUCV_SEVERED) | ||
342 | return -EPIPE; | ||
343 | |||
344 | if (priv->iucv_state == IUCV_DISCONN) | ||
345 | return 0; | ||
346 | |||
347 | len = min_t(u16, MSG_MAX_DATALEN, count); | ||
348 | |||
349 | /* allocate internal buffer to store msg data and also compute total | ||
350 | * message length */ | ||
351 | sb = alloc_tty_buffer(len, GFP_ATOMIC); | ||
352 | if (!sb) | ||
353 | return -ENOMEM; | ||
354 | |||
355 | sb->mbuf->datalen = len; | ||
356 | memcpy(sb->mbuf->data, buf, len); | ||
357 | |||
358 | list_add_tail(&sb->list, &priv->tty_outqueue); | ||
359 | |||
360 | rc = __iucv_message_send(priv->path, &sb->msg, 0, 0, | ||
361 | (void *) sb->mbuf, sb->msg.length); | ||
362 | if (rc) { | ||
363 | list_del(&sb->list); | ||
364 | destroy_tty_buffer(sb); | ||
365 | len = 0; | ||
366 | } | ||
367 | |||
368 | return len; | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * hvc_iucv_put_chars() - HVC put_chars operation. | ||
373 | * @vtermno: HVC virtual terminal number. | ||
374 | * @buf: Pointer to a buffer to read data from | ||
375 | * @count: Size of buffer available for reading | ||
376 | * | ||
377 | * The hvc_console thread calls this method to write characters | ||
378 | * to the terminal backend. | ||
379 | * The function calls hvc_iucv_send() under the lock of the | ||
380 | * struct hvc_iucv_private instance that corresponds to the tty @vtermno. | ||
381 | * | ||
382 | * Locking: The method gets called under an irqsave() spinlock; and | ||
383 | * locks struct hvc_iucv_private->lock. | ||
384 | */ | ||
385 | static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count) | ||
386 | { | ||
387 | struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno); | ||
388 | int sent; | ||
389 | |||
390 | if (count <= 0) | ||
391 | return 0; | ||
392 | |||
393 | if (!priv) | ||
394 | return -ENODEV; | ||
395 | |||
396 | spin_lock(&priv->lock); | ||
397 | sent = hvc_iucv_send(priv, buf, count); | ||
398 | spin_unlock(&priv->lock); | ||
399 | |||
400 | return sent; | ||
401 | } | ||
402 | |||
403 | /** | ||
404 | * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time. | ||
405 | * @hp: Pointer to the HVC device (struct hvc_struct) | ||
406 | * @id: Additional data (originally passed to hvc_alloc): the index of a struct | ||
407 | * hvc_iucv_private instance. | ||
408 | * | ||
409 | * The function sets the tty state to TTY_OPENED for the struct hvc_iucv_private | ||
410 | * instance that is derived from @id. Always returns 0. | ||
411 | * | ||
412 | * Locking: struct hvc_iucv_private->lock, spin_lock_bh | ||
413 | */ | ||
414 | static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id) | ||
415 | { | ||
416 | struct hvc_iucv_private *priv; | ||
417 | |||
418 | priv = hvc_iucv_get_private(id); | ||
419 | if (!priv) | ||
420 | return 0; | ||
421 | |||
422 | spin_lock_bh(&priv->lock); | ||
423 | priv->tty_state = TTY_OPENED; | ||
424 | spin_unlock_bh(&priv->lock); | ||
425 | |||
426 | return 0; | ||
427 | } | ||
428 | |||
429 | /** | ||
430 | * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed. | ||
431 | * @priv: Pointer to the struct hvc_iucv_private instance. | ||
432 | * | ||
433 | * The function severs the established IUCV communication path (if any), and | ||
434 | * destroys the struct iucv_tty_buffer elements from the in- and outqueues. Finally, | ||
435 | * the function resets the states to TTY_CLOSED and IUCV_DISCONN. | ||
436 | */ | ||
437 | static void hvc_iucv_cleanup(struct hvc_iucv_private *priv) | ||
438 | { | ||
439 | destroy_tty_buffer_list(&priv->tty_outqueue); | ||
440 | destroy_tty_buffer_list(&priv->tty_inqueue); | ||
441 | |||
442 | priv->tty_state = TTY_CLOSED; | ||
443 | priv->iucv_state = IUCV_DISCONN; | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups. | ||
448 | * @hp: Pointer to the HVC device (struct hvc_struct) | ||
449 | * @id: Additional data (originally passed to hvc_alloc): the index of a struct | ||
450 | * hvc_iucv_private instance. | ||
451 | * | ||
452 | * This routine notifies the HVC backend that a tty hangup (carrier loss, | ||
453 | * virtual or otherwise) has occurred. | ||
454 | * | ||
455 | * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep | ||
456 | * an existing IUCV communication path established. | ||
457 | * (Background: vhangup() is called from user space (by getty or login) to | ||
458 | * disable writing to the tty by other applications). | ||
459 | * | ||
460 | * If the tty has been opened (e.g. getty) and an established IUCV path has been | ||
461 | * severed (we caused the tty hangup in that case), then the function invokes | ||
462 | * hvc_iucv_cleanup() to clean up. | ||
463 | * | ||
464 | * Locking: struct hvc_iucv_private->lock | ||
465 | */ | ||
466 | static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id) | ||
467 | { | ||
468 | struct hvc_iucv_private *priv; | ||
469 | |||
470 | priv = hvc_iucv_get_private(id); | ||
471 | if (!priv) | ||
472 | return; | ||
473 | |||
474 | spin_lock_bh(&priv->lock); | ||
475 | /* NOTE: If the hangup was scheduled by ourself (from the iucv | ||
476 | * path_severed callback [IUCV_SEVERED]), then we have to | ||
477 | * finally clean up the tty backend structure and set state to | ||
478 | * TTY_CLOSED. | ||
479 | * | ||
480 | * If the tty was hung up otherwise (e.g. vhangup()), then we | ||
481 | * ignore this hangup and keep an established IUCV path open... | ||
482 | * (...the reason is that we are not able to connect back to the | ||
483 | * client if we disconnect on hang up) */ | ||
484 | priv->tty_state = TTY_CLOSED; | ||
485 | |||
486 | if (priv->iucv_state == IUCV_SEVERED) | ||
487 | hvc_iucv_cleanup(priv); | ||
488 | spin_unlock_bh(&priv->lock); | ||
489 | } | ||
490 | |||
491 | /** | ||
492 | * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time. | ||
493 | * @hp: Pointer to the HVC device (struct hvc_struct) | ||
494 | * @id: Additional data (originally passed to hvc_alloc): | ||
495 | * the index of a struct hvc_iucv_private instance. | ||
496 | * | ||
497 | * This routine notifies the HVC backend that the last tty device file | ||
498 | * descriptor has been closed. | ||
499 | * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private | ||
500 | * instance. | ||
501 | * | ||
502 | * Locking: struct hvc_iucv_private->lock | ||
503 | */ | ||
504 | static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id) | ||
505 | { | ||
506 | struct hvc_iucv_private *priv; | ||
507 | struct iucv_path *path; | ||
508 | |||
509 | priv = hvc_iucv_get_private(id); | ||
510 | if (!priv) | ||
511 | return; | ||
512 | |||
513 | spin_lock_bh(&priv->lock); | ||
514 | path = priv->path; /* save reference to IUCV path */ | ||
515 | priv->path = NULL; | ||
516 | hvc_iucv_cleanup(priv); | ||
517 | spin_unlock_bh(&priv->lock); | ||
518 | |||
519 | /* sever IUCV path outside of priv->lock due to lock ordering of: | ||
520 | * priv->lock <--> iucv_table_lock */ | ||
521 | if (path) { | ||
522 | iucv_path_sever(path, NULL); | ||
523 | iucv_path_free(path); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | /** | ||
528 | * hvc_iucv_path_pending() - IUCV handler to process a connection request. | ||
529 | * @path: Pending path (struct iucv_path) | ||
530 | * @ipvmid: Originator z/VM system identifier | ||
531 | * @ipuser: User specified data for this path | ||
532 | * (AF_IUCV: port/service name and originator port) | ||
533 | * | ||
534 | * The function uses the @ipuser data to determine whether the pending | ||
535 | * path belongs to a terminal managed by this HVC backend. | ||
536 | * If the check is successful, then an additional check is done to ensure | ||
537 | * that a terminal cannot be accessed multiple times (only one connection | ||
538 | * to a terminal is allowed). In that particular case, the pending path is | ||
539 | * severed. If it is the first connection, the pending path is accepted and | ||
540 | * associated to the struct hvc_iucv_private. The iucv state is updated to | ||
541 | * reflect that a communication path has been established. | ||
542 | * | ||
543 | * Returns 0 if the path belongs to a terminal managed by this HVC backend; | ||
544 | * otherwise returns -ENODEV in order to dispatch this path to other handlers. | ||
545 | * | ||
546 | * Locking: struct hvc_iucv_private->lock | ||
547 | */ | ||
548 | static int hvc_iucv_path_pending(struct iucv_path *path, | ||
549 | u8 ipvmid[8], u8 ipuser[16]) | ||
550 | { | ||
551 | struct hvc_iucv_private *priv; | ||
552 | u8 nuser_data[16]; | ||
553 | int i, rc; | ||
554 | |||
555 | priv = NULL; | ||
556 | for (i = 0; i < hvc_iucv_devices; i++) | ||
557 | if (hvc_iucv_table[i] && | ||
558 | (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) { | ||
559 | priv = hvc_iucv_table[i]; | ||
560 | break; | ||
561 | } | ||
562 | |||
563 | if (!priv) | ||
564 | return -ENODEV; | ||
565 | |||
566 | spin_lock(&priv->lock); | ||
567 | |||
568 | /* If the terminal is already connected or being severed, then sever | ||
569 | * this path to enforce that there is only ONE established communication | ||
570 | * path per terminal. */ | ||
571 | if (priv->iucv_state != IUCV_DISCONN) { | ||
572 | iucv_path_sever(path, ipuser); | ||
573 | iucv_path_free(path); | ||
574 | goto out_path_handled; | ||
575 | } | ||
576 | |||
577 | /* accept path */ | ||
578 | memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */ | ||
579 | memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */ | ||
580 | path->msglim = 0xffff; /* IUCV MSGLIMIT */ | ||
581 | path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */ | ||
582 | rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv); | ||
583 | if (rc) { | ||
584 | iucv_path_sever(path, ipuser); | ||
585 | iucv_path_free(path); | ||
586 | goto out_path_handled; | ||
587 | } | ||
588 | priv->path = path; | ||
589 | priv->iucv_state = IUCV_CONNECTED; | ||
590 | |||
591 | out_path_handled: | ||
592 | spin_unlock(&priv->lock); | ||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | /** | ||
597 | * hvc_iucv_path_severed() - IUCV handler to process a path sever. | ||
598 | * @path: Pending path (struct iucv_path) | ||
599 | * @ipuser: User specified data for this path | ||
600 | * (AF_IUCV: port/service name and originator port) | ||
601 | * | ||
602 | * The function also severs the path (as required by the IUCV protocol) and | ||
603 | * sets the iucv state to IUCV_SEVERED for the associated struct | ||
604 | * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty | ||
605 | * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). | ||
606 | * | ||
607 | * If the tty portion of the HVC is closed, the outqueue is cleaned up in addition. | ||
608 | * | ||
609 | * Locking: struct hvc_iucv_private->lock | ||
610 | */ | ||
611 | static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | ||
612 | { | ||
613 | struct hvc_iucv_private *priv = path->private; | ||
614 | |||
615 | spin_lock(&priv->lock); | ||
616 | priv->iucv_state = IUCV_SEVERED; | ||
617 | |||
618 | /* NOTE: If the tty has not yet been opened by a getty program | ||
619 | * (e.g. to see console messages), then cleanup the | ||
620 | * hvc_iucv_private structure to allow re-connects. | ||
621 | * | ||
622 | * If the tty has been opened, the get_chars() callback returns | ||
623 | * -EPIPE to signal the hvc console layer to hang up the tty. */ | ||
624 | priv->path = NULL; | ||
625 | if (priv->tty_state == TTY_CLOSED) | ||
626 | hvc_iucv_cleanup(priv); | ||
627 | spin_unlock(&priv->lock); | ||
628 | |||
629 | /* finally sever path (outside of priv->lock due to lock ordering) */ | ||
630 | iucv_path_sever(path, ipuser); | ||
631 | iucv_path_free(path); | ||
632 | } | ||
633 | |||
634 | /** | ||
635 | * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message. | ||
636 | * @path: Pending path (struct iucv_path) | ||
637 | * @msg: Pointer to the IUCV message | ||
638 | * | ||
639 | * The function stores an incoming message on the input queue for later | ||
640 | * processing (by hvc_iucv_get_chars() / hvc_iucv_write()). | ||
641 | * However, if the tty has not yet been opened, the message is rejected. | ||
642 | * | ||
643 | * Locking: struct hvc_iucv_private->lock | ||
644 | */ | ||
645 | static void hvc_iucv_msg_pending(struct iucv_path *path, | ||
646 | struct iucv_message *msg) | ||
647 | { | ||
648 | struct hvc_iucv_private *priv = path->private; | ||
649 | struct iucv_tty_buffer *rb; | ||
650 | |||
651 | spin_lock(&priv->lock); | ||
652 | |||
653 | /* reject messages if tty has not yet been opened */ | ||
654 | if (priv->tty_state == TTY_CLOSED) { | ||
655 | iucv_message_reject(path, msg); | ||
656 | goto unlock_return; | ||
657 | } | ||
658 | |||
659 | /* allocate an empty buffer element */ | ||
660 | rb = alloc_tty_buffer(0, GFP_ATOMIC); | ||
661 | if (!rb) { | ||
662 | iucv_message_reject(path, msg); | ||
663 | goto unlock_return; /* -ENOMEM */ | ||
664 | } | ||
665 | rb->msg = *msg; | ||
666 | |||
667 | list_add_tail(&rb->list, &priv->tty_inqueue); | ||
668 | |||
669 | hvc_kick(); /* wake up hvc console thread */ | ||
670 | |||
671 | unlock_return: | ||
672 | spin_unlock(&priv->lock); | ||
673 | } | ||
674 | |||
675 | /** | ||
676 | * hvc_iucv_msg_complete() - IUCV handler to process message completion | ||
677 | * @path: Pending path (struct iucv_path) | ||
678 | * @msg: Pointer to the IUCV message | ||
679 | * | ||
680 | * The function is called upon completion of message delivery and the | ||
681 | * message is removed from the outqueue. Additional delivery information | ||
682 | * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and | ||
683 | * purged messages (0x010000 (IPADPGNR)). | ||
684 | * | ||
685 | * Locking: struct hvc_iucv_private->lock | ||
686 | */ | ||
687 | static void hvc_iucv_msg_complete(struct iucv_path *path, | ||
688 | struct iucv_message *msg) | ||
689 | { | ||
690 | struct hvc_iucv_private *priv = path->private; | ||
691 | struct iucv_tty_buffer *ent, *next; | ||
692 | LIST_HEAD(list_remove); | ||
693 | |||
694 | spin_lock(&priv->lock); | ||
695 | list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list) | ||
696 | if (ent->msg.id == msg->id) { | ||
697 | list_move(&ent->list, &list_remove); | ||
698 | break; | ||
699 | } | ||
700 | spin_unlock(&priv->lock); | ||
701 | destroy_tty_buffer_list(&list_remove); | ||
702 | } | ||
703 | |||
704 | |||
705 | /* HVC operations */ | ||
706 | static struct hv_ops hvc_iucv_ops = { | ||
707 | .get_chars = hvc_iucv_get_chars, | ||
708 | .put_chars = hvc_iucv_put_chars, | ||
709 | .notifier_add = hvc_iucv_notifier_add, | ||
710 | .notifier_del = hvc_iucv_notifier_del, | ||
711 | .notifier_hangup = hvc_iucv_notifier_hangup, | ||
712 | }; | ||
713 | |||
714 | /** | ||
715 | * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance | ||
716 | * @id: hvc_iucv_table index | ||
717 | * | ||
718 | * This function allocates a new hvc_iucv_private struct and puts the | ||
719 | * instance into hvc_iucv_table at index @id. | ||
720 | * Returns 0 on success; otherwise non-zero. | ||
721 | */ | ||
722 | static int __init hvc_iucv_alloc(int id) | ||
723 | { | ||
724 | struct hvc_iucv_private *priv; | ||
725 | char name[9]; | ||
726 | int rc; | ||
727 | |||
728 | priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL); | ||
729 | if (!priv) | ||
730 | return -ENOMEM; | ||
731 | |||
732 | spin_lock_init(&priv->lock); | ||
733 | INIT_LIST_HEAD(&priv->tty_outqueue); | ||
734 | INIT_LIST_HEAD(&priv->tty_inqueue); | ||
735 | |||
736 | /* Finally allocate hvc */ | ||
737 | priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, | ||
738 | HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE); | ||
739 | if (IS_ERR(priv->hvc)) { | ||
740 | rc = PTR_ERR(priv->hvc); | ||
741 | kfree(priv); | ||
742 | return rc; | ||
743 | } | ||
744 | |||
745 | /* setup iucv related information */ | ||
746 | snprintf(name, 9, "ihvc%-4d", id); | ||
747 | memcpy(priv->srv_name, name, 8); | ||
748 | ASCEBC(priv->srv_name, 8); | ||
749 | |||
750 | hvc_iucv_table[id] = priv; | ||
751 | return 0; | ||
752 | } | ||
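For example, derived from the snprintf() format above, the per-terminal IUCV service names are blank-padded to 8 characters before the EBCDIC conversion:

	char name[9];

	snprintf(name, 9, "ihvc%-4d", 0);	/* "ihvc0   " for terminal 0 */
	snprintf(name, 9, "ihvc%-4d", 7);	/* "ihvc7   " for terminal 7 */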
753 | |||
754 | /** | ||
755 | * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV | ||
756 | */ | ||
757 | static int __init hvc_iucv_init(void) | ||
758 | { | ||
759 | int rc, i; | ||
760 | |||
761 | if (!MACHINE_IS_VM) { | ||
762 | pr_warning("The z/VM IUCV Hypervisor console cannot be " | ||
763 | "used without z/VM.\n"); | ||
764 | return -ENODEV; | ||
765 | } | ||
766 | |||
767 | if (!hvc_iucv_devices) | ||
768 | return -ENODEV; | ||
769 | |||
770 | if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) | ||
771 | return -EINVAL; | ||
772 | |||
773 | hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT, | ||
774 | sizeof(struct iucv_tty_buffer), | ||
775 | 0, 0, NULL); | ||
776 | if (!hvc_iucv_buffer_cache) { | ||
777 | pr_err("Not enough memory for driver initialization " | ||
778 | "(rs=%d).\n", 1); | ||
779 | return -ENOMEM; | ||
780 | } | ||
781 | |||
782 | hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR, | ||
783 | hvc_iucv_buffer_cache); | ||
784 | if (!hvc_iucv_mempool) { | ||
785 | pr_err("Not enough memory for driver initialization " | ||
786 | "(rs=%d).\n", 2); | ||
787 | kmem_cache_destroy(hvc_iucv_buffer_cache); | ||
788 | return -ENOMEM; | ||
789 | } | ||
790 | |||
791 | /* allocate hvc_iucv_private structs */ | ||
792 | for (i = 0; i < hvc_iucv_devices; i++) { | ||
793 | rc = hvc_iucv_alloc(i); | ||
794 | if (rc) { | ||
795 | pr_err("Could not create new z/VM IUCV HVC backend " | ||
796 | "rc=%d.\n", rc); | ||
797 | goto out_error_hvc; | ||
798 | } | ||
799 | } | ||
800 | |||
801 | /* register IUCV callback handler */ | ||
802 | rc = iucv_register(&hvc_iucv_handler, 0); | ||
803 | if (rc) { | ||
804 | pr_err("Could not register iucv handler (rc=%d).\n", rc); | ||
805 | goto out_error_iucv; | ||
806 | } | ||
807 | |||
808 | return 0; | ||
809 | |||
810 | out_error_iucv: | ||
811 | iucv_unregister(&hvc_iucv_handler, 0); | ||
812 | out_error_hvc: | ||
813 | for (i = 0; i < hvc_iucv_devices; i++) | ||
814 | if (hvc_iucv_table[i]) { | ||
815 | if (hvc_iucv_table[i]->hvc) | ||
816 | hvc_remove(hvc_iucv_table[i]->hvc); | ||
817 | kfree(hvc_iucv_table[i]); | ||
818 | } | ||
819 | mempool_destroy(hvc_iucv_mempool); | ||
820 | kmem_cache_destroy(hvc_iucv_buffer_cache); | ||
821 | return rc; | ||
822 | } | ||
823 | |||
824 | /** | ||
825 | * hvc_iucv_console_init() - Early console initialization | ||
826 | */ | ||
827 | static int __init hvc_iucv_console_init(void) | ||
828 | { | ||
829 | if (!MACHINE_IS_VM || !hvc_iucv_devices) | ||
830 | return -ENODEV; | ||
831 | return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops); | ||
832 | } | ||
833 | |||
834 | /** | ||
835 | * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter | ||
836 | * @val: Parameter value (numeric) | ||
837 | */ | ||
838 | static int __init hvc_iucv_config(char *val) | ||
839 | { | ||
840 | return strict_strtoul(val, 10, &hvc_iucv_devices); | ||
841 | } | ||
842 | |||
843 | |||
844 | module_init(hvc_iucv_init); | ||
845 | console_initcall(hvc_iucv_console_init); | ||
846 | __setup("hvc_iucv=", hvc_iucv_config); | ||
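As a usage sketch, booting with the parameter below would make hvc_iucv_init() allocate four terminals whose IUCV service names are ihvc0 through ihvc3 (the value is only an example):

	hvc_iucv=4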
847 | |||
848 | MODULE_LICENSE("GPL"); | ||
849 | MODULE_DESCRIPTION("HVC back-end for z/VM IUCV."); | ||
850 | MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>"); | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 363bd1303d21..570ae59c1d5e 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1898,15 +1898,19 @@ restart_cb: | |||
1898 | wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); | 1898 | wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); |
1899 | /* Process finished ERP request. */ | 1899 | /* Process finished ERP request. */ |
1900 | if (cqr->refers) { | 1900 | if (cqr->refers) { |
1901 | spin_lock_bh(&block->queue_lock); | ||
1901 | __dasd_block_process_erp(block, cqr); | 1902 | __dasd_block_process_erp(block, cqr); |
1903 | spin_unlock_bh(&block->queue_lock); | ||
1902 | /* restart list_for_xx loop since dasd_process_erp | 1904 | /* restart list_for_xx loop since dasd_process_erp |
1903 | * might remove multiple elements */ | 1905 | * might remove multiple elements */ |
1904 | goto restart_cb; | 1906 | goto restart_cb; |
1905 | } | 1907 | } |
1906 | /* call the callback function */ | 1908 | /* call the callback function */ |
1909 | spin_lock_irq(&block->request_queue_lock); | ||
1907 | cqr->endclk = get_clock(); | 1910 | cqr->endclk = get_clock(); |
1908 | list_del_init(&cqr->blocklist); | 1911 | list_del_init(&cqr->blocklist); |
1909 | __dasd_cleanup_cqr(cqr); | 1912 | __dasd_cleanup_cqr(cqr); |
1913 | spin_unlock_irq(&block->request_queue_lock); | ||
1910 | } | 1914 | } |
1911 | return rc; | 1915 | return rc; |
1912 | } | 1916 | } |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 921443b01d16..2ef25731d197 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -23,6 +23,7 @@ | |||
23 | 23 | ||
24 | /* This is ugly... */ | 24 | /* This is ugly... */ |
25 | #define PRINTK_HEADER "dasd_devmap:" | 25 | #define PRINTK_HEADER "dasd_devmap:" |
26 | #define DASD_BUS_ID_SIZE 20 | ||
26 | 27 | ||
27 | #include "dasd_int.h" | 28 | #include "dasd_int.h" |
28 | 29 | ||
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache); | |||
41 | */ | 42 | */ |
42 | struct dasd_devmap { | 43 | struct dasd_devmap { |
43 | struct list_head list; | 44 | struct list_head list; |
44 | char bus_id[BUS_ID_SIZE]; | 45 | char bus_id[DASD_BUS_ID_SIZE]; |
45 | unsigned int devindex; | 46 | unsigned int devindex; |
46 | unsigned short features; | 47 | unsigned short features; |
47 | struct dasd_device *device; | 48 | struct dasd_device *device; |
@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id) | |||
94 | int hash, i; | 95 | int hash, i; |
95 | 96 | ||
96 | hash = 0; | 97 | hash = 0; |
97 | for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++) | 98 | for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++) |
98 | hash += *bus_id; | 99 | hash += *bus_id; |
99 | return hash & 0xff; | 100 | return hash & 0xff; |
100 | } | 101 | } |
@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) { | |||
301 | int from, from_id0, from_id1; | 302 | int from, from_id0, from_id1; |
302 | int to, to_id0, to_id1; | 303 | int to, to_id0, to_id1; |
303 | int features, rc; | 304 | int features, rc; |
304 | char bus_id[BUS_ID_SIZE+1], *str; | 305 | char bus_id[DASD_BUS_ID_SIZE+1], *str; |
305 | 306 | ||
306 | str = parsestring; | 307 | str = parsestring; |
307 | rc = dasd_busid(&str, &from_id0, &from_id1, &from); | 308 | rc = dasd_busid(&str, &from_id0, &from_id1, &from); |
@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features) | |||
407 | devmap = NULL; | 408 | devmap = NULL; |
408 | hash = dasd_hash_busid(bus_id); | 409 | hash = dasd_hash_busid(bus_id); |
409 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) | 410 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) |
410 | if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { | 411 | if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) { |
411 | devmap = tmp; | 412 | devmap = tmp; |
412 | break; | 413 | break; |
413 | } | 414 | } |
414 | if (!devmap) { | 415 | if (!devmap) { |
415 | /* This bus_id is new. */ | 416 | /* This bus_id is new. */ |
416 | new->devindex = dasd_max_devindex++; | 417 | new->devindex = dasd_max_devindex++; |
417 | strncpy(new->bus_id, bus_id, BUS_ID_SIZE); | 418 | strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE); |
418 | new->features = features; | 419 | new->features = features; |
419 | new->device = NULL; | 420 | new->device = NULL; |
420 | list_add(&new->list, &dasd_hashlists[hash]); | 421 | list_add(&new->list, &dasd_hashlists[hash]); |
@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id) | |||
439 | devmap = ERR_PTR(-ENODEV); | 440 | devmap = ERR_PTR(-ENODEV); |
440 | hash = dasd_hash_busid(bus_id); | 441 | hash = dasd_hash_busid(bus_id); |
441 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) { | 442 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) { |
442 | if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { | 443 | if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) { |
443 | devmap = tmp; | 444 | devmap = tmp; |
444 | break; | 445 | break; |
445 | } | 446 | } |
@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev) | |||
561 | } | 562 | } |
562 | 563 | ||
563 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 564 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); |
564 | cdev->dev.driver_data = device; | 565 | dev_set_drvdata(&cdev->dev, device); |
565 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 566 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); |
566 | 567 | ||
567 | return device; | 568 | return device; |
@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device) | |||
597 | 598 | ||
598 | /* Disconnect dasd_device structure from ccw_device structure. */ | 599 | /* Disconnect dasd_device structure from ccw_device structure. */ |
599 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); | 600 | spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); |
600 | device->cdev->dev.driver_data = NULL; | 601 | dev_set_drvdata(&device->cdev->dev, NULL); |
601 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); | 602 | spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); |
602 | 603 | ||
603 | /* | 604 | /* |
@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device) | |||
638 | struct dasd_device * | 639 | struct dasd_device * |
639 | dasd_device_from_cdev_locked(struct ccw_device *cdev) | 640 | dasd_device_from_cdev_locked(struct ccw_device *cdev) |
640 | { | 641 | { |
641 | struct dasd_device *device = cdev->dev.driver_data; | 642 | struct dasd_device *device = dev_get_drvdata(&cdev->dev); |
642 | 643 | ||
643 | if (!device) | 644 | if (!device) |
644 | return ERR_PTR(-ENODEV); | 645 | return ERR_PTR(-ENODEV); |
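The replaced assignments switch to the generic driver-core accessors; a minimal sketch of the pattern:

	struct dasd_device *d;

	dev_set_drvdata(&cdev->dev, device);	/* attach the private pointer */
	d = dev_get_drvdata(&cdev->dev);	/* read it back where needed */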
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 2e60d5f968c8..bd2c52e20762 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device, | |||
1496 | 1496 | ||
1497 | 1497 | ||
1498 | /* service information message SIM */ | 1498 | /* service information message SIM */ |
1499 | if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) && | 1499 | if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) && |
1500 | ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { | 1500 | ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { |
1501 | dasd_3990_erp_handle_sim(device, irb->ecw); | 1501 | dasd_3990_erp_handle_sim(device, irb->ecw); |
1502 | dasd_schedule_device_bh(device); | 1502 | dasd_schedule_device_bh(device); |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 9088de84b45d..bf6fd348f20e 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off, | |||
180 | 180 | ||
181 | #ifdef CONFIG_DASD_PROFILE | 181 | #ifdef CONFIG_DASD_PROFILE |
182 | static char * | 182 | static char * |
183 | dasd_statistics_array(char *str, unsigned int *array, int shift) | 183 | dasd_statistics_array(char *str, unsigned int *array, int factor) |
184 | { | 184 | { |
185 | int i; | 185 | int i; |
186 | 186 | ||
187 | for (i = 0; i < 32; i++) { | 187 | for (i = 0; i < 32; i++) { |
188 | str += sprintf(str, "%7d ", array[i] >> shift); | 188 | str += sprintf(str, "%7d ", array[i] / factor); |
189 | if (i == 15) | 189 | if (i == 15) |
190 | str += sprintf(str, "\n"); | 190 | str += sprintf(str, "\n"); |
191 | } | 191 | } |
@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
202 | #ifdef CONFIG_DASD_PROFILE | 202 | #ifdef CONFIG_DASD_PROFILE |
203 | struct dasd_profile_info_t *prof; | 203 | struct dasd_profile_info_t *prof; |
204 | char *str; | 204 | char *str; |
205 | int shift; | 205 | int factor; |
206 | 206 | ||
207 | /* check for active profiling */ | 207 | /* check for active profiling */ |
208 | if (dasd_profile_level == DASD_PROFILE_OFF) { | 208 | if (dasd_profile_level == DASD_PROFILE_OFF) { |
@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
214 | 214 | ||
215 | prof = &dasd_global_profile; | 215 | prof = &dasd_global_profile; |
216 | /* prevent counter 'overflow' on output */ | 216 | /* prevent counter 'overflow' on output */ |
217 | for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++); | 217 | for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999; |
218 | factor *= 10); | ||
218 | 219 | ||
219 | str = page; | 220 | str = page; |
220 | str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); | 221 | str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs); |
221 | str += sprintf(str, "with %d sectors(512B each)\n", | 222 | str += sprintf(str, "with %u sectors(512B each)\n", |
222 | prof->dasd_io_sects); | 223 | prof->dasd_io_sects); |
224 | str += sprintf(str, "Scale Factor is %d\n", factor); | ||
223 | str += sprintf(str, | 225 | str += sprintf(str, |
224 | " __<4 ___8 __16 __32 __64 _128 " | 226 | " __<4 ___8 __16 __32 __64 _128 " |
225 | " _256 _512 __1k __2k __4k __8k " | 227 | " _256 _512 __1k __2k __4k __8k " |
@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off, | |||
230 | " __1G __2G __4G " " _>4G\n"); | 232 | " __1G __2G __4G " " _>4G\n"); |
231 | 233 | ||
232 | str += sprintf(str, "Histogram of sizes (512B secs)\n"); | 234 | str += sprintf(str, "Histogram of sizes (512B secs)\n"); |
233 | str = dasd_statistics_array(str, prof->dasd_io_secs, shift); | 235 | str = dasd_statistics_array(str, prof->dasd_io_secs, factor); |
234 | str += sprintf(str, "Histogram of I/O times (microseconds)\n"); | 236 | str += sprintf(str, "Histogram of I/O times (microseconds)\n"); |
235 | str = dasd_statistics_array(str, prof->dasd_io_times, shift); | 237 | str = dasd_statistics_array(str, prof->dasd_io_times, factor); |
236 | str += sprintf(str, "Histogram of I/O times per sector\n"); | 238 | str += sprintf(str, "Histogram of I/O times per sector\n"); |
237 | str = dasd_statistics_array(str, prof->dasd_io_timps, shift); | 239 | str = dasd_statistics_array(str, prof->dasd_io_timps, factor); |
238 | str += sprintf(str, "Histogram of I/O time till ssch\n"); | 240 | str += sprintf(str, "Histogram of I/O time till ssch\n"); |
239 | str = dasd_statistics_array(str, prof->dasd_io_time1, shift); | 241 | str = dasd_statistics_array(str, prof->dasd_io_time1, factor); |
240 | str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); | 242 | str += sprintf(str, "Histogram of I/O time between ssch and irq\n"); |
241 | str = dasd_statistics_array(str, prof->dasd_io_time2, shift); | 243 | str = dasd_statistics_array(str, prof->dasd_io_time2, factor); |
242 | str += sprintf(str, "Histogram of I/O time between ssch " | 244 | str += sprintf(str, "Histogram of I/O time between ssch " |
243 | "and irq per sector\n"); | 245 | "and irq per sector\n"); |
244 | str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift); | 246 | str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor); |
245 | str += sprintf(str, "Histogram of I/O time between irq and end\n"); | 247 | str += sprintf(str, "Histogram of I/O time between irq and end\n"); |
246 | str = dasd_statistics_array(str, prof->dasd_io_time3, shift); | 248 | str = dasd_statistics_array(str, prof->dasd_io_time3, factor); |
247 | str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); | 249 | str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n"); |
248 | str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift); | 250 | str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor); |
249 | len = str - page; | 251 | len = str - page; |
250 | #else | 252 | #else |
251 | len = sprintf(page, "Statistics are not activated in this kernel\n"); | 253 | len = sprintf(page, "Statistics are not activated in this kernel\n"); |
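A worked example of the new scale-factor loop with a hypothetical request count of 123456789:

	unsigned int reqs = 123456789, factor;

	for (factor = 1; (reqs / factor) > 9999999; factor *= 10)
		;
	/* factor == 100, so every histogram column is printed divided by 100 */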
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 63f26a135fe5..26ffc6ab441d 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -4,6 +4,9 @@ | |||
4 | * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer | 4 | * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #define KMSG_COMPONENT "dcssblk" | ||
8 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
9 | |||
7 | #include <linux/module.h> | 10 | #include <linux/module.h> |
8 | #include <linux/moduleparam.h> | 11 | #include <linux/moduleparam.h> |
9 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
@@ -17,19 +20,10 @@ | |||
17 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
18 | #include <asm/s390_rdev.h> | 21 | #include <asm/s390_rdev.h> |
19 | 22 | ||
20 | //#define DCSSBLK_DEBUG /* Debug messages on/off */ | ||
21 | #define DCSSBLK_NAME "dcssblk" | 23 | #define DCSSBLK_NAME "dcssblk" |
22 | #define DCSSBLK_MINORS_PER_DISK 1 | 24 | #define DCSSBLK_MINORS_PER_DISK 1 |
23 | #define DCSSBLK_PARM_LEN 400 | 25 | #define DCSSBLK_PARM_LEN 400 |
24 | 26 | #define DCSS_BUS_ID_SIZE 20 | |
25 | #ifdef DCSSBLK_DEBUG | ||
26 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x) | ||
27 | #else | ||
28 | #define PRINT_DEBUG(x...) do {} while (0) | ||
29 | #endif | ||
30 | #define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x) | ||
31 | #define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x) | ||
32 | #define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x) | ||
33 | 27 | ||
34 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); | 28 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); |
35 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); | 29 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); |
@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = { | |||
50 | struct dcssblk_dev_info { | 44 | struct dcssblk_dev_info { |
51 | struct list_head lh; | 45 | struct list_head lh; |
52 | struct device dev; | 46 | struct device dev; |
53 | char segment_name[BUS_ID_SIZE]; | 47 | char segment_name[DCSS_BUS_ID_SIZE]; |
54 | atomic_t use_count; | 48 | atomic_t use_count; |
55 | struct gendisk *gd; | 49 | struct gendisk *gd; |
56 | unsigned long start; | 50 | unsigned long start; |
@@ -65,7 +59,7 @@ struct dcssblk_dev_info { | |||
65 | 59 | ||
66 | struct segment_info { | 60 | struct segment_info { |
67 | struct list_head lh; | 61 | struct list_head lh; |
68 | char segment_name[BUS_ID_SIZE]; | 62 | char segment_name[DCSS_BUS_ID_SIZE]; |
69 | unsigned long start; | 63 | unsigned long start; |
70 | unsigned long end; | 64 | unsigned long end; |
71 | int segment_type; | 65 | int segment_type; |
@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) | |||
261 | /* check continuity */ | 255 | /* check continuity */ |
262 | for (i = 0; i < dev_info->num_of_segments - 1; i++) { | 256 | for (i = 0; i < dev_info->num_of_segments - 1; i++) { |
263 | if ((sort_list[i].end + 1) != sort_list[i+1].start) { | 257 | if ((sort_list[i].end + 1) != sort_list[i+1].start) { |
264 | PRINT_ERR("Segment %s is not contiguous with " | 258 | pr_err("Adjacent DCSSs %s and %s are not " |
265 | "segment %s\n", | 259 | "contiguous\n", sort_list[i].segment_name, |
266 | sort_list[i].segment_name, | 260 | sort_list[i+1].segment_name); |
267 | sort_list[i+1].segment_name); | ||
268 | rc = -EINVAL; | 261 | rc = -EINVAL; |
269 | goto out; | 262 | goto out; |
270 | } | 263 | } |
@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info) | |||
275 | !(sort_list[i+1].segment_type & | 268 | !(sort_list[i+1].segment_type & |
276 | SEGMENT_EXCLUSIVE) || | 269 | SEGMENT_EXCLUSIVE) || |
277 | (sort_list[i+1].segment_type == SEG_TYPE_ER)) { | 270 | (sort_list[i+1].segment_type == SEG_TYPE_ER)) { |
278 | PRINT_ERR("Segment %s has different type from " | 271 | pr_err("DCSS %s and DCSS %s have " |
279 | "segment %s\n", | 272 | "incompatible types\n", |
280 | sort_list[i].segment_name, | 273 | sort_list[i].segment_name, |
281 | sort_list[i+1].segment_name); | 274 | sort_list[i+1].segment_name); |
282 | rc = -EINVAL; | 275 | rc = -EINVAL; |
283 | goto out; | 276 | goto out; |
284 | } | 277 | } |
@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
380 | } else if (inbuf[0] == '0') { | 373 | } else if (inbuf[0] == '0') { |
381 | /* reload segments in exclusive mode */ | 374 | /* reload segments in exclusive mode */ |
382 | if (dev_info->segment_type == SEG_TYPE_SC) { | 375 | if (dev_info->segment_type == SEG_TYPE_SC) { |
383 | PRINT_ERR("Segment type SC (%s) cannot be loaded in " | 376 | pr_err("DCSS %s is of type SC and cannot be " |
384 | "non-shared mode\n", dev_info->segment_name); | 377 | "loaded as exclusive-writable\n", |
378 | dev_info->segment_name); | ||
385 | rc = -EINVAL; | 379 | rc = -EINVAL; |
386 | goto out; | 380 | goto out; |
387 | } | 381 | } |
@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
404 | goto out; | 398 | goto out; |
405 | 399 | ||
406 | removeseg: | 400 | removeseg: |
407 | PRINT_ERR("Could not reload segment(s) of the device %s, removing " | 401 | pr_err("DCSS device %s is removed after a failed access mode " |
408 | "segment(s) now!\n", | 402 | "change\n", dev_info->segment_name); |
409 | dev_info->segment_name); | ||
410 | temp = entry; | 403 | temp = entry; |
411 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | 404 | list_for_each_entry(entry, &dev_info->seg_list, lh) { |
412 | if (entry != temp) | 405 | if (entry != temp) |
@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char | |||
454 | if (inbuf[0] == '1') { | 447 | if (inbuf[0] == '1') { |
455 | if (atomic_read(&dev_info->use_count) == 0) { | 448 | if (atomic_read(&dev_info->use_count) == 0) { |
456 | // device is idle => we save immediately | 449 | // device is idle => we save immediately |
457 | PRINT_INFO("Saving segment(s) of the device %s\n", | 450 | pr_info("All DCSSs that map to device %s are " |
458 | dev_info->segment_name); | 451 | "saved\n", dev_info->segment_name); |
459 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | 452 | list_for_each_entry(entry, &dev_info->seg_list, lh) { |
460 | segment_save(entry->segment_name); | 453 | segment_save(entry->segment_name); |
461 | } | 454 | } |
462 | } else { | 455 | } else { |
463 | // device is busy => we save it when it becomes | 456 | // device is busy => we save it when it becomes |
464 | // idle in dcssblk_release | 457 | // idle in dcssblk_release |
465 | PRINT_INFO("Device %s is currently busy, segment(s) " | 458 | pr_info("Device %s is in use, its DCSSs will be " |
466 | "will be saved when it becomes idle...\n", | 459 | "saved when it becomes idle\n", |
467 | dev_info->segment_name); | 460 | dev_info->segment_name); |
468 | dev_info->save_pending = 1; | 461 | dev_info->save_pending = 1; |
469 | } | 462 | } |
470 | } else if (inbuf[0] == '0') { | 463 | } else if (inbuf[0] == '0') { |
@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char | |||
472 | // device is busy & the user wants to undo his save | 465 | // device is busy & the user wants to undo his save |
473 | // request | 466 | // request |
474 | dev_info->save_pending = 0; | 467 | dev_info->save_pending = 0; |
475 | PRINT_INFO("Pending save for segment(s) of the device " | 468 | pr_info("A pending save request for device %s " |
476 | "%s deactivated\n", | 469 | "has been canceled\n", |
477 | dev_info->segment_name); | 470 | dev_info->segment_name); |
478 | } | 471 | } |
479 | } else { | 472 | } else { |
480 | up_write(&dcssblk_devices_sem); | 473 | up_write(&dcssblk_devices_sem); |
@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
614 | 607 | ||
615 | seg_byte_size = (dev_info->end - dev_info->start + 1); | 608 | seg_byte_size = (dev_info->end - dev_info->start + 1); |
616 | set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors | 609 | set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors |
617 | PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, " | 610 | pr_info("Loaded %s with total size %lu bytes and capacity %lu " |
618 | "capacity = %lu (512 Byte) sectors\n", local_buf, | 611 | "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9); |
619 | seg_byte_size, seg_byte_size >> 9); | ||
620 | 612 | ||
621 | dev_info->save_pending = 0; | 613 | dev_info->save_pending = 0; |
622 | dev_info->is_shared = 1; | 614 | dev_info->is_shared = 1; |
@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch | |||
744 | dev_info = dcssblk_get_device_by_name(local_buf); | 736 | dev_info = dcssblk_get_device_by_name(local_buf); |
745 | if (dev_info == NULL) { | 737 | if (dev_info == NULL) { |
746 | up_write(&dcssblk_devices_sem); | 738 | up_write(&dcssblk_devices_sem); |
747 | PRINT_WARN("Device %s is not loaded!\n", local_buf); | 739 | pr_warning("Device %s cannot be removed because it is not a " |
740 | "known device\n", local_buf); | ||
748 | rc = -ENODEV; | 741 | rc = -ENODEV; |
749 | goto out_buf; | 742 | goto out_buf; |
750 | } | 743 | } |
751 | if (atomic_read(&dev_info->use_count) != 0) { | 744 | if (atomic_read(&dev_info->use_count) != 0) { |
752 | up_write(&dcssblk_devices_sem); | 745 | up_write(&dcssblk_devices_sem); |
753 | PRINT_WARN("Device %s is in use!\n", local_buf); | 746 | pr_warning("Device %s cannot be removed while it is in " |
747 | "use\n", local_buf); | ||
754 | rc = -EBUSY; | 748 | rc = -EBUSY; |
755 | goto out_buf; | 749 | goto out_buf; |
756 | } | 750 | } |
@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) | |||
807 | down_write(&dcssblk_devices_sem); | 801 | down_write(&dcssblk_devices_sem); |
808 | if (atomic_dec_and_test(&dev_info->use_count) | 802 | if (atomic_dec_and_test(&dev_info->use_count) |
809 | && (dev_info->save_pending)) { | 803 | && (dev_info->save_pending)) { |
810 | PRINT_INFO("Device %s became idle and is being saved now\n", | 804 | pr_info("Device %s has become idle and is being saved " |
811 | dev_info->segment_name); | 805 | "now\n", dev_info->segment_name); |
812 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | 806 | list_for_each_entry(entry, &dev_info->seg_list, lh) { |
813 | segment_save(entry->segment_name); | 807 | segment_save(entry->segment_name); |
814 | } | 808 | } |
@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) | |||
851 | case SEG_TYPE_SC: | 845 | case SEG_TYPE_SC: |
852 | /* cannot write to these segments */ | 846 | /* cannot write to these segments */ |
853 | if (bio_data_dir(bio) == WRITE) { | 847 | if (bio_data_dir(bio) == WRITE) { |
854 | PRINT_WARN("rejecting write to ro device %s\n", | 848 | pr_warning("Writing to %s failed because it " |
849 | "is a read-only device\n", | ||
855 | dev_name(&dev_info->dev)); | 850 | dev_name(&dev_info->dev)); |
856 | goto fail; | 851 | goto fail; |
857 | } | 852 | } |
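[Editor's note] The dcssblk hunks above are representative of the whole series: the per-driver PRINT_* wrappers are dropped in favour of the generic pr_* helpers, and the message text is reworded to name the affected DCSS or device. A minimal before/after sketch of that conversion; the PRINT_ERR definition and the function name here are illustrative, not copied from the driver:

    #include <linux/kernel.h>

    /* old, per-driver style: each driver rolled its own prefix macro */
    #define DCSSBLK_NAME "dcssblk"
    #define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)

    static void report_type_mismatch(const char *a, const char *b)
    {
            /* before: hand-rolled prefix, terse wording */
            PRINT_ERR("Segment %s has different type from segment %s\n", a, b);
            /* after: pr_err() picks up its prefix from pr_fmt()/KMSG_COMPONENT */
            pr_err("DCSS %s and DCSS %s have incompatible types\n", a, b);
    }

The xpram hunk below shows where that pr_fmt() prefix is actually defined.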
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 03916989ed2d..76814f3e898a 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -25,6 +25,9 @@ | |||
25 | * generic hard disk support to replace ad-hoc partitioning | 25 | * generic hard disk support to replace ad-hoc partitioning |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "xpram" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/module.h> | 31 | #include <linux/module.h> |
29 | #include <linux/moduleparam.h> | 32 | #include <linux/moduleparam.h> |
30 | #include <linux/ctype.h> /* isdigit, isxdigit */ | 33 | #include <linux/ctype.h> /* isdigit, isxdigit */ |
@@ -42,12 +45,6 @@ | |||
42 | #define XPRAM_DEVS 1 /* one partition */ | 45 | #define XPRAM_DEVS 1 /* one partition */ |
43 | #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ | 46 | #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ |
44 | 47 | ||
45 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x) | ||
46 | #define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x) | ||
47 | #define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x) | ||
48 | #define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) | ||
49 | |||
50 | |||
51 | typedef struct { | 48 | typedef struct { |
52 | unsigned int size; /* size of xpram segment in pages */ | 49 | unsigned int size; /* size of xpram segment in pages */ |
53 | unsigned int offset; /* start page of xpram segment */ | 50 | unsigned int offset; /* start page of xpram segment */ |
@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
264 | 261 | ||
265 | /* Check number of devices. */ | 262 | /* Check number of devices. */ |
266 | if (devs <= 0 || devs > XPRAM_MAX_DEVS) { | 263 | if (devs <= 0 || devs > XPRAM_MAX_DEVS) { |
267 | PRINT_ERR("invalid number %d of devices\n",devs); | 264 | pr_err("%d is not a valid number of XPRAM devices\n",devs); |
268 | return -EINVAL; | 265 | return -EINVAL; |
269 | } | 266 | } |
270 | xpram_devs = devs; | 267 | xpram_devs = devs; |
@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
295 | mem_auto_no++; | 292 | mem_auto_no++; |
296 | } | 293 | } |
297 | 294 | ||
298 | PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); | 295 | pr_info(" number of devices (partitions): %d \n", xpram_devs); |
299 | for (i = 0; i < xpram_devs; i++) { | 296 | for (i = 0; i < xpram_devs; i++) { |
300 | if (xpram_sizes[i]) | 297 | if (xpram_sizes[i]) |
301 | PRINT_INFO(" size of partition %d: %u kB\n", | 298 | pr_info(" size of partition %d: %u kB\n", |
302 | i, xpram_sizes[i]); | 299 | i, xpram_sizes[i]); |
303 | else | 300 | else |
304 | PRINT_INFO(" size of partition %d to be set " | 301 | pr_info(" size of partition %d to be set " |
305 | "automatically\n",i); | 302 | "automatically\n",i); |
306 | } | 303 | } |
307 | PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n", | 304 | pr_info(" memory needed (for sized partitions): %lu kB\n", |
308 | mem_needed); | 305 | mem_needed); |
309 | PRINT_DEBUG(" partitions to be sized automatically: %d\n", | 306 | pr_info(" partitions to be sized automatically: %d\n", |
310 | mem_auto_no); | 307 | mem_auto_no); |
311 | 308 | ||
312 | if (mem_needed > pages * 4) { | 309 | if (mem_needed > pages * 4) { |
313 | PRINT_ERR("Not enough expanded memory available\n"); | 310 | pr_err("Not enough expanded memory available\n"); |
314 | return -EINVAL; | 311 | return -EINVAL; |
315 | } | 312 | } |
316 | 313 | ||
@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
322 | */ | 319 | */ |
323 | if (mem_auto_no) { | 320 | if (mem_auto_no) { |
324 | mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; | 321 | mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; |
325 | PRINT_INFO(" automatically determined " | 322 | pr_info(" automatically determined " |
326 | "partition size: %lu kB\n", mem_auto); | 323 | "partition size: %lu kB\n", mem_auto); |
327 | for (i = 0; i < xpram_devs; i++) | 324 | for (i = 0; i < xpram_devs; i++) |
328 | if (xpram_sizes[i] == 0) | 325 | if (xpram_sizes[i] == 0) |
329 | xpram_sizes[i] = mem_auto; | 326 | xpram_sizes[i] = mem_auto; |
@@ -405,12 +402,12 @@ static int __init xpram_init(void) | |||
405 | 402 | ||
406 | /* Find out size of expanded memory. */ | 403 | /* Find out size of expanded memory. */ |
407 | if (xpram_present() != 0) { | 404 | if (xpram_present() != 0) { |
408 | PRINT_WARN("No expanded memory available\n"); | 405 | pr_err("No expanded memory available\n"); |
409 | return -ENODEV; | 406 | return -ENODEV; |
410 | } | 407 | } |
411 | xpram_pages = xpram_highest_page_index() + 1; | 408 | xpram_pages = xpram_highest_page_index() + 1; |
412 | PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", | 409 | pr_info(" %u pages expanded memory found (%lu KB).\n", |
413 | xpram_pages, (unsigned long) xpram_pages*4); | 410 | xpram_pages, (unsigned long) xpram_pages*4); |
414 | rc = xpram_setup_sizes(xpram_pages); | 411 | rc = xpram_setup_sizes(xpram_pages); |
415 | if (rc) | 412 | if (rc) |
416 | return rc; | 413 | return rc; |
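[Editor's note] The xpram hunk above also shows where the message prefix now comes from: KMSG_COMPONENT and pr_fmt() are defined before the first include, so every pr_* call in the file is prefixed automatically and the hand-written "xpram info:" style macros become unnecessary. A standalone sketch of the mechanism, with an illustrative message:

    #define KMSG_COMPONENT "xpram"
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt  /* before the includes, so the default pr_fmt is not used */

    #include <linux/kernel.h>

    static void demo(void)
    {
            /* expands to printk(KERN_INFO "xpram: %u devices configured\n", 4) */
            pr_info("%u devices configured\n", 4);
    }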
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 35fd8dfcaaa6..97e63cf46944 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "monreader" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/module.h> | 13 | #include <linux/module.h> |
11 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -24,19 +27,6 @@ | |||
24 | #include <asm/ebcdic.h> | 27 | #include <asm/ebcdic.h> |
25 | #include <asm/extmem.h> | 28 | #include <asm/extmem.h> |
26 | 29 | ||
27 | //#define MON_DEBUG /* Debug messages on/off */ | ||
28 | |||
29 | #define MON_NAME "monreader" | ||
30 | |||
31 | #define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x) | ||
32 | #define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x) | ||
33 | #define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x) | ||
34 | |||
35 | #ifdef MON_DEBUG | ||
36 | #define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x) | ||
37 | #else | ||
38 | #define P_DEBUG(x...) do {} while (0) | ||
39 | #endif | ||
40 | 30 | ||
41 | #define MON_COLLECT_SAMPLE 0x80 | 31 | #define MON_COLLECT_SAMPLE 0x80 |
42 | #define MON_COLLECT_EVENT 0x40 | 32 | #define MON_COLLECT_EVENT 0x40 |
@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg, | |||
172 | } else | 162 | } else |
173 | monmsg->replied_msglim = 1; | 163 | monmsg->replied_msglim = 1; |
174 | if (rc) { | 164 | if (rc) { |
175 | P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc); | 165 | pr_err("Reading monitor data failed with rc=%i\n", rc); |
176 | return -EIO; | 166 | return -EIO; |
177 | } | 167 | } |
178 | return 0; | 168 | return 0; |
@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
251 | { | 241 | { |
252 | struct mon_private *monpriv = path->private; | 242 | struct mon_private *monpriv = path->private; |
253 | 243 | ||
254 | P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); | 244 | pr_err("z/VM *MONITOR system service disconnected with rc=%i\n", |
245 | ipuser[0]); | ||
255 | iucv_path_sever(path, NULL); | 246 | iucv_path_sever(path, NULL); |
256 | atomic_set(&monpriv->iucv_severed, 1); | 247 | atomic_set(&monpriv->iucv_severed, 1); |
257 | wake_up(&mon_conn_wait_queue); | 248 | wake_up(&mon_conn_wait_queue); |
@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path, | |||
266 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, | 257 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, |
267 | msg, sizeof(*msg)); | 258 | msg, sizeof(*msg)); |
268 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { | 259 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { |
269 | P_WARNING("IUCV message pending, message limit (%i) reached\n", | 260 | pr_warning("The read queue for monitor data is full\n"); |
270 | MON_MSGLIM); | ||
271 | monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; | 261 | monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; |
272 | } | 262 | } |
273 | monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; | 263 | monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; |
@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp) | |||
311 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, | 301 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, |
312 | MON_SERVICE, NULL, user_data_connect, monpriv); | 302 | MON_SERVICE, NULL, user_data_connect, monpriv); |
313 | if (rc) { | 303 | if (rc) { |
314 | P_ERROR("iucv connection to *MONITOR failed with " | 304 | pr_err("Connecting to the z/VM *MONITOR system service " |
315 | "IPUSER SEVER code = %i\n", rc); | 305 | "failed with rc=%i\n", rc); |
316 | rc = -EIO; | 306 | rc = -EIO; |
317 | goto out_path; | 307 | goto out_path; |
318 | } | 308 | } |
@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp) | |||
353 | */ | 343 | */ |
354 | rc = iucv_path_sever(monpriv->path, user_data_sever); | 344 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
355 | if (rc) | 345 | if (rc) |
356 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); | 346 | pr_warning("Disconnecting the z/VM *MONITOR system service " |
347 | "failed with rc=%i\n", rc); | ||
357 | 348 | ||
358 | atomic_set(&monpriv->iucv_severed, 0); | 349 | atomic_set(&monpriv->iucv_severed, 0); |
359 | atomic_set(&monpriv->iucv_connected, 0); | 350 | atomic_set(&monpriv->iucv_connected, 0); |
@@ -469,7 +460,8 @@ static int __init mon_init(void) | |||
469 | int rc; | 460 | int rc; |
470 | 461 | ||
471 | if (!MACHINE_IS_VM) { | 462 | if (!MACHINE_IS_VM) { |
472 | P_ERROR("not running under z/VM, driver not loaded\n"); | 463 | pr_err("The z/VM *MONITOR record device driver cannot be " |
464 | "loaded without z/VM\n"); | ||
473 | return -ENODEV; | 465 | return -ENODEV; |
474 | } | 466 | } |
475 | 467 | ||
@@ -478,7 +470,8 @@ static int __init mon_init(void) | |||
478 | */ | 470 | */ |
479 | rc = iucv_register(&monreader_iucv_handler, 1); | 471 | rc = iucv_register(&monreader_iucv_handler, 1); |
480 | if (rc) { | 472 | if (rc) { |
481 | P_ERROR("failed to register with iucv driver\n"); | 473 | pr_err("The z/VM *MONITOR record device driver failed to " |
474 | "register with IUCV\n"); | ||
482 | return rc; | 475 | return rc; |
483 | } | 476 | } |
484 | 477 | ||
@@ -488,8 +481,8 @@ static int __init mon_init(void) | |||
488 | goto out_iucv; | 481 | goto out_iucv; |
489 | } | 482 | } |
490 | if (rc != SEG_TYPE_SC) { | 483 | if (rc != SEG_TYPE_SC) { |
491 | P_ERROR("segment %s has unsupported type, should be SC\n", | 484 | pr_err("The specified *MONITOR DCSS %s does not have the " |
492 | mon_dcss_name); | 485 | "required type SC\n", mon_dcss_name); |
493 | rc = -EINVAL; | 486 | rc = -EINVAL; |
494 | goto out_iucv; | 487 | goto out_iucv; |
495 | } | 488 | } |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 4d71aa8c1a79..c7d7483bab9a 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> | 8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "monwriter" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) | |||
64 | rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); | 67 | rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); |
65 | if (rc <= 0) | 68 | if (rc <= 0) |
66 | return rc; | 69 | return rc; |
70 | pr_err("Writing monitor data failed with rc=%i\n", rc); | ||
67 | if (rc == 5) | 71 | if (rc == 5) |
68 | return -EPERM; | 72 | return -EPERM; |
69 | printk("DIAG X'DC' error with return code: %i\n", rc); | ||
70 | return -EINVAL; | 73 | return -EINVAL; |
71 | } | 74 | } |
72 | 75 | ||
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index ec9c0bcf66ee..506390496416 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "sclp_cmd" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/completion.h> | 12 | #include <linux/completion.h> |
10 | #include <linux/init.h> | 13 | #include <linux/init.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
@@ -16,9 +19,8 @@ | |||
16 | #include <linux/memory.h> | 19 | #include <linux/memory.h> |
17 | #include <asm/chpid.h> | 20 | #include <asm/chpid.h> |
18 | #include <asm/sclp.h> | 21 | #include <asm/sclp.h> |
19 | #include "sclp.h" | ||
20 | 22 | ||
21 | #define TAG "sclp_cmd: " | 23 | #include "sclp.h" |
22 | 24 | ||
23 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | 25 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 |
24 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | 26 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 |
@@ -169,8 +171,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb) | |||
169 | 171 | ||
170 | /* Check response. */ | 172 | /* Check response. */ |
171 | if (request->status != SCLP_REQ_DONE) { | 173 | if (request->status != SCLP_REQ_DONE) { |
172 | printk(KERN_WARNING TAG "sync request failed " | 174 | pr_warning("sync request failed (cmd=0x%08x, " |
173 | "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status); | 175 | "status=0x%02x)\n", cmd, request->status); |
174 | rc = -EIO; | 176 | rc = -EIO; |
175 | } | 177 | } |
176 | out: | 178 | out: |
@@ -224,8 +226,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) | |||
224 | if (rc) | 226 | if (rc) |
225 | goto out; | 227 | goto out; |
226 | if (sccb->header.response_code != 0x0010) { | 228 | if (sccb->header.response_code != 0x0010) { |
227 | printk(KERN_WARNING TAG "readcpuinfo failed " | 229 | pr_warning("readcpuinfo failed (response=0x%04x)\n", |
228 | "(response=0x%04x)\n", sccb->header.response_code); | 230 | sccb->header.response_code); |
229 | rc = -EIO; | 231 | rc = -EIO; |
230 | goto out; | 232 | goto out; |
231 | } | 233 | } |
@@ -262,8 +264,9 @@ static int do_cpu_configure(sclp_cmdw_t cmd) | |||
262 | case 0x0120: | 264 | case 0x0120: |
263 | break; | 265 | break; |
264 | default: | 266 | default: |
265 | printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, " | 267 | pr_warning("configure cpu failed (cmd=0x%08x, " |
266 | "response=0x%04x)\n", cmd, sccb->header.response_code); | 268 | "response=0x%04x)\n", cmd, |
269 | sccb->header.response_code); | ||
267 | rc = -EIO; | 270 | rc = -EIO; |
268 | break; | 271 | break; |
269 | } | 272 | } |
@@ -626,9 +629,9 @@ static int do_chp_configure(sclp_cmdw_t cmd) | |||
626 | case 0x0450: | 629 | case 0x0450: |
627 | break; | 630 | break; |
628 | default: | 631 | default: |
629 | printk(KERN_WARNING TAG "configure channel-path failed " | 632 | pr_warning("configure channel-path failed " |
630 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | 633 | "(cmd=0x%08x, response=0x%04x)\n", cmd, |
631 | sccb->header.response_code); | 634 | sccb->header.response_code); |
632 | rc = -EIO; | 635 | rc = -EIO; |
633 | break; | 636 | break; |
634 | } | 637 | } |
@@ -695,8 +698,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info) | |||
695 | if (rc) | 698 | if (rc) |
696 | goto out; | 699 | goto out; |
697 | if (sccb->header.response_code != 0x0010) { | 700 | if (sccb->header.response_code != 0x0010) { |
698 | printk(KERN_WARNING TAG "read channel-path info failed " | 701 | pr_warning("read channel-path info failed " |
699 | "(response=0x%04x)\n", sccb->header.response_code); | 702 | "(response=0x%04x)\n", sccb->header.response_code); |
700 | rc = -EIO; | 703 | rc = -EIO; |
701 | goto out; | 704 | goto out; |
702 | } | 705 | } |
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 4cebd6ee6d27..b497afe061cc 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
@@ -5,15 +5,17 @@ | |||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "sclp_config" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
8 | #include <linux/init.h> | 11 | #include <linux/init.h> |
9 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
10 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
11 | #include <linux/sysdev.h> | 14 | #include <linux/sysdev.h> |
12 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
13 | #include <asm/smp.h> | 16 | #include <asm/smp.h> |
14 | #include "sclp.h" | ||
15 | 17 | ||
16 | #define TAG "sclp_config: " | 18 | #include "sclp.h" |
17 | 19 | ||
18 | struct conf_mgm_data { | 20 | struct conf_mgm_data { |
19 | u8 reserved; | 21 | u8 reserved; |
@@ -31,7 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
31 | int cpu; | 33 | int cpu; |
32 | struct sys_device *sysdev; | 34 | struct sys_device *sysdev; |
33 | 35 | ||
34 | printk(KERN_WARNING TAG "cpu capability changed.\n"); | 36 | pr_warning("cpu capability changed.\n"); |
35 | get_online_cpus(); | 37 | get_online_cpus(); |
36 | for_each_online_cpu(cpu) { | 38 | for_each_online_cpu(cpu) { |
37 | sysdev = get_cpu_sysdev(cpu); | 39 | sysdev = get_cpu_sysdev(cpu); |
@@ -78,7 +80,7 @@ static int __init sclp_conf_init(void) | |||
78 | return rc; | 80 | return rc; |
79 | 81 | ||
80 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { | 82 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { |
81 | printk(KERN_WARNING TAG "no configuration management.\n"); | 83 | pr_warning("no configuration management.\n"); |
82 | sclp_unregister(&sclp_conf_register); | 84 | sclp_unregister(&sclp_conf_register); |
83 | rc = -ENOSYS; | 85 | rc = -ENOSYS; |
84 | } | 86 | } |
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index d887bd261d28..62c2647f37f4 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Michael Ernst <mernst@de.ibm.com> | 7 | * Michael Ernst <mernst@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "sclp_cpi" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
12 | #include <linux/stat.h> | 15 | #include <linux/stat.h> |
@@ -20,6 +23,7 @@ | |||
20 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
21 | #include <asm/ebcdic.h> | 24 | #include <asm/ebcdic.h> |
22 | #include <asm/sclp.h> | 25 | #include <asm/sclp.h> |
26 | |||
23 | #include "sclp.h" | 27 | #include "sclp.h" |
24 | #include "sclp_rw.h" | 28 | #include "sclp_rw.h" |
25 | #include "sclp_cpi_sys.h" | 29 | #include "sclp_cpi_sys.h" |
@@ -150,16 +154,16 @@ static int cpi_req(void) | |||
150 | wait_for_completion(&completion); | 154 | wait_for_completion(&completion); |
151 | 155 | ||
152 | if (req->status != SCLP_REQ_DONE) { | 156 | if (req->status != SCLP_REQ_DONE) { |
153 | printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n", | 157 | pr_warning("request failed (status=0x%02x)\n", |
154 | req->status); | 158 | req->status); |
155 | rc = -EIO; | 159 | rc = -EIO; |
156 | goto out_free_req; | 160 | goto out_free_req; |
157 | } | 161 | } |
158 | 162 | ||
159 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; | 163 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; |
160 | if (response != 0x0020) { | 164 | if (response != 0x0020) { |
161 | printk(KERN_WARNING "cpi: failed with " | 165 | pr_warning("request failed with response code 0x%x\n", |
162 | "response code 0x%x\n", response); | 166 | response); |
163 | rc = -EIO; | 167 | rc = -EIO; |
164 | } | 168 | } |
165 | 169 | ||
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 8b854857ba07..6a1c58dc61a7 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c | |||
@@ -5,15 +5,18 @@ | |||
5 | * Author(s): Michael Holzheu | 5 | * Author(s): Michael Holzheu |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "sclp_sdias" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
8 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
9 | #include <asm/sclp.h> | 12 | #include <asm/sclp.h> |
10 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
11 | #include <asm/ipl.h> | 14 | #include <asm/ipl.h> |
15 | |||
12 | #include "sclp.h" | 16 | #include "sclp.h" |
13 | #include "sclp_rw.h" | 17 | #include "sclp_rw.h" |
14 | 18 | ||
15 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) | 19 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) |
16 | #define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) | ||
17 | 20 | ||
18 | #define SDIAS_RETRIES 300 | 21 | #define SDIAS_RETRIES 300 |
19 | #define SDIAS_SLEEP_TICKS 50 | 22 | #define SDIAS_SLEEP_TICKS 50 |
@@ -131,7 +134,7 @@ int sclp_sdias_blk_count(void) | |||
131 | 134 | ||
132 | rc = sdias_sclp_send(&request); | 135 | rc = sdias_sclp_send(&request); |
133 | if (rc) { | 136 | if (rc) { |
134 | ERROR_MSG("sclp_send failed for get_nr_blocks\n"); | 137 | pr_err("sclp_send failed for get_nr_blocks\n"); |
135 | goto out; | 138 | goto out; |
136 | } | 139 | } |
137 | if (sccb.hdr.response_code != 0x0020) { | 140 | if (sccb.hdr.response_code != 0x0020) { |
@@ -145,7 +148,8 @@ int sclp_sdias_blk_count(void) | |||
145 | rc = sccb.evbuf.blk_cnt; | 148 | rc = sccb.evbuf.blk_cnt; |
146 | break; | 149 | break; |
147 | default: | 150 | default: |
148 | ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); | 151 | pr_err("SCLP error: %x\n", |
152 | sccb.evbuf.event_status); | ||
149 | rc = -EIO; | 153 | rc = -EIO; |
150 | goto out; | 154 | goto out; |
151 | } | 155 | } |
@@ -201,7 +205,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | |||
201 | 205 | ||
202 | rc = sdias_sclp_send(&request); | 206 | rc = sdias_sclp_send(&request); |
203 | if (rc) { | 207 | if (rc) { |
204 | ERROR_MSG("sclp_send failed: %x\n", rc); | 208 | pr_err("sclp_send failed: %x\n", rc); |
205 | goto out; | 209 | goto out; |
206 | } | 210 | } |
207 | if (sccb.hdr.response_code != 0x0020) { | 211 | if (sccb.hdr.response_code != 0x0020) { |
@@ -219,9 +223,9 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | |||
219 | case EVSTATE_NO_DATA: | 223 | case EVSTATE_NO_DATA: |
220 | TRACE("no data\n"); | 224 | TRACE("no data\n"); |
221 | default: | 225 | default: |
222 | ERROR_MSG("Error from SCLP while copying hsa. " | 226 | pr_err("Error from SCLP while copying hsa. " |
223 | "Event status = %x\n", | 227 | "Event status = %x\n", |
224 | sccb.evbuf.event_status); | 228 | sccb.evbuf.event_status); |
225 | rc = -EIO; | 229 | rc = -EIO; |
226 | } | 230 | } |
227 | out: | 231 | out: |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 9854f19f5e62..a839aa531d7c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -583,23 +583,6 @@ sclp_vt220_chars_in_buffer(struct tty_struct *tty) | |||
583 | return count; | 583 | return count; |
584 | } | 584 | } |
585 | 585 | ||
586 | static void | ||
587 | __sclp_vt220_flush_buffer(void) | ||
588 | { | ||
589 | unsigned long flags; | ||
590 | |||
591 | sclp_vt220_emit_current(); | ||
592 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
593 | if (timer_pending(&sclp_vt220_timer)) | ||
594 | del_timer(&sclp_vt220_timer); | ||
595 | while (sclp_vt220_outqueue_count > 0) { | ||
596 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
597 | sclp_sync_wait(); | ||
598 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
599 | } | ||
600 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
601 | } | ||
602 | |||
603 | /* | 586 | /* |
604 | * Pass on all buffers to the hardware. Return only when there are no more | 587 | * Pass on all buffers to the hardware. Return only when there are no more |
605 | * buffers pending. | 588 | * buffers pending. |
@@ -745,6 +728,22 @@ sclp_vt220_con_device(struct console *c, int *index) | |||
745 | return sclp_vt220_driver; | 728 | return sclp_vt220_driver; |
746 | } | 729 | } |
747 | 730 | ||
731 | static void __sclp_vt220_flush_buffer(void) | ||
732 | { | ||
733 | unsigned long flags; | ||
734 | |||
735 | sclp_vt220_emit_current(); | ||
736 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
737 | if (timer_pending(&sclp_vt220_timer)) | ||
738 | del_timer(&sclp_vt220_timer); | ||
739 | while (sclp_vt220_outqueue_count > 0) { | ||
740 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
741 | sclp_sync_wait(); | ||
742 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
743 | } | ||
744 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
745 | } | ||
746 | |||
748 | static int | 747 | static int |
749 | sclp_vt220_notify(struct notifier_block *self, | 748 | sclp_vt220_notify(struct notifier_block *self, |
750 | unsigned long event, void *data) | 749 | unsigned long event, void *data) |
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 09e7d9bf438b..a6087cec55b4 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
@@ -11,12 +11,14 @@ | |||
11 | * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS | 11 | * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define KMSG_COMPONENT "vmcp" | ||
15 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
16 | |||
14 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
15 | #include <linux/init.h> | 18 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
17 | #include <linux/miscdevice.h> | 20 | #include <linux/miscdevice.h> |
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <asm/cpcmd.h> | 22 | #include <asm/cpcmd.h> |
21 | #include <asm/debug.h> | 23 | #include <asm/debug.h> |
22 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
@@ -26,8 +28,6 @@ MODULE_LICENSE("GPL"); | |||
26 | MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); | 28 | MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); |
27 | MODULE_DESCRIPTION("z/VM CP interface"); | 29 | MODULE_DESCRIPTION("z/VM CP interface"); |
28 | 30 | ||
29 | #define PRINTK_HEADER "vmcp: " | ||
30 | |||
31 | static debug_info_t *vmcp_debug; | 31 | static debug_info_t *vmcp_debug; |
32 | 32 | ||
33 | static int vmcp_open(struct inode *inode, struct file *file) | 33 | static int vmcp_open(struct inode *inode, struct file *file) |
@@ -41,13 +41,11 @@ static int vmcp_open(struct inode *inode, struct file *file) | |||
41 | if (!session) | 41 | if (!session) |
42 | return -ENOMEM; | 42 | return -ENOMEM; |
43 | 43 | ||
44 | lock_kernel(); | ||
45 | session->bufsize = PAGE_SIZE; | 44 | session->bufsize = PAGE_SIZE; |
46 | session->response = NULL; | 45 | session->response = NULL; |
47 | session->resp_size = 0; | 46 | session->resp_size = 0; |
48 | mutex_init(&session->mutex); | 47 | mutex_init(&session->mutex); |
49 | file->private_data = session; | 48 | file->private_data = session; |
50 | unlock_kernel(); | ||
51 | return nonseekable_open(inode, file); | 49 | return nonseekable_open(inode, file); |
52 | } | 50 | } |
53 | 51 | ||
@@ -193,7 +191,8 @@ static int __init vmcp_init(void) | |||
193 | int ret; | 191 | int ret; |
194 | 192 | ||
195 | if (!MACHINE_IS_VM) { | 193 | if (!MACHINE_IS_VM) { |
196 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); | 194 | pr_warning("The z/VM CP interface device driver cannot be " |
195 | "loaded without z/VM\n"); | ||
197 | return -ENODEV; | 196 | return -ENODEV; |
198 | } | 197 | } |
199 | 198 | ||
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 24762727bc27..aabbeb909cc6 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -10,6 +10,10 @@ | |||
10 | * Stefan Weinhuber <wein@de.ibm.com> | 10 | * Stefan Weinhuber <wein@de.ibm.com> |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | |||
14 | #define KMSG_COMPONENT "vmlogrdr" | ||
15 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
16 | |||
13 | #include <linux/module.h> | 17 | #include <linux/module.h> |
14 | #include <linux/init.h> | 18 | #include <linux/init.h> |
15 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
@@ -28,8 +32,6 @@ | |||
28 | #include <linux/smp_lock.h> | 32 | #include <linux/smp_lock.h> |
29 | #include <linux/string.h> | 33 | #include <linux/string.h> |
30 | 34 | ||
31 | |||
32 | |||
33 | MODULE_AUTHOR | 35 | MODULE_AUTHOR |
34 | ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" | 36 | ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" |
35 | " Stefan Weinhuber (wein@de.ibm.com)"); | 37 | " Stefan Weinhuber (wein@de.ibm.com)"); |
@@ -174,8 +176,7 @@ static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
174 | struct vmlogrdr_priv_t * logptr = path->private; | 176 | struct vmlogrdr_priv_t * logptr = path->private; |
175 | u8 reason = (u8) ipuser[8]; | 177 | u8 reason = (u8) ipuser[8]; |
176 | 178 | ||
177 | printk (KERN_ERR "vmlogrdr: connection severed with" | 179 | pr_err("vmlogrdr: connection severed with reason %i\n", reason); |
178 | " reason %i\n", reason); | ||
179 | 180 | ||
180 | iucv_path_sever(path, NULL); | 181 | iucv_path_sever(path, NULL); |
181 | kfree(path); | 182 | kfree(path); |
@@ -333,8 +334,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
333 | if (logptr->autorecording) { | 334 | if (logptr->autorecording) { |
334 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); | 335 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); |
335 | if (ret) | 336 | if (ret) |
336 | printk (KERN_WARNING "vmlogrdr: failed to start " | 337 | pr_warning("vmlogrdr: failed to start " |
337 | "recording automatically\n"); | 338 | "recording automatically\n"); |
338 | } | 339 | } |
339 | 340 | ||
340 | /* create connection to the system service */ | 341 | /* create connection to the system service */ |
@@ -345,9 +346,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
345 | logptr->system_service, NULL, NULL, | 346 | logptr->system_service, NULL, NULL, |
346 | logptr); | 347 | logptr); |
347 | if (connect_rc) { | 348 | if (connect_rc) { |
348 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " | 349 | pr_err("vmlogrdr: iucv connection to %s " |
349 | "failed with rc %i \n", logptr->system_service, | 350 | "failed with rc %i \n", |
350 | connect_rc); | 351 | logptr->system_service, connect_rc); |
351 | goto out_path; | 352 | goto out_path; |
352 | } | 353 | } |
353 | 354 | ||
@@ -388,8 +389,8 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) | |||
388 | if (logptr->autorecording) { | 389 | if (logptr->autorecording) { |
389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 390 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
390 | if (ret) | 391 | if (ret) |
391 | printk (KERN_WARNING "vmlogrdr: failed to stop " | 392 | pr_warning("vmlogrdr: failed to stop " |
392 | "recording automatically\n"); | 393 | "recording automatically\n"); |
393 | } | 394 | } |
394 | logptr->dev_in_use = 0; | 395 | logptr->dev_in_use = 0; |
395 | 396 | ||
@@ -823,8 +824,7 @@ static int __init vmlogrdr_init(void) | |||
823 | dev_t dev; | 824 | dev_t dev; |
824 | 825 | ||
825 | if (! MACHINE_IS_VM) { | 826 | if (! MACHINE_IS_VM) { |
826 | printk (KERN_ERR "vmlogrdr: not running under VM, " | 827 | pr_err("not running under VM, driver not loaded.\n"); |
827 | "driver not loaded.\n"); | ||
828 | return -ENODEV; | 828 | return -ENODEV; |
829 | } | 829 | } |
830 | 830 | ||
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 9020eba620ee..5dcef81fc9d9 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Munzert <munzert@de.ibm.com> | 8 | * Frank Munzert <munzert@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "vmur" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/cdev.h> | 14 | #include <linux/cdev.h> |
12 | #include <linux/smp_lock.h> | 15 | #include <linux/smp_lock.h> |
13 | 16 | ||
@@ -40,8 +43,6 @@ MODULE_AUTHOR("IBM Corporation"); | |||
40 | MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); | 43 | MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); |
41 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
42 | 45 | ||
43 | #define PRINTK_HEADER "vmur: " | ||
44 | |||
45 | static dev_t ur_first_dev_maj_min; | 46 | static dev_t ur_first_dev_maj_min; |
46 | static struct class *vmur_class; | 47 | static struct class *vmur_class; |
47 | static struct debug_info *vmur_dbf; | 48 | static struct debug_info *vmur_dbf; |
@@ -987,7 +988,8 @@ static int __init ur_init(void) | |||
987 | dev_t dev; | 988 | dev_t dev; |
988 | 989 | ||
989 | if (!MACHINE_IS_VM) { | 990 | if (!MACHINE_IS_VM) { |
990 | PRINT_ERR("%s is only available under z/VM.\n", ur_banner); | 991 | pr_err("The %s cannot be loaded without z/VM\n", |
992 | ur_banner); | ||
991 | return -ENODEV; | 993 | return -ENODEV; |
992 | } | 994 | } |
993 | 995 | ||
@@ -1006,7 +1008,8 @@ static int __init ur_init(void) | |||
1006 | 1008 | ||
1007 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); | 1009 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); |
1008 | if (rc) { | 1010 | if (rc) { |
1009 | PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc); | 1011 | pr_err("Kernel function alloc_chrdev_region failed with " |
1012 | "error code %d\n", rc); | ||
1010 | goto fail_unregister_driver; | 1013 | goto fail_unregister_driver; |
1011 | } | 1014 | } |
1012 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); | 1015 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); |
@@ -1016,7 +1019,7 @@ static int __init ur_init(void) | |||
1016 | rc = PTR_ERR(vmur_class); | 1019 | rc = PTR_ERR(vmur_class); |
1017 | goto fail_unregister_region; | 1020 | goto fail_unregister_region; |
1018 | } | 1021 | } |
1019 | PRINT_INFO("%s loaded.\n", ur_banner); | 1022 | pr_info("%s loaded.\n", ur_banner); |
1020 | return 0; | 1023 | return 0; |
1021 | 1024 | ||
1022 | fail_unregister_region: | 1025 | fail_unregister_region: |
@@ -1034,7 +1037,7 @@ static void __exit ur_exit(void) | |||
1034 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); | 1037 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); |
1035 | ccw_driver_unregister(&ur_driver); | 1038 | ccw_driver_unregister(&ur_driver); |
1036 | debug_unregister(vmur_dbf); | 1039 | debug_unregister(vmur_dbf); |
1037 | PRINT_INFO("%s unloaded.\n", ur_banner); | 1040 | pr_info("%s unloaded.\n", ur_banner); |
1038 | } | 1041 | } |
1039 | 1042 | ||
1040 | module_init(ur_init); | 1043 | module_init(ur_init); |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 7fd84be11931..eefc6611412e 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Author(s): Michael Holzheu | 9 | * Author(s): Michael Holzheu |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "zdump" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
13 | #include <linux/miscdevice.h> | 16 | #include <linux/miscdevice.h> |
14 | #include <linux/utsname.h> | 17 | #include <linux/utsname.h> |
@@ -24,8 +27,6 @@ | |||
24 | #include "sclp.h" | 27 | #include "sclp.h" |
25 | 28 | ||
26 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) | 29 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) |
27 | #define MSG(x...) printk( KERN_ALERT x ) | ||
28 | #define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) | ||
29 | 30 | ||
30 | #define TO_USER 0 | 31 | #define TO_USER 0 |
31 | #define TO_KERNEL 1 | 32 | #define TO_KERNEL 1 |
@@ -563,19 +564,19 @@ static int __init sys_info_init(enum arch_id arch) | |||
563 | 564 | ||
564 | switch (arch) { | 565 | switch (arch) { |
565 | case ARCH_S390X: | 566 | case ARCH_S390X: |
566 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | 567 | pr_alert("DETECTED 'S390X (64 bit) OS'\n"); |
567 | sys_info.sa_base = SAVE_AREA_BASE_S390X; | 568 | sys_info.sa_base = SAVE_AREA_BASE_S390X; |
568 | sys_info.sa_size = sizeof(struct save_area_s390x); | 569 | sys_info.sa_size = sizeof(struct save_area_s390x); |
569 | set_s390x_lc_mask(&sys_info.lc_mask); | 570 | set_s390x_lc_mask(&sys_info.lc_mask); |
570 | break; | 571 | break; |
571 | case ARCH_S390: | 572 | case ARCH_S390: |
572 | MSG("DETECTED 'S390 (32 bit) OS'\n"); | 573 | pr_alert("DETECTED 'S390 (32 bit) OS'\n"); |
573 | sys_info.sa_base = SAVE_AREA_BASE_S390; | 574 | sys_info.sa_base = SAVE_AREA_BASE_S390; |
574 | sys_info.sa_size = sizeof(struct save_area_s390); | 575 | sys_info.sa_size = sizeof(struct save_area_s390); |
575 | set_s390_lc_mask(&sys_info.lc_mask); | 576 | set_s390_lc_mask(&sys_info.lc_mask); |
576 | break; | 577 | break; |
577 | default: | 578 | default: |
578 | ERROR_MSG("unknown architecture 0x%x.\n",arch); | 579 | pr_alert("0x%x is an unknown architecture.\n",arch); |
579 | return -EINVAL; | 580 | return -EINVAL; |
580 | } | 581 | } |
581 | sys_info.arch = arch; | 582 | sys_info.arch = arch; |
@@ -674,7 +675,8 @@ static int __init zcore_init(void) | |||
674 | 675 | ||
675 | #ifndef __s390x__ | 676 | #ifndef __s390x__ |
676 | if (arch == ARCH_S390X) { | 677 | if (arch == ARCH_S390X) { |
677 | ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); | 678 | pr_alert("The 32-bit dump tool cannot be used for a " |
679 | "64-bit system\n"); | ||
678 | rc = -EINVAL; | 680 | rc = -EINVAL; |
679 | goto fail; | 681 | goto fail; |
680 | } | 682 | } |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2f547b840ef0..fe00be3675cd 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Arnd Bergmann (arndb@de.ibm.com) | 9 | * Arnd Bergmann (arndb@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "cio" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
13 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid, | |||
50 | { | 53 | { |
51 | if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { | 54 | if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { |
52 | if (msgtrigger) | 55 | if (msgtrigger) |
53 | printk(KERN_WARNING "cio: Invalid cio_ignore range " | 56 | pr_warning("0.%x.%04x to 0.%x.%04x is not a valid " |
54 | "0.%x.%04x-0.%x.%04x\n", from_ssid, from, | 57 | "range for cio_ignore\n", from_ssid, from, |
55 | to_ssid, to); | 58 | to_ssid, to); |
59 | |||
56 | return 1; | 60 | return 1; |
57 | } | 61 | } |
58 | 62 | ||
@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, | |||
140 | rc = 0; | 144 | rc = 0; |
141 | out: | 145 | out: |
142 | if (rc && msgtrigger) | 146 | if (rc && msgtrigger) |
143 | printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n", | 147 | pr_warning("%s is not a valid device for the cio_ignore " |
144 | str); | 148 | "kernel parameter\n", str); |
145 | 149 | ||
146 | return rc; | 150 | return rc; |
147 | } | 151 | } |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 3ac2c2019f5e..918e6fce2573 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <asm/ccwdev.h> | 19 | #include <asm/ccwdev.h> |
20 | #include <asm/ccwgroup.h> | 20 | #include <asm/ccwgroup.h> |
21 | 21 | ||
22 | #define CCW_BUS_ID_SIZE 20 | ||
23 | |||
22 | /* In Linux 2.4, we had a channel device layer called "chandev" | 24 | /* In Linux 2.4, we had a channel device layer called "chandev" |
23 | * that did all sorts of obscure stuff for networking devices. | 25 | * that did all sorts of obscure stuff for networking devices. |
24 | * This is another driver that serves as a replacement for just | 26 | * This is another driver that serves as a replacement for just |
@@ -89,15 +91,23 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const | |||
89 | 91 | ||
90 | gdev = to_ccwgroupdev(dev); | 92 | gdev = to_ccwgroupdev(dev); |
91 | 93 | ||
92 | if (gdev->state != CCWGROUP_OFFLINE) | 94 | /* Prevent concurrent online/offline processing and ungrouping. */ |
93 | return -EINVAL; | 95 | if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) |
94 | 96 | return -EAGAIN; | |
97 | if (gdev->state != CCWGROUP_OFFLINE) { | ||
98 | rc = -EINVAL; | ||
99 | goto out; | ||
100 | } | ||
95 | /* Note that we cannot unregister the device from one of its | 101 | /* Note that we cannot unregister the device from one of its |
96 | * attribute methods, so we have to use this roundabout approach. | 102 | * attribute methods, so we have to use this roundabout approach. |
97 | */ | 103 | */ |
98 | rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); | 104 | rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); |
99 | if (rc) | 105 | out: |
100 | count = rc; | 106 | if (rc) { |
107 | /* Release onoff "lock" when ungrouping failed. */ | ||
108 | atomic_set(&gdev->onoff, 0); | ||
109 | return rc; | ||
110 | } | ||
101 | return count; | 111 | return count; |
102 | } | 112 | } |
103 | 113 | ||
@@ -172,7 +182,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) | |||
172 | len = end - start + 1; | 182 | len = end - start + 1; |
173 | end++; | 183 | end++; |
174 | } | 184 | } |
175 | if (len < BUS_ID_SIZE) { | 185 | if (len < CCW_BUS_ID_SIZE) { |
176 | strlcpy(bus_id, start, len); | 186 | strlcpy(bus_id, start, len); |
177 | rc = 0; | 187 | rc = 0; |
178 | } else | 188 | } else |
@@ -181,7 +191,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) | |||
181 | return rc; | 191 | return rc; |
182 | } | 192 | } |
183 | 193 | ||
184 | static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE]) | 194 | static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE]) |
185 | { | 195 | { |
186 | int cssid, ssid, devno; | 196 | int cssid, ssid, devno; |
187 | 197 | ||
@@ -213,7 +223,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, | |||
213 | { | 223 | { |
214 | struct ccwgroup_device *gdev; | 224 | struct ccwgroup_device *gdev; |
215 | int rc, i; | 225 | int rc, i; |
216 | char tmp_bus_id[BUS_ID_SIZE]; | 226 | char tmp_bus_id[CCW_BUS_ID_SIZE]; |
217 | const char *curr_buf; | 227 | const char *curr_buf; |
218 | 228 | ||
219 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), | 229 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), |
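[Editor's note] Besides the message cleanup, the ccwgroup hunk above changes behaviour: ungrouping now claims the same atomic "onoff" flag that the online/offline paths use, so a concurrent state change is refused with -EAGAIN instead of racing, and the flag is released only when ungrouping fails (on success the scheduled callback tears the device down). A minimal sketch of that claim/release pattern; the struct and function names are illustrative:

    #include <asm/atomic.h>
    #include <linux/errno.h>

    struct demo_group {
            atomic_t onoff;         /* 0 = idle, 1 = online/offline/ungroup in progress */
    };

    static int demo_ungroup(struct demo_group *g)
    {
            int rc = 0;

            if (atomic_cmpxchg(&g->onoff, 0, 1) != 0)
                    return -EAGAIN;         /* another state change is running */
            /* ... schedule or perform the ungroup work, setting rc on failure ... */
            if (rc)
                    atomic_set(&g->onoff, 0);       /* release the "lock" only on failure */
            return rc;
    }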
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 29826fdd47b8..ebab6ea4659b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "cio" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -333,6 +336,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | |||
333 | struct chp_config_data *data; | 336 | struct chp_config_data *data; |
334 | struct chp_id chpid; | 337 | struct chp_id chpid; |
335 | int num; | 338 | int num; |
339 | char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; | ||
336 | 340 | ||
337 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | 341 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); |
338 | if (sei_area->rs != 0) | 342 | if (sei_area->rs != 0) |
@@ -343,8 +347,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | |||
343 | if (!chp_test_bit(data->map, num)) | 347 | if (!chp_test_bit(data->map, num)) |
344 | continue; | 348 | continue; |
345 | chpid.id = num; | 349 | chpid.id = num; |
346 | printk(KERN_WARNING "cio: processing configure event %d for " | 350 | pr_notice("Processing %s for channel path %x.%02x\n", |
347 | "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); | 351 | events[data->op], chpid.cssid, chpid.id); |
348 | switch (data->op) { | 352 | switch (data->op) { |
349 | case 0: | 353 | case 0: |
350 | chp_cfg_schedule(chpid, 1); | 354 | chp_cfg_schedule(chpid, 1); |
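[Editor's note] The chsc hunk above replaces the raw numeric event code in the log message with a lookup in a small string table indexed by data->op. A defensive variant of that lookup with an explicit bounds check for unexpected op codes; the helper name is illustrative, the table contents are taken from the hunk:

    #include <linux/kernel.h>

    static const char *const chp_config_events[] = {
            "configure", "deconfigure", "cancel deconfigure",
    };

    static const char *chp_config_event_name(unsigned int op)
    {
            if (op >= ARRAY_SIZE(chp_config_events))
                    return "unknown";       /* guard against op codes outside 0..2 */
            return chp_config_events[op];
    }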
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index f49f0e502b8d..0a2f2edafc03 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -61,7 +61,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) | |||
61 | } | 61 | } |
62 | private->request = NULL; | 62 | private->request = NULL; |
63 | memcpy(&request->irb, irb, sizeof(*irb)); | 63 | memcpy(&request->irb, irb, sizeof(*irb)); |
64 | stsch(sch->schid, &sch->schib); | 64 | cio_update_schib(sch); |
65 | complete(&request->completion); | 65 | complete(&request->completion); |
66 | put_device(&sch->dev); | 66 | put_device(&sch->dev); |
67 | } | 67 | } |
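[Editor's note] The cio.c hunks that follow retire cio_modify() in favour of a config/commit scheme: the desired subchannel settings live in sch->config, are applied to a local schib copy, written out with msch_err(), then read back and compared before being accepted. A minimal, hardware-free sketch of that commit-and-verify loop; all types and helpers here are illustrative stand-ins for the real schib/msch/stsch machinery:

    #include <linux/errno.h>

    struct demo_config { unsigned int intparm; unsigned int ena; };
    struct demo_sch { struct demo_config config; struct demo_config hw; };

    /* stand-ins for msch/stsch on a real subchannel */
    static int demo_write(struct demo_sch *s, struct demo_config *c) { s->hw = *c; return 0; }
    static int demo_read(struct demo_sch *s, struct demo_config *c) { *c = s->hw; return 0; }

    static int demo_commit_config(struct demo_sch *s)
    {
            struct demo_config local;
            int retry;

            for (retry = 0; retry < 5; retry++) {
                    local = s->config;              /* copy desired changes to a local view */
                    if (demo_write(s, &local))
                            return -EIO;
                    if (demo_read(s, &local))
                            return -ENODEV;
                    if (local.intparm == s->config.intparm &&
                        local.ena == s->config.ena)
                            return 0;               /* device accepted the change */
            }
            return -EAGAIN;
    }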
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3db2c386546f..8a8df7552969 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "cio" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -104,44 +107,6 @@ cio_get_options (struct subchannel *sch) | |||
104 | return flags; | 107 | return flags; |
105 | } | 108 | } |
106 | 109 | ||
107 | /* | ||
108 | * Use tpi to get a pending interrupt, call the interrupt handler and | ||
109 | * return a pointer to the subchannel structure. | ||
110 | */ | ||
111 | static int | ||
112 | cio_tpi(void) | ||
113 | { | ||
114 | struct tpi_info *tpi_info; | ||
115 | struct subchannel *sch; | ||
116 | struct irb *irb; | ||
117 | int irq_context; | ||
118 | |||
119 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | ||
120 | if (tpi (NULL) != 1) | ||
121 | return 0; | ||
122 | irb = (struct irb *) __LC_IRB; | ||
123 | /* Store interrupt response block to lowcore. */ | ||
124 | if (tsch (tpi_info->schid, irb) != 0) | ||
125 | /* Not status pending or not operational. */ | ||
126 | return 1; | ||
127 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | ||
128 | if (!sch) | ||
129 | return 1; | ||
130 | irq_context = in_interrupt(); | ||
131 | if (!irq_context) | ||
132 | local_bh_disable(); | ||
133 | irq_enter (); | ||
134 | spin_lock(sch->lock); | ||
135 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | ||
136 | if (sch->driver && sch->driver->irq) | ||
137 | sch->driver->irq(sch); | ||
138 | spin_unlock(sch->lock); | ||
139 | irq_exit (); | ||
140 | if (!irq_context) | ||
141 | _local_bh_enable(); | ||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | static int | 110 | static int |
146 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | 111 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) |
147 | { | 112 | { |
@@ -152,11 +117,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | |||
152 | else | 117 | else |
153 | sch->lpm = 0; | 118 | sch->lpm = 0; |
154 | 119 | ||
155 | stsch (sch->schid, &sch->schib); | ||
156 | |||
157 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " | 120 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " |
158 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, | 121 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, |
159 | sch->schid.sch_no); | 122 | sch->schid.sch_no); |
123 | |||
124 | if (cio_update_schib(sch)) | ||
125 | return -ENODEV; | ||
126 | |||
160 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); | 127 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); |
161 | CIO_TRACE_EVENT(0, dbf_text); | 128 | CIO_TRACE_EVENT(0, dbf_text); |
162 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); | 129 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); |
@@ -354,7 +321,8 @@ cio_cancel (struct subchannel *sch) | |||
354 | switch (ccode) { | 321 | switch (ccode) { |
355 | case 0: /* success */ | 322 | case 0: /* success */ |
356 | /* Update information in scsw. */ | 323 | /* Update information in scsw. */ |
357 | stsch (sch->schid, &sch->schib); | 324 | if (cio_update_schib(sch)) |
325 | return -ENODEV; | ||
358 | return 0; | 326 | return 0; |
359 | case 1: /* status pending */ | 327 | case 1: /* status pending */ |
360 | return -EBUSY; | 328 | return -EBUSY; |
@@ -365,30 +333,70 @@ cio_cancel (struct subchannel *sch) | |||
365 | } | 333 | } |
366 | } | 334 | } |
367 | 335 | ||
336 | |||
337 | static void cio_apply_config(struct subchannel *sch, struct schib *schib) | ||
338 | { | ||
339 | schib->pmcw.intparm = sch->config.intparm; | ||
340 | schib->pmcw.mbi = sch->config.mbi; | ||
341 | schib->pmcw.isc = sch->config.isc; | ||
342 | schib->pmcw.ena = sch->config.ena; | ||
343 | schib->pmcw.mme = sch->config.mme; | ||
344 | schib->pmcw.mp = sch->config.mp; | ||
345 | schib->pmcw.csense = sch->config.csense; | ||
346 | schib->pmcw.mbfc = sch->config.mbfc; | ||
347 | if (sch->config.mbfc) | ||
348 | schib->mba = sch->config.mba; | ||
349 | } | ||
350 | |||
351 | static int cio_check_config(struct subchannel *sch, struct schib *schib) | ||
352 | { | ||
353 | return (schib->pmcw.intparm == sch->config.intparm) && | ||
354 | (schib->pmcw.mbi == sch->config.mbi) && | ||
355 | (schib->pmcw.isc == sch->config.isc) && | ||
356 | (schib->pmcw.ena == sch->config.ena) && | ||
357 | (schib->pmcw.mme == sch->config.mme) && | ||
358 | (schib->pmcw.mp == sch->config.mp) && | ||
359 | (schib->pmcw.csense == sch->config.csense) && | ||
360 | (schib->pmcw.mbfc == sch->config.mbfc) && | ||
361 | (!sch->config.mbfc || (schib->mba == sch->config.mba)); | ||
362 | } | ||
363 | |||
368 | /* | 364 | /* |
369 | * Function: cio_modify | 365 | * cio_commit_config - apply configuration to the subchannel |
370 | * Issues a "Modify Subchannel" on the specified subchannel | ||
371 | */ | 366 | */ |
372 | int | 367 | int cio_commit_config(struct subchannel *sch) |
373 | cio_modify (struct subchannel *sch) | ||
374 | { | 368 | { |
375 | int ccode, retry, ret; | 369 | struct schib schib; |
370 | int ccode, retry, ret = 0; | ||
371 | |||
372 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | ||
373 | return -ENODEV; | ||
376 | 374 | ||
377 | ret = 0; | ||
378 | for (retry = 0; retry < 5; retry++) { | 375 | for (retry = 0; retry < 5; retry++) { |
379 | ccode = msch_err (sch->schid, &sch->schib); | 376 | /* copy desired changes to local schib */ |
380 | if (ccode < 0) /* -EIO if msch gets a program check. */ | 377 | cio_apply_config(sch, &schib); |
378 | ccode = msch_err(sch->schid, &schib); | ||
379 | if (ccode < 0) /* -EIO if msch gets a program check. */ | ||
381 | return ccode; | 380 | return ccode; |
382 | switch (ccode) { | 381 | switch (ccode) { |
383 | case 0: /* successfull */ | 382 | case 0: /* successfull */ |
384 | return 0; | 383 | if (stsch(sch->schid, &schib) || |
385 | case 1: /* status pending */ | 384 | !css_sch_is_valid(&schib)) |
385 | return -ENODEV; | ||
386 | if (cio_check_config(sch, &schib)) { | ||
387 | /* commit changes from local schib */ | ||
388 | memcpy(&sch->schib, &schib, sizeof(schib)); | ||
389 | return 0; | ||
390 | } | ||
391 | ret = -EAGAIN; | ||
392 | break; | ||
393 | case 1: /* status pending */ | ||
386 | return -EBUSY; | 394 | return -EBUSY; |
387 | case 2: /* busy */ | 395 | case 2: /* busy */ |
388 | udelay (100); /* allow for recovery */ | 396 | udelay(100); /* allow for recovery */ |
389 | ret = -EBUSY; | 397 | ret = -EBUSY; |
390 | break; | 398 | break; |
391 | case 3: /* not operational */ | 399 | case 3: /* not operational */ |
392 | return -ENODEV; | 400 | return -ENODEV; |
393 | } | 401 | } |
394 | } | 402 | } |
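
The new cio_commit_config() centralizes the modify-subchannel handling that callers used to open-code: copy the desired values from sch->config into a local schib, issue msch, then store the subchannel back and check that the hardware really took the change, retrying a bounded number of times when busy. The sketch below models that apply/commit/verify loop in plain user-space C; device_read() and device_write() are stand-ins for stsch/msch and the retry limit is illustrative, this is not the kernel API.

    /* Model of the apply/commit/verify loop in cio_commit_config().
     * device_read()/device_write() are stand-ins for stsch()/msch(). */
    #include <stdio.h>

    struct config { unsigned int intparm; unsigned char ena, isc; };
    struct hw_state { struct config cur; int busy; };

    static struct hw_state hw = { .busy = 1 };

    static int device_read(struct config *c)        /* models stsch */
    {
        *c = hw.cur;
        return 0;
    }

    static int device_write(const struct config *c) /* models msch */
    {
        if (hw.busy-- > 0)
            return 2;                               /* cc 2: busy */
        hw.cur = *c;
        return 0;
    }

    static int commit_config(struct config *cached, const struct config *want)
    {
        struct config local;
        int retry, cc;

        if (device_read(&local))
            return -1;
        for (retry = 0; retry < 5; retry++) {
            /* copy desired changes into the local copy only */
            local.intparm = want->intparm;
            local.ena = want->ena;
            local.isc = want->isc;
            cc = device_write(&local);
            if (cc == 2)
                continue;                           /* busy: retry */
            if (cc)
                return -1;
            /* re-read and check that the hardware took the change */
            if (device_read(&local))
                return -1;
            if (local.intparm == want->intparm &&
                local.ena == want->ena && local.isc == want->isc) {
                *cached = local;                    /* commit to cached copy */
                return 0;
            }
        }
        return -1;                                  /* retries exhausted */
    }

    int main(void)
    {
        struct config cached = { 0 };
        struct config want = { .intparm = 0x1234, .ena = 1, .isc = 3 };

        printf("commit: %d, cached.ena=%u\n",
               commit_config(&cached, &want), (unsigned)cached.ena);
        return 0;
    }
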
@@ -396,6 +404,23 @@ cio_modify (struct subchannel *sch) | |||
396 | } | 404 | } |
397 | 405 | ||
398 | /** | 406 | /** |
407 | * cio_update_schib - Perform stsch and update schib if subchannel is valid. | ||
408 | * @sch: subchannel on which to perform stsch | ||
409 | * Return zero on success, -ENODEV otherwise. | ||
410 | */ | ||
411 | int cio_update_schib(struct subchannel *sch) | ||
412 | { | ||
413 | struct schib schib; | ||
414 | |||
415 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | ||
416 | return -ENODEV; | ||
417 | |||
418 | memcpy(&sch->schib, &schib, sizeof(schib)); | ||
419 | return 0; | ||
420 | } | ||
421 | EXPORT_SYMBOL_GPL(cio_update_schib); | ||
422 | |||
423 | /** | ||
399 | * cio_enable_subchannel - enable a subchannel. | 424 | * cio_enable_subchannel - enable a subchannel. |
400 | * @sch: subchannel to be enabled | 425 | * @sch: subchannel to be enabled |
401 | * @intparm: interruption parameter to set | 426 | * @intparm: interruption parameter to set |
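
cio_update_schib() replaces the bare stsch() calls elsewhere in the driver: it stores into a local block first, rejects the result if the subchannel is no longer valid, and only then overwrites the cached copy, so sch->schib never ends up holding data from a vanished subchannel. A minimal model of that validated-read idea, with store_status() standing in for stsch() and the validity test reduced to a flag:

    #include <stdio.h>
    #include <string.h>

    struct schib_model { int valid; unsigned int pom; };

    /* stand-in for stsch(): nonzero return would mean a bad condition code */
    static int store_status(struct schib_model *out)
    {
        static const struct schib_model hw = { .valid = 1, .pom = 0x80 };

        *out = hw;
        return 0;
    }

    /* overwrite the cached copy only if the freshly stored block is valid */
    static int update_cached(struct schib_model *cached)
    {
        struct schib_model tmp;

        if (store_status(&tmp) || !tmp.valid)
            return -1;                      /* like -ENODEV */
        memcpy(cached, &tmp, sizeof(tmp));
        return 0;
    }

    int main(void)
    {
        struct schib_model cached = { 0 };

        if (update_cached(&cached) == 0)
            printf("pom=%#x\n", cached.pom);
        return 0;
    }
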
@@ -403,7 +428,6 @@ cio_modify (struct subchannel *sch) | |||
403 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 428 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
404 | { | 429 | { |
405 | char dbf_txt[15]; | 430 | char dbf_txt[15]; |
406 | int ccode; | ||
407 | int retry; | 431 | int retry; |
408 | int ret; | 432 | int ret; |
409 | 433 | ||
@@ -412,33 +436,27 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | |||
412 | 436 | ||
413 | if (sch_is_pseudo_sch(sch)) | 437 | if (sch_is_pseudo_sch(sch)) |
414 | return -EINVAL; | 438 | return -EINVAL; |
415 | ccode = stsch (sch->schid, &sch->schib); | 439 | if (cio_update_schib(sch)) |
416 | if (ccode) | ||
417 | return -ENODEV; | 440 | return -ENODEV; |
418 | 441 | ||
419 | for (retry = 5, ret = 0; retry > 0; retry--) { | 442 | sch->config.ena = 1; |
420 | sch->schib.pmcw.ena = 1; | 443 | sch->config.isc = sch->isc; |
421 | sch->schib.pmcw.isc = sch->isc; | 444 | sch->config.intparm = intparm; |
422 | sch->schib.pmcw.intparm = intparm; | 445 | |
423 | ret = cio_modify(sch); | 446 | for (retry = 0; retry < 3; retry++) { |
424 | if (ret == -ENODEV) | 447 | ret = cio_commit_config(sch); |
425 | break; | 448 | if (ret == -EIO) { |
426 | if (ret == -EIO) | ||
427 | /* | 449 | /* |
428 | * Got a program check in cio_modify. Try without | 450 | * Got a program check in msch. Try without |
429 | * the concurrent sense bit the next time. | 451 | * the concurrent sense bit the next time. |
430 | */ | 452 | */ |
431 | sch->schib.pmcw.csense = 0; | 453 | sch->config.csense = 0; |
432 | if (ret == 0) { | 454 | } else if (ret == -EBUSY) { |
433 | stsch (sch->schid, &sch->schib); | ||
434 | if (sch->schib.pmcw.ena) | ||
435 | break; | ||
436 | } | ||
437 | if (ret == -EBUSY) { | ||
438 | struct irb irb; | 455 | struct irb irb; |
439 | if (tsch(sch->schid, &irb) != 0) | 456 | if (tsch(sch->schid, &irb) != 0) |
440 | break; | 457 | break; |
441 | } | 458 | } else |
459 | break; | ||
442 | } | 460 | } |
443 | sprintf (dbf_txt, "ret:%d", ret); | 461 | sprintf (dbf_txt, "ret:%d", ret); |
444 | CIO_TRACE_EVENT (2, dbf_txt); | 462 | CIO_TRACE_EVENT (2, dbf_txt); |
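
cio_enable_subchannel() now only records the target state (ena, isc, intparm) in sch->config and lets cio_commit_config() handle the msch details; the loop that remains deals with two recoverable results: a program check (-EIO), after which concurrent sense is dropped and the commit retried, and -EBUSY, where a pending status is drained with tsch before retrying. A rough sketch of that decision logic, with commit() and drain_status() as assumed stand-ins for cio_commit_config() and tsch():

    #include <stdio.h>

    struct cfg { int ena, isc, csense; unsigned int intparm; };

    /* fake commit: fails with a program check while concurrent sense is set */
    static int commit(struct cfg *c)
    {
        return c->csense ? -5 /* -EIO */ : 0;
    }

    static int drain_status(void) { return 0; }     /* models tsch() */

    static int enable(struct cfg *c, unsigned int intparm)
    {
        int retry, ret = -19;                       /* -ENODEV */

        c->ena = 1;
        c->isc = 3;
        c->intparm = intparm;
        for (retry = 0; retry < 3; retry++) {
            ret = commit(c);
            if (ret == -5) {
                /* program check: retry without concurrent sense */
                c->csense = 0;
            } else if (ret == -16 /* -EBUSY */) {
                /* status pending: drain it, then retry */
                if (drain_status() != 0)
                    break;
            } else {
                break;                              /* done, good or bad */
            }
        }
        return ret;
    }

    int main(void)
    {
        struct cfg c = { .csense = 1 };

        printf("enable: %d (csense=%d)\n", enable(&c, 0xcafe), c.csense);
        return 0;
    }
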
@@ -453,8 +471,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); | |||
453 | int cio_disable_subchannel(struct subchannel *sch) | 471 | int cio_disable_subchannel(struct subchannel *sch) |
454 | { | 472 | { |
455 | char dbf_txt[15]; | 473 | char dbf_txt[15]; |
456 | int ccode; | ||
457 | int retry; | ||
458 | int ret; | 474 | int ret; |
459 | 475 | ||
460 | CIO_TRACE_EVENT (2, "dissch"); | 476 | CIO_TRACE_EVENT (2, "dissch"); |
@@ -462,8 +478,7 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
462 | 478 | ||
463 | if (sch_is_pseudo_sch(sch)) | 479 | if (sch_is_pseudo_sch(sch)) |
464 | return 0; | 480 | return 0; |
465 | ccode = stsch (sch->schid, &sch->schib); | 481 | if (cio_update_schib(sch)) |
466 | if (ccode == 3) /* Not operational. */ | ||
467 | return -ENODEV; | 482 | return -ENODEV; |
468 | 483 | ||
469 | if (scsw_actl(&sch->schib.scsw) != 0) | 484 | if (scsw_actl(&sch->schib.scsw) != 0) |
@@ -473,24 +488,9 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
473 | */ | 488 | */ |
474 | return -EBUSY; | 489 | return -EBUSY; |
475 | 490 | ||
476 | for (retry = 5, ret = 0; retry > 0; retry--) { | 491 | sch->config.ena = 0; |
477 | sch->schib.pmcw.ena = 0; | 492 | ret = cio_commit_config(sch); |
478 | ret = cio_modify(sch); | 493 | |
479 | if (ret == -ENODEV) | ||
480 | break; | ||
481 | if (ret == -EBUSY) | ||
482 | /* | ||
483 | * The subchannel is busy or status pending. | ||
484 | * We'll disable when the next interrupt was delivered | ||
485 | * via the state machine. | ||
486 | */ | ||
487 | break; | ||
488 | if (ret == 0) { | ||
489 | stsch (sch->schid, &sch->schib); | ||
490 | if (!sch->schib.pmcw.ena) | ||
491 | break; | ||
492 | } | ||
493 | } | ||
494 | sprintf (dbf_txt, "ret:%d", ret); | 494 | sprintf (dbf_txt, "ret:%d", ret); |
495 | CIO_TRACE_EVENT (2, dbf_txt); | 495 | CIO_TRACE_EVENT (2, dbf_txt); |
496 | return ret; | 496 | return ret; |
@@ -687,6 +687,43 @@ static char console_sch_name[10] = "0.x.xxxx"; | |||
687 | static struct io_subchannel_private console_priv; | 687 | static struct io_subchannel_private console_priv; |
688 | static int console_subchannel_in_use; | 688 | static int console_subchannel_in_use; |
689 | 689 | ||
690 | /* | ||
691 | * Use tpi to get a pending interrupt, call the interrupt handler and | ||
692 | * return a pointer to the subchannel structure. | ||
693 | */ | ||
694 | static int cio_tpi(void) | ||
695 | { | ||
696 | struct tpi_info *tpi_info; | ||
697 | struct subchannel *sch; | ||
698 | struct irb *irb; | ||
699 | int irq_context; | ||
700 | |||
701 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | ||
702 | if (tpi(NULL) != 1) | ||
703 | return 0; | ||
704 | irb = (struct irb *) __LC_IRB; | ||
705 | /* Store interrupt response block to lowcore. */ | ||
706 | if (tsch(tpi_info->schid, irb) != 0) | ||
707 | /* Not status pending or not operational. */ | ||
708 | return 1; | ||
709 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | ||
710 | if (!sch) | ||
711 | return 1; | ||
712 | irq_context = in_interrupt(); | ||
713 | if (!irq_context) | ||
714 | local_bh_disable(); | ||
715 | irq_enter(); | ||
716 | spin_lock(sch->lock); | ||
717 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | ||
718 | if (sch->driver && sch->driver->irq) | ||
719 | sch->driver->irq(sch); | ||
720 | spin_unlock(sch->lock); | ||
721 | irq_exit(); | ||
722 | if (!irq_context) | ||
723 | _local_bh_enable(); | ||
724 | return 1; | ||
725 | } | ||
726 | |||
690 | void *cio_get_console_priv(void) | 727 | void *cio_get_console_priv(void) |
691 | { | 728 | { |
692 | return &console_priv; | 729 | return &console_priv; |
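
cio_tpi() gives the console path a way to poll for a pending I/O interrupt: tpi() reports whether one is pending, tsch() stores the interrupt response block, and the subchannel's irq handler is then called with roughly the environment a real interrupt would have (bottom halves disabled when not already in interrupt context, irq_enter/irq_exit around the locked handler call). The ordering of those steps is the point; the following model only stubs out the primitives to show it, none of the helper names below are kernel APIs:

    #include <stdio.h>
    #include <stdbool.h>

    /* stand-ins for the kernel primitives used by cio_tpi() */
    static void bh_disable(void) { puts("local_bh_disable"); }
    static void bh_enable(void)  { puts("_local_bh_enable"); }
    static void irq_enter(void)  { puts("irq_enter"); }
    static void irq_exit(void)   { puts("irq_exit"); }
    static void lock(void)       { puts("spin_lock(sch->lock)"); }
    static void unlock(void)     { puts("spin_unlock(sch->lock)"); }
    static void driver_irq(void) { puts("sch->driver->irq(sch)"); }

    static bool interrupt_pending(void) { return true; }   /* models tpi() */

    /* returns 1 if an interrupt was consumed, 0 if none was pending */
    static int poll_one_interrupt(bool already_in_irq)
    {
        if (!interrupt_pending())
            return 0;
        /* ... tsch() would store the IRB and identify the subchannel ... */
        if (!already_in_irq)
            bh_disable();
        irq_enter();
        lock();
        driver_irq();
        unlock();
        irq_exit();
        if (!already_in_irq)
            bh_enable();
        return 1;
    }

    int main(void)
    {
        return poll_one_interrupt(false) == 1 ? 0 : 1;
    }
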
@@ -780,7 +817,7 @@ cio_probe_console(void) | |||
780 | sch_no = cio_get_console_sch_no(); | 817 | sch_no = cio_get_console_sch_no(); |
781 | if (sch_no == -1) { | 818 | if (sch_no == -1) { |
782 | console_subchannel_in_use = 0; | 819 | console_subchannel_in_use = 0; |
783 | printk(KERN_WARNING "cio: No ccw console found!\n"); | 820 | pr_warning("No CCW console was found\n"); |
784 | return ERR_PTR(-ENODEV); | 821 | return ERR_PTR(-ENODEV); |
785 | } | 822 | } |
786 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | 823 | memset(&console_subchannel, 0, sizeof(struct subchannel)); |
@@ -796,10 +833,9 @@ cio_probe_console(void) | |||
796 | * enable console I/O-interrupt subclass | 833 | * enable console I/O-interrupt subclass |
797 | */ | 834 | */ |
798 | isc_register(CONSOLE_ISC); | 835 | isc_register(CONSOLE_ISC); |
799 | console_subchannel.schib.pmcw.isc = CONSOLE_ISC; | 836 | console_subchannel.config.isc = CONSOLE_ISC; |
800 | console_subchannel.schib.pmcw.intparm = | 837 | console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; |
801 | (u32)(addr_t)&console_subchannel; | 838 | ret = cio_commit_config(&console_subchannel); |
802 | ret = cio_modify(&console_subchannel); | ||
803 | if (ret) { | 839 | if (ret) { |
804 | isc_unregister(CONSOLE_ISC); | 840 | isc_unregister(CONSOLE_ISC); |
805 | console_subchannel_in_use = 0; | 841 | console_subchannel_in_use = 0; |
@@ -811,8 +847,8 @@ cio_probe_console(void) | |||
811 | void | 847 | void |
812 | cio_release_console(void) | 848 | cio_release_console(void) |
813 | { | 849 | { |
814 | console_subchannel.schib.pmcw.intparm = 0; | 850 | console_subchannel.config.intparm = 0; |
815 | cio_modify(&console_subchannel); | 851 | cio_commit_config(&console_subchannel); |
816 | isc_unregister(CONSOLE_ISC); | 852 | isc_unregister(CONSOLE_ISC); |
817 | console_subchannel_in_use = 0; | 853 | console_subchannel_in_use = 0; |
818 | } | 854 | } |
@@ -852,7 +888,8 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
852 | cc = msch(schid, schib); | 888 | cc = msch(schid, schib); |
853 | if (cc) | 889 | if (cc) |
854 | return (cc==3?-ENODEV:-EBUSY); | 890 | return (cc==3?-ENODEV:-EBUSY); |
855 | stsch(schid, schib); | 891 | if (stsch(schid, schib) || !css_sch_is_valid(schib)) |
892 | return -ENODEV; | ||
856 | if (!schib->pmcw.ena) | 893 | if (!schib->pmcw.ena) |
857 | return 0; | 894 | return 0; |
858 | } | 895 | } |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 0fb24784e925..5150fba742ac 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -45,6 +45,19 @@ struct pmcw { | |||
45 | /* ... in an operand exception. */ | 45 | /* ... in an operand exception. */ |
46 | } __attribute__ ((packed)); | 46 | } __attribute__ ((packed)); |
47 | 47 | ||
48 | /* Target SCHIB configuration. */ | ||
49 | struct schib_config { | ||
50 | u64 mba; | ||
51 | u32 intparm; | ||
52 | u16 mbi; | ||
53 | u32 isc:3; | ||
54 | u32 ena:1; | ||
55 | u32 mme:2; | ||
56 | u32 mp:1; | ||
57 | u32 csense:1; | ||
58 | u32 mbfc:1; | ||
59 | } __attribute__ ((packed)); | ||
60 | |||
48 | /* | 61 | /* |
49 | * subchannel information block | 62 | * subchannel information block |
50 | */ | 63 | */ |
@@ -82,6 +95,8 @@ struct subchannel { | |||
82 | struct device dev; /* entry in device tree */ | 95 | struct device dev; /* entry in device tree */ |
83 | struct css_driver *driver; | 96 | struct css_driver *driver; |
84 | void *private; /* private per subchannel type data */ | 97 | void *private; /* private per subchannel type data */ |
98 | struct work_struct work; | ||
99 | struct schib_config config; | ||
85 | } __attribute__ ((aligned(8))); | 100 | } __attribute__ ((aligned(8))); |
86 | 101 | ||
87 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ | 102 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ |
@@ -100,7 +115,8 @@ extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); | |||
100 | extern int cio_cancel (struct subchannel *); | 115 | extern int cio_cancel (struct subchannel *); |
101 | extern int cio_set_options (struct subchannel *, int); | 116 | extern int cio_set_options (struct subchannel *, int); |
102 | extern int cio_get_options (struct subchannel *); | 117 | extern int cio_get_options (struct subchannel *); |
103 | extern int cio_modify (struct subchannel *); | 118 | extern int cio_update_schib(struct subchannel *sch); |
119 | extern int cio_commit_config(struct subchannel *sch); | ||
104 | 120 | ||
105 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); | 121 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); |
106 | int cio_tm_intrg(struct subchannel *sch); | 122 | int cio_tm_intrg(struct subchannel *sch); |
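
The header change splits the driver's desired settings out of the hardware-defined schib into a separate struct schib_config, so cio_commit_config() always knows what the caller wants independently of what stsch() last reported, and struct subchannel grows a work_struct for deferred unregistration. A compile-time sketch of that two-view layout; the field names mirror the patch, but the types and packing here are illustrative only:

    #include <stdio.h>
    #include <string.h>

    /* desired state, owned by the driver (mirrors struct schib_config) */
    struct cfg {
        unsigned long long mba;
        unsigned int intparm;
        unsigned short mbi;
        unsigned int isc:3;
        unsigned int ena:1;
        unsigned int mme:2;
        unsigned int mp:1;
        unsigned int csense:1;
        unsigned int mbfc:1;
    };

    /* hardware-reported state, refreshed by a stsch()-like read */
    struct hw_view {
        unsigned int intparm;
        unsigned int ena:1;
    };

    struct sub {
        struct hw_view schib;   /* what the hardware last reported */
        struct cfg config;      /* what the driver wants next */
    };

    int main(void)
    {
        struct sub s;

        memset(&s, 0, sizeof(s));
        s.config.ena = 1;
        s.config.intparm = 0xcafe;
        /* a commit step would push s.config to the hardware and then
         * refresh s.schib; here the two views simply diverge */
        printf("want ena=%u, hardware reports ena=%u\n",
               s.config.ena, s.schib.ena);
        return 0;
    }
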
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index a90b28c0be57..dc98b2c63862 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -25,6 +25,9 @@ | |||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "cio" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/bootmem.h> | 31 | #include <linux/bootmem.h> |
29 | #include <linux/device.h> | 32 | #include <linux/device.h> |
30 | #include <linux/init.h> | 33 | #include <linux/init.h> |
@@ -185,56 +188,19 @@ static inline void cmf_activate(void *area, unsigned int onoff) | |||
185 | static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, | 188 | static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, |
186 | unsigned long address) | 189 | unsigned long address) |
187 | { | 190 | { |
188 | int ret; | ||
189 | int retry; | ||
190 | struct subchannel *sch; | 191 | struct subchannel *sch; |
191 | struct schib *schib; | ||
192 | 192 | ||
193 | sch = to_subchannel(cdev->dev.parent); | 193 | sch = to_subchannel(cdev->dev.parent); |
194 | schib = &sch->schib; | ||
195 | /* msch can silently fail, so do it again if necessary */ | ||
196 | for (retry = 0; retry < 3; retry++) { | ||
197 | /* prepare schib */ | ||
198 | stsch(sch->schid, schib); | ||
199 | schib->pmcw.mme = mme; | ||
200 | schib->pmcw.mbfc = mbfc; | ||
201 | /* address can be either a block address or a block index */ | ||
202 | if (mbfc) | ||
203 | schib->mba = address; | ||
204 | else | ||
205 | schib->pmcw.mbi = address; | ||
206 | |||
207 | /* try to submit it */ | ||
208 | switch(ret = msch_err(sch->schid, schib)) { | ||
209 | case 0: | ||
210 | break; | ||
211 | case 1: | ||
212 | case 2: /* in I/O or status pending */ | ||
213 | ret = -EBUSY; | ||
214 | break; | ||
215 | case 3: /* subchannel is no longer valid */ | ||
216 | ret = -ENODEV; | ||
217 | break; | ||
218 | default: /* msch caught an exception */ | ||
219 | ret = -EINVAL; | ||
220 | break; | ||
221 | } | ||
222 | stsch(sch->schid, schib); /* restore the schib */ | ||
223 | |||
224 | if (ret) | ||
225 | break; | ||
226 | 194 | ||
227 | /* check if it worked */ | 195 | sch->config.mme = mme; |
228 | if (schib->pmcw.mme == mme && | 196 | sch->config.mbfc = mbfc; |
229 | schib->pmcw.mbfc == mbfc && | 197 | /* address can be either a block address or a block index */ |
230 | (mbfc ? (schib->mba == address) | 198 | if (mbfc) |
231 | : (schib->pmcw.mbi == address))) | 199 | sch->config.mba = address; |
232 | return 0; | 200 | else |
201 | sch->config.mbi = address; | ||
233 | 202 | ||
234 | ret = -EINVAL; | 203 | return cio_commit_config(sch); |
235 | } | ||
236 | |||
237 | return ret; | ||
238 | } | 204 | } |
239 | 205 | ||
240 | struct set_schib_struct { | 206 | struct set_schib_struct { |
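
With the commit helper available, set_schib() in the measurement code no longer needs its own msch retry loop: it records mme, mbfc and either the measurement-block address or the block index in sch->config and hands everything else to cio_commit_config(). The address-versus-index choice is the only branch left; a compact model, with commit() standing in for cio_commit_config():

    #include <stdio.h>

    struct cfg { unsigned int mme:2; unsigned int mbfc:1;
                 unsigned long long mba; unsigned short mbi; };

    /* stand-in for cio_commit_config(); always succeeds here */
    static int commit(struct cfg *c) { (void)c; return 0; }

    static int set_measurement(struct cfg *c, unsigned int mme, int mbfc,
                               unsigned long address)
    {
        c->mme = mme;
        c->mbfc = mbfc;
        /* with mbfc set the address is a block address, otherwise a block index */
        if (mbfc)
            c->mba = address;
        else
            c->mbi = (unsigned short)address;
        return commit(c);
    }

    int main(void)
    {
        struct cfg c = { 0 };

        printf("block address: %d (mba=%#llx)\n",
               set_measurement(&c, 2, 1, 0x10000), c.mba);
        printf("block index:   %d (mbi=%u)\n",
               set_measurement(&c, 2, 0, 42), (unsigned)c.mbi);
        return 0;
    }
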
@@ -338,7 +304,7 @@ static int cmf_copy_block(struct ccw_device *cdev) | |||
338 | 304 | ||
339 | sch = to_subchannel(cdev->dev.parent); | 305 | sch = to_subchannel(cdev->dev.parent); |
340 | 306 | ||
341 | if (stsch(sch->schid, &sch->schib)) | 307 | if (cio_update_schib(sch)) |
342 | return -ENODEV; | 308 | return -ENODEV; |
343 | 309 | ||
344 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { | 310 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { |
@@ -1359,9 +1325,8 @@ static int __init init_cmf(void) | |||
1359 | default: | 1325 | default: |
1360 | return 1; | 1326 | return 1; |
1361 | } | 1327 | } |
1362 | 1328 | pr_info("Channel measurement facility initialized using format " | |
1363 | printk(KERN_INFO "cio: Channel measurement facility using %s " | 1329 | "%s (mode %s)\n", format_string, detect_string); |
1364 | "format (%s)\n", format_string, detect_string); | ||
1365 | return 0; | 1330 | return 0; |
1366 | } | 1331 | } |
1367 | 1332 | ||
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 76bbb1e74c29..8019288bc6de 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -6,6 +6,10 @@ | |||
6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | */ | 8 | */ |
9 | |||
10 | #define KMSG_COMPONENT "cio" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
9 | #include <linux/module.h> | 13 | #include <linux/module.h> |
10 | #include <linux/init.h> | 14 | #include <linux/init.h> |
11 | #include <linux/device.h> | 15 | #include <linux/device.h> |
@@ -128,8 +132,8 @@ css_free_subchannel(struct subchannel *sch) | |||
128 | { | 132 | { |
129 | if (sch) { | 133 | if (sch) { |
130 | /* Reset intparm to zeroes. */ | 134 | /* Reset intparm to zeroes. */ |
131 | sch->schib.pmcw.intparm = 0; | 135 | sch->config.intparm = 0; |
132 | cio_modify(sch); | 136 | cio_commit_config(sch); |
133 | kfree(sch->lock); | 137 | kfree(sch->lock); |
134 | kfree(sch); | 138 | kfree(sch); |
135 | } | 139 | } |
@@ -844,8 +848,8 @@ out: | |||
844 | s390_unregister_crw_handler(CRW_RSC_CSS); | 848 | s390_unregister_crw_handler(CRW_RSC_CSS); |
845 | chsc_free_sei_area(); | 849 | chsc_free_sei_area(); |
846 | kfree(slow_subchannel_set); | 850 | kfree(slow_subchannel_set); |
847 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", | 851 | pr_alert("The CSS device driver initialization failed with " |
848 | ret); | 852 | "errno=%d\n", ret); |
849 | return ret; | 853 | return ret; |
850 | } | 854 | } |
851 | 855 | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4e4008325e28..23d5752349b5 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -376,19 +376,23 @@ int ccw_device_set_offline(struct ccw_device *cdev) | |||
376 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | 376 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
377 | } | 377 | } |
378 | spin_unlock_irq(cdev->ccwlock); | 378 | spin_unlock_irq(cdev->ccwlock); |
379 | /* Give up reference from ccw_device_set_online(). */ | ||
380 | put_device(&cdev->dev); | ||
379 | return ret; | 381 | return ret; |
380 | } | 382 | } |
381 | spin_unlock_irq(cdev->ccwlock); | 383 | spin_unlock_irq(cdev->ccwlock); |
382 | if (ret == 0) | 384 | if (ret == 0) { |
383 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | 385 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); |
384 | else { | 386 | /* Give up reference from ccw_device_set_online(). */ |
387 | put_device(&cdev->dev); | ||
388 | } else { | ||
385 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " | 389 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " |
386 | "device 0.%x.%04x\n", | 390 | "device 0.%x.%04x\n", |
387 | ret, cdev->private->dev_id.ssid, | 391 | ret, cdev->private->dev_id.ssid, |
388 | cdev->private->dev_id.devno); | 392 | cdev->private->dev_id.devno); |
389 | cdev->online = 1; | 393 | cdev->online = 1; |
390 | } | 394 | } |
391 | return ret; | 395 | return ret; |
392 | } | 396 | } |
393 | 397 | ||
394 | /** | 398 | /** |
@@ -411,6 +415,9 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
411 | return -ENODEV; | 415 | return -ENODEV; |
412 | if (cdev->online || !cdev->drv) | 416 | if (cdev->online || !cdev->drv) |
413 | return -EINVAL; | 417 | return -EINVAL; |
418 | /* Hold on to an extra reference while device is online. */ | ||
419 | if (!get_device(&cdev->dev)) | ||
420 | return -ENODEV; | ||
414 | 421 | ||
415 | spin_lock_irq(cdev->ccwlock); | 422 | spin_lock_irq(cdev->ccwlock); |
416 | ret = ccw_device_online(cdev); | 423 | ret = ccw_device_online(cdev); |
@@ -422,10 +429,15 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
422 | "device 0.%x.%04x\n", | 429 | "device 0.%x.%04x\n", |
423 | ret, cdev->private->dev_id.ssid, | 430 | ret, cdev->private->dev_id.ssid, |
424 | cdev->private->dev_id.devno); | 431 | cdev->private->dev_id.devno); |
432 | /* Give up online reference since onlining failed. */ | ||
433 | put_device(&cdev->dev); | ||
425 | return ret; | 434 | return ret; |
426 | } | 435 | } |
427 | if (cdev->private->state != DEV_STATE_ONLINE) | 436 | if (cdev->private->state != DEV_STATE_ONLINE) { |
437 | /* Give up online reference since onlining failed. */ | ||
438 | put_device(&cdev->dev); | ||
428 | return -ENODEV; | 439 | return -ENODEV; |
440 | } | ||
429 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { | 441 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { |
430 | cdev->online = 1; | 442 | cdev->online = 1; |
431 | return 0; | 443 | return 0; |
@@ -440,6 +452,8 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
440 | "device 0.%x.%04x\n", | 452 | "device 0.%x.%04x\n", |
441 | ret, cdev->private->dev_id.ssid, | 453 | ret, cdev->private->dev_id.ssid, |
442 | cdev->private->dev_id.devno); | 454 | cdev->private->dev_id.devno); |
455 | /* Give up online reference since onlining failed. */ | ||
456 | put_device(&cdev->dev); | ||
443 | return (ret == 0) ? -ENODEV : ret; | 457 | return (ret == 0) ? -ENODEV : ret; |
444 | } | 458 | } |
445 | 459 | ||
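
The device.c changes move the online reference out of the state machine into ccw_device_set_online()/ccw_device_set_offline(): a reference is taken before onlining, dropped on every failure path, and dropped again by set_offline once the device reaches its final state, so the invariant is simply one extra reference for as long as the device is online. A toy model of that pairing; get_dev() and put_dev() are illustrative stand-ins for get_device()/put_device():

    #include <stdio.h>

    struct dev { int refs; int online; };

    static int get_dev(struct dev *d) { d->refs++; return 1; }
    static void put_dev(struct dev *d) { d->refs--; }

    /* pretend the low-level onlining can fail */
    static int do_online(struct dev *d, int fail)
    {
        d->online = !fail;
        return fail ? -1 : 0;
    }

    static int set_online(struct dev *d, int fail)
    {
        if (!get_dev(d))            /* hold a reference while online */
            return -1;
        if (do_online(d, fail)) {
            put_dev(d);             /* give up the reference on failure */
            return -1;
        }
        return 0;
    }

    static void set_offline(struct dev *d)
    {
        d->online = 0;
        put_dev(d);                 /* give up the online reference */
    }

    int main(void)
    {
        struct dev d = { .refs = 1 };       /* initial reference */

        set_online(&d, 1);
        printf("after failed online: refs=%d\n", d.refs);
        set_online(&d, 0);
        printf("while online:        refs=%d\n", d.refs);
        set_offline(&d);
        printf("after offline:       refs=%d\n", d.refs);
        return 0;
    }
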
@@ -704,6 +718,8 @@ ccw_device_release(struct device *dev) | |||
704 | struct ccw_device *cdev; | 718 | struct ccw_device *cdev; |
705 | 719 | ||
706 | cdev = to_ccwdev(dev); | 720 | cdev = to_ccwdev(dev); |
721 | /* Release reference of parent subchannel. */ | ||
722 | put_device(cdev->dev.parent); | ||
707 | kfree(cdev->private); | 723 | kfree(cdev->private); |
708 | kfree(cdev); | 724 | kfree(cdev); |
709 | } | 725 | } |
@@ -735,8 +751,8 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
735 | /* Do first half of device_register. */ | 751 | /* Do first half of device_register. */ |
736 | device_initialize(&cdev->dev); | 752 | device_initialize(&cdev->dev); |
737 | if (!get_device(&sch->dev)) { | 753 | if (!get_device(&sch->dev)) { |
738 | if (cdev->dev.release) | 754 | /* Release reference from device_initialize(). */ |
739 | cdev->dev.release(&cdev->dev); | 755 | put_device(&cdev->dev); |
740 | return -ENODEV; | 756 | return -ENODEV; |
741 | } | 757 | } |
742 | return 0; | 758 | return 0; |
@@ -778,37 +794,55 @@ static void sch_attach_disconnected_device(struct subchannel *sch, | |||
778 | struct subchannel *other_sch; | 794 | struct subchannel *other_sch; |
779 | int ret; | 795 | int ret; |
780 | 796 | ||
781 | other_sch = to_subchannel(get_device(cdev->dev.parent)); | 797 | /* Get reference for new parent. */ |
798 | if (!get_device(&sch->dev)) | ||
799 | return; | ||
800 | other_sch = to_subchannel(cdev->dev.parent); | ||
801 | /* Note: device_move() changes cdev->dev.parent */ | ||
782 | ret = device_move(&cdev->dev, &sch->dev); | 802 | ret = device_move(&cdev->dev, &sch->dev); |
783 | if (ret) { | 803 | if (ret) { |
784 | CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " | 804 | CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " |
785 | "(ret=%d)!\n", cdev->private->dev_id.ssid, | 805 | "(ret=%d)!\n", cdev->private->dev_id.ssid, |
786 | cdev->private->dev_id.devno, ret); | 806 | cdev->private->dev_id.devno, ret); |
787 | put_device(&other_sch->dev); | 807 | /* Put reference for new parent. */ |
808 | put_device(&sch->dev); | ||
788 | return; | 809 | return; |
789 | } | 810 | } |
790 | sch_set_cdev(other_sch, NULL); | 811 | sch_set_cdev(other_sch, NULL); |
791 | /* No need to keep a subchannel without ccw device around. */ | 812 | /* No need to keep a subchannel without ccw device around. */ |
792 | css_sch_device_unregister(other_sch); | 813 | css_sch_device_unregister(other_sch); |
793 | put_device(&other_sch->dev); | ||
794 | sch_attach_device(sch, cdev); | 814 | sch_attach_device(sch, cdev); |
815 | /* Put reference for old parent. */ | ||
816 | put_device(&other_sch->dev); | ||
795 | } | 817 | } |
796 | 818 | ||
797 | static void sch_attach_orphaned_device(struct subchannel *sch, | 819 | static void sch_attach_orphaned_device(struct subchannel *sch, |
798 | struct ccw_device *cdev) | 820 | struct ccw_device *cdev) |
799 | { | 821 | { |
800 | int ret; | 822 | int ret; |
823 | struct subchannel *pseudo_sch; | ||
801 | 824 | ||
802 | /* Try to move the ccw device to its new subchannel. */ | 825 | /* Get reference for new parent. */ |
826 | if (!get_device(&sch->dev)) | ||
827 | return; | ||
828 | pseudo_sch = to_subchannel(cdev->dev.parent); | ||
829 | /* | ||
830 | * Try to move the ccw device to its new subchannel. | ||
831 | * Note: device_move() changes cdev->dev.parent | ||
832 | */ | ||
803 | ret = device_move(&cdev->dev, &sch->dev); | 833 | ret = device_move(&cdev->dev, &sch->dev); |
804 | if (ret) { | 834 | if (ret) { |
805 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " | 835 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " |
806 | "failed (ret=%d)!\n", | 836 | "failed (ret=%d)!\n", |
807 | cdev->private->dev_id.ssid, | 837 | cdev->private->dev_id.ssid, |
808 | cdev->private->dev_id.devno, ret); | 838 | cdev->private->dev_id.devno, ret); |
839 | /* Put reference for new parent. */ | ||
840 | put_device(&sch->dev); | ||
809 | return; | 841 | return; |
810 | } | 842 | } |
811 | sch_attach_device(sch, cdev); | 843 | sch_attach_device(sch, cdev); |
844 | /* Put reference on pseudo subchannel. */ | ||
845 | put_device(&pseudo_sch->dev); | ||
812 | } | 846 | } |
813 | 847 | ||
814 | static void sch_create_and_recog_new_device(struct subchannel *sch) | 848 | static void sch_create_and_recog_new_device(struct subchannel *sch) |
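
sch_attach_disconnected_device() and sch_attach_orphaned_device() now follow a fixed discipline around device_move(): take a reference on the new parent before the move, remember the old parent first (device_move() changes cdev->dev.parent), and drop either the new-parent or the old-parent reference depending on whether the move succeeded. A sketch of that discipline against a fallible move operation; all names below are stand-ins, not driver-core calls:

    #include <stdio.h>

    struct node { const char *name; int refs; };
    struct child { struct node *parent; };

    static int get_node(struct node *n) { n->refs++; return 1; }
    static void put_node(struct node *n) { n->refs--; }

    /* models device_move(): reparents on success, changes nothing on failure */
    static int move_child(struct child *c, struct node *new_parent, int fail)
    {
        if (fail)
            return -1;
        c->parent = new_parent;
        return 0;
    }

    static void attach(struct child *c, struct node *new_parent, int fail)
    {
        struct node *old_parent;

        if (!get_node(new_parent))          /* reference for the new parent */
            return;
        old_parent = c->parent;             /* remember before the move */
        if (move_child(c, new_parent, fail)) {
            put_node(new_parent);           /* move failed: undo new-parent ref */
            return;
        }
        put_node(old_parent);               /* move done: drop old-parent ref */
    }

    int main(void)
    {
        struct node a = { "old", 1 }, b = { "new", 1 };
        struct child c = { .parent = &a };

        attach(&c, &b, 1);
        printf("failed move: old=%d new=%d\n", a.refs, b.refs);
        attach(&c, &b, 0);
        printf("ok move:     old=%d new=%d parent=%s\n",
               a.refs, b.refs, c.parent->name);
        return 0;
    }
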
@@ -830,9 +864,11 @@ static void sch_create_and_recog_new_device(struct subchannel *sch) | |||
830 | spin_lock_irq(sch->lock); | 864 | spin_lock_irq(sch->lock); |
831 | sch_set_cdev(sch, NULL); | 865 | sch_set_cdev(sch, NULL); |
832 | spin_unlock_irq(sch->lock); | 866 | spin_unlock_irq(sch->lock); |
833 | if (cdev->dev.release) | ||
834 | cdev->dev.release(&cdev->dev); | ||
835 | css_sch_device_unregister(sch); | 867 | css_sch_device_unregister(sch); |
868 | /* Put reference from io_subchannel_create_ccwdev(). */ | ||
869 | put_device(&sch->dev); | ||
870 | /* Give up initial reference. */ | ||
871 | put_device(&cdev->dev); | ||
836 | } | 872 | } |
837 | } | 873 | } |
838 | 874 | ||
@@ -854,15 +890,20 @@ void ccw_device_move_to_orphanage(struct work_struct *work) | |||
854 | dev_id.devno = sch->schib.pmcw.dev; | 890 | dev_id.devno = sch->schib.pmcw.dev; |
855 | dev_id.ssid = sch->schid.ssid; | 891 | dev_id.ssid = sch->schid.ssid; |
856 | 892 | ||
893 | /* Increase refcount for pseudo subchannel. */ | ||
894 | get_device(&css->pseudo_subchannel->dev); | ||
857 | /* | 895 | /* |
858 | * Move the orphaned ccw device to the orphanage so the replacing | 896 | * Move the orphaned ccw device to the orphanage so the replacing |
859 | * ccw device can take its place on the subchannel. | 897 | * ccw device can take its place on the subchannel. |
898 | * Note: device_move() changes cdev->dev.parent | ||
860 | */ | 899 | */ |
861 | ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); | 900 | ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); |
862 | if (ret) { | 901 | if (ret) { |
863 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " | 902 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " |
864 | "(ret=%d)!\n", cdev->private->dev_id.ssid, | 903 | "(ret=%d)!\n", cdev->private->dev_id.ssid, |
865 | cdev->private->dev_id.devno, ret); | 904 | cdev->private->dev_id.devno, ret); |
905 | /* Decrease refcount for pseudo subchannel again. */ | ||
906 | put_device(&css->pseudo_subchannel->dev); | ||
866 | return; | 907 | return; |
867 | } | 908 | } |
868 | cdev->ccwlock = css->pseudo_subchannel->lock; | 909 | cdev->ccwlock = css->pseudo_subchannel->lock; |
@@ -875,17 +916,23 @@ void ccw_device_move_to_orphanage(struct work_struct *work) | |||
875 | if (replacing_cdev) { | 916 | if (replacing_cdev) { |
876 | sch_attach_disconnected_device(sch, replacing_cdev); | 917 | sch_attach_disconnected_device(sch, replacing_cdev); |
877 | /* Release reference from get_disc_ccwdev_by_dev_id() */ | 918 | /* Release reference from get_disc_ccwdev_by_dev_id() */ |
878 | put_device(&cdev->dev); | 919 | put_device(&replacing_cdev->dev); |
920 | /* Release reference of subchannel from old cdev. */ | ||
921 | put_device(&sch->dev); | ||
879 | return; | 922 | return; |
880 | } | 923 | } |
881 | replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); | 924 | replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); |
882 | if (replacing_cdev) { | 925 | if (replacing_cdev) { |
883 | sch_attach_orphaned_device(sch, replacing_cdev); | 926 | sch_attach_orphaned_device(sch, replacing_cdev); |
884 | /* Release reference from get_orphaned_ccwdev_by_dev_id() */ | 927 | /* Release reference from get_orphaned_ccwdev_by_dev_id() */ |
885 | put_device(&cdev->dev); | 928 | put_device(&replacing_cdev->dev); |
929 | /* Release reference of subchannel from old cdev. */ | ||
930 | put_device(&sch->dev); | ||
886 | return; | 931 | return; |
887 | } | 932 | } |
888 | sch_create_and_recog_new_device(sch); | 933 | sch_create_and_recog_new_device(sch); |
934 | /* Release reference of subchannel from old cdev. */ | ||
935 | put_device(&sch->dev); | ||
889 | } | 936 | } |
890 | 937 | ||
891 | /* | 938 | /* |
@@ -903,6 +950,14 @@ io_subchannel_register(struct work_struct *work) | |||
903 | priv = container_of(work, struct ccw_device_private, kick_work); | 950 | priv = container_of(work, struct ccw_device_private, kick_work); |
904 | cdev = priv->cdev; | 951 | cdev = priv->cdev; |
905 | sch = to_subchannel(cdev->dev.parent); | 952 | sch = to_subchannel(cdev->dev.parent); |
953 | /* | ||
954 | * Check if subchannel is still registered. It may have become | ||
955 | * unregistered if a machine check hit us after finishing | ||
956 | * device recognition but before the register work could be | ||
957 | * queued. | ||
958 | */ | ||
959 | if (!device_is_registered(&sch->dev)) | ||
960 | goto out_err; | ||
906 | css_update_ssd_info(sch); | 961 | css_update_ssd_info(sch); |
907 | /* | 962 | /* |
908 | * io_subchannel_register() will also be called after device | 963 | * io_subchannel_register() will also be called after device |
@@ -910,7 +965,7 @@ io_subchannel_register(struct work_struct *work) | |||
910 | * be registered). We need to reprobe since we may now have sense id | 965 | * be registered). We need to reprobe since we may now have sense id |
911 | * information. | 966 | * information. |
912 | */ | 967 | */ |
913 | if (klist_node_attached(&cdev->dev.knode_parent)) { | 968 | if (device_is_registered(&cdev->dev)) { |
914 | if (!cdev->drv) { | 969 | if (!cdev->drv) { |
915 | ret = device_reprobe(&cdev->dev); | 970 | ret = device_reprobe(&cdev->dev); |
916 | if (ret) | 971 | if (ret) |
@@ -934,22 +989,19 @@ io_subchannel_register(struct work_struct *work) | |||
934 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", | 989 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", |
935 | cdev->private->dev_id.ssid, | 990 | cdev->private->dev_id.ssid, |
936 | cdev->private->dev_id.devno, ret); | 991 | cdev->private->dev_id.devno, ret); |
937 | put_device(&cdev->dev); | ||
938 | spin_lock_irqsave(sch->lock, flags); | 992 | spin_lock_irqsave(sch->lock, flags); |
939 | sch_set_cdev(sch, NULL); | 993 | sch_set_cdev(sch, NULL); |
940 | spin_unlock_irqrestore(sch->lock, flags); | 994 | spin_unlock_irqrestore(sch->lock, flags); |
941 | kfree (cdev->private); | 995 | /* Release initial device reference. */ |
942 | kfree (cdev); | 996 | put_device(&cdev->dev); |
943 | put_device(&sch->dev); | 997 | goto out_err; |
944 | if (atomic_dec_and_test(&ccw_device_init_count)) | ||
945 | wake_up(&ccw_device_init_wq); | ||
946 | return; | ||
947 | } | 998 | } |
948 | put_device(&cdev->dev); | ||
949 | out: | 999 | out: |
950 | cdev->private->flags.recog_done = 1; | 1000 | cdev->private->flags.recog_done = 1; |
951 | put_device(&sch->dev); | ||
952 | wake_up(&cdev->private->wait_q); | 1001 | wake_up(&cdev->private->wait_q); |
1002 | out_err: | ||
1003 | /* Release reference for workqueue processing. */ | ||
1004 | put_device(&cdev->dev); | ||
953 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1005 | if (atomic_dec_and_test(&ccw_device_init_count)) |
954 | wake_up(&ccw_device_init_wq); | 1006 | wake_up(&ccw_device_init_wq); |
955 | } | 1007 | } |
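
io_subchannel_register() now funnels every exit through the out/out_err labels so the reference the work item holds on the cdev is dropped exactly once regardless of path, and the new device_is_registered() test catches the case where a machine check unregistered the subchannel between recognition and the queued work running. A minimal model of a work callback that owns one reference for the duration of its run; the fail path additionally gives up the child's initial reference, as the patch does, and every name here is illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    struct obj { int refs; bool registered; };

    static void put_obj(struct obj *o) { o->refs--; }

    /* work callback: whoever queued it took one reference for us, and we
     * drop it on every exit path, exactly once */
    static void register_work(struct obj *parent, struct obj *child,
                              bool fail_register)
    {
        if (!parent->registered)
            goto out;               /* parent vanished before we ran */
        if (fail_register)
            put_obj(child);         /* give up the child's initial reference */
        /* ... on success, the real code registers the child here ... */
    out:
        put_obj(child);             /* reference held for workqueue processing */
    }

    int main(void)
    {
        struct obj sch = { .refs = 1, .registered = false };
        struct obj cdev = { .refs = 2 };    /* initial + workqueue reference */

        register_work(&sch, &cdev, false);
        printf("parent gone: cdev refs=%d\n", cdev.refs);
        return 0;
    }
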
@@ -968,8 +1020,8 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) | |||
968 | sch = to_subchannel(cdev->dev.parent); | 1020 | sch = to_subchannel(cdev->dev.parent); |
969 | css_sch_device_unregister(sch); | 1021 | css_sch_device_unregister(sch); |
970 | /* Reset intparm to zeroes. */ | 1022 | /* Reset intparm to zeroes. */ |
971 | sch->schib.pmcw.intparm = 0; | 1023 | sch->config.intparm = 0; |
972 | cio_modify(sch); | 1024 | cio_commit_config(sch); |
973 | /* Release cdev reference for workqueue processing.*/ | 1025 | /* Release cdev reference for workqueue processing.*/ |
974 | put_device(&cdev->dev); | 1026 | put_device(&cdev->dev); |
975 | /* Release subchannel reference for local processing. */ | 1027 | /* Release subchannel reference for local processing. */ |
@@ -998,8 +1050,6 @@ io_subchannel_recog_done(struct ccw_device *cdev) | |||
998 | PREPARE_WORK(&cdev->private->kick_work, | 1050 | PREPARE_WORK(&cdev->private->kick_work, |
999 | ccw_device_call_sch_unregister); | 1051 | ccw_device_call_sch_unregister); |
1000 | queue_work(slow_path_wq, &cdev->private->kick_work); | 1052 | queue_work(slow_path_wq, &cdev->private->kick_work); |
1001 | /* Release subchannel reference for asynchronous recognition. */ | ||
1002 | put_device(&sch->dev); | ||
1003 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1053 | if (atomic_dec_and_test(&ccw_device_init_count)) |
1004 | wake_up(&ccw_device_init_wq); | 1054 | wake_up(&ccw_device_init_wq); |
1005 | break; | 1055 | break; |
@@ -1070,10 +1120,15 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1070 | priv = container_of(work, struct ccw_device_private, kick_work); | 1120 | priv = container_of(work, struct ccw_device_private, kick_work); |
1071 | sch = priv->sch; | 1121 | sch = priv->sch; |
1072 | cdev = priv->cdev; | 1122 | cdev = priv->cdev; |
1073 | former_parent = ccw_device_is_orphan(cdev) ? | 1123 | former_parent = to_subchannel(cdev->dev.parent); |
1074 | NULL : to_subchannel(get_device(cdev->dev.parent)); | 1124 | /* Get reference for new parent. */ |
1125 | if (!get_device(&sch->dev)) | ||
1126 | return; | ||
1075 | mutex_lock(&sch->reg_mutex); | 1127 | mutex_lock(&sch->reg_mutex); |
1076 | /* Try to move the ccw device to its new subchannel. */ | 1128 | /* |
1129 | * Try to move the ccw device to its new subchannel. | ||
1130 | * Note: device_move() changes cdev->dev.parent | ||
1131 | */ | ||
1077 | rc = device_move(&cdev->dev, &sch->dev); | 1132 | rc = device_move(&cdev->dev, &sch->dev); |
1078 | mutex_unlock(&sch->reg_mutex); | 1133 | mutex_unlock(&sch->reg_mutex); |
1079 | if (rc) { | 1134 | if (rc) { |
@@ -1083,21 +1138,23 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1083 | cdev->private->dev_id.devno, sch->schid.ssid, | 1138 | cdev->private->dev_id.devno, sch->schid.ssid, |
1084 | sch->schid.sch_no, rc); | 1139 | sch->schid.sch_no, rc); |
1085 | css_sch_device_unregister(sch); | 1140 | css_sch_device_unregister(sch); |
1141 | /* Put reference for new parent again. */ | ||
1142 | put_device(&sch->dev); | ||
1086 | goto out; | 1143 | goto out; |
1087 | } | 1144 | } |
1088 | if (former_parent) { | 1145 | if (!sch_is_pseudo_sch(former_parent)) { |
1089 | spin_lock_irq(former_parent->lock); | 1146 | spin_lock_irq(former_parent->lock); |
1090 | sch_set_cdev(former_parent, NULL); | 1147 | sch_set_cdev(former_parent, NULL); |
1091 | spin_unlock_irq(former_parent->lock); | 1148 | spin_unlock_irq(former_parent->lock); |
1092 | css_sch_device_unregister(former_parent); | 1149 | css_sch_device_unregister(former_parent); |
1093 | /* Reset intparm to zeroes. */ | 1150 | /* Reset intparm to zeroes. */ |
1094 | former_parent->schib.pmcw.intparm = 0; | 1151 | former_parent->config.intparm = 0; |
1095 | cio_modify(former_parent); | 1152 | cio_commit_config(former_parent); |
1096 | } | 1153 | } |
1097 | sch_attach_device(sch, cdev); | 1154 | sch_attach_device(sch, cdev); |
1098 | out: | 1155 | out: |
1099 | if (former_parent) | 1156 | /* Put reference for old parent. */ |
1100 | put_device(&former_parent->dev); | 1157 | put_device(&former_parent->dev); |
1101 | put_device(&cdev->dev); | 1158 | put_device(&cdev->dev); |
1102 | } | 1159 | } |
1103 | 1160 | ||
@@ -1113,6 +1170,15 @@ static void io_subchannel_irq(struct subchannel *sch) | |||
1113 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | 1170 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
1114 | } | 1171 | } |
1115 | 1172 | ||
1173 | void io_subchannel_init_config(struct subchannel *sch) | ||
1174 | { | ||
1175 | memset(&sch->config, 0, sizeof(sch->config)); | ||
1176 | sch->config.csense = 1; | ||
1177 | /* Use subchannel mp mode when there is more than 1 installed CHPID. */ | ||
1178 | if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0) | ||
1179 | sch->config.mp = 1; | ||
1180 | } | ||
1181 | |||
1116 | static void io_subchannel_init_fields(struct subchannel *sch) | 1182 | static void io_subchannel_init_fields(struct subchannel *sch) |
1117 | { | 1183 | { |
1118 | if (cio_is_console(sch->schid)) | 1184 | if (cio_is_console(sch->schid)) |
@@ -1127,18 +1193,34 @@ static void io_subchannel_init_fields(struct subchannel *sch) | |||
1127 | sch->schib.pmcw.dev, sch->schid.ssid, | 1193 | sch->schib.pmcw.dev, sch->schid.ssid, |
1128 | sch->schid.sch_no, sch->schib.pmcw.pim, | 1194 | sch->schid.sch_no, sch->schib.pmcw.pim, |
1129 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | 1195 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); |
1130 | /* Initially set up some fields in the pmcw. */ | 1196 | |
1131 | sch->schib.pmcw.ena = 0; | 1197 | io_subchannel_init_config(sch); |
1132 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
1133 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
1134 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
1135 | /* clean up possible residual cmf stuff */ | ||
1136 | sch->schib.pmcw.mme = 0; | ||
1137 | sch->schib.pmcw.mbfc = 0; | ||
1138 | sch->schib.pmcw.mbi = 0; | ||
1139 | sch->schib.mba = 0; | ||
1140 | } | 1198 | } |
1141 | 1199 | ||
1200 | static void io_subchannel_do_unreg(struct work_struct *work) | ||
1201 | { | ||
1202 | struct subchannel *sch; | ||
1203 | |||
1204 | sch = container_of(work, struct subchannel, work); | ||
1205 | css_sch_device_unregister(sch); | ||
1206 | /* Reset intparm to zeroes. */ | ||
1207 | sch->config.intparm = 0; | ||
1208 | cio_commit_config(sch); | ||
1209 | put_device(&sch->dev); | ||
1210 | } | ||
1211 | |||
1212 | /* Schedule unregister if we have no cdev. */ | ||
1213 | static void io_subchannel_schedule_removal(struct subchannel *sch) | ||
1214 | { | ||
1215 | get_device(&sch->dev); | ||
1216 | INIT_WORK(&sch->work, io_subchannel_do_unreg); | ||
1217 | queue_work(slow_path_wq, &sch->work); | ||
1218 | } | ||
1219 | |||
1220 | /* | ||
1221 | * Note: We always return 0 so that we bind to the device even on error. | ||
1222 | * This is needed so that our remove function is called on unregister. | ||
1223 | */ | ||
1142 | static int io_subchannel_probe(struct subchannel *sch) | 1224 | static int io_subchannel_probe(struct subchannel *sch) |
1143 | { | 1225 | { |
1144 | struct ccw_device *cdev; | 1226 | struct ccw_device *cdev; |
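
io_subchannel_init_config() resets the desired configuration, enables concurrent sense, and turns on multipath mode only when more than one CHPID is installed, which the patch tests with pim & (pim - 1), the usual more-than-one-bit-set check; io_subchannel_schedule_removal() pins the subchannel with get_device() and defers the unregister to the slow-path workqueue. The bit test is the easy part to get wrong, so here it is in isolation:

    #include <stdio.h>

    /* true if more than one path-installed bit is set in the PIM byte */
    static int more_than_one_path(unsigned char pim)
    {
        return (pim & (pim - 1)) != 0;
    }

    int main(void)
    {
        unsigned char samples[] = { 0x00, 0x80, 0xc0, 0x81, 0xff };
        unsigned int i;

        for (i = 0; i < sizeof(samples); i++)
            printf("pim=0x%02x -> multipath=%d\n",
                   (unsigned)samples[i], more_than_one_path(samples[i]));
        return 0;
    }
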
@@ -1168,9 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1168 | ccw_device_register(cdev); | 1250 | ccw_device_register(cdev); |
1169 | /* | 1251 | /* |
1170 | * Check if the device is already online. If it is | 1252 | * Check if the device is already online. If it is |
1171 | * the reference count needs to be corrected | 1253 | * the reference count needs to be corrected since we |
1172 | * (see ccw_device_online and css_init_done for the | 1254 | * didn't obtain a reference in ccw_device_set_online. |
1173 | * ugly details). | ||
1174 | */ | 1255 | */ |
1175 | if (cdev->private->state != DEV_STATE_NOT_OPER && | 1256 | if (cdev->private->state != DEV_STATE_NOT_OPER && |
1176 | cdev->private->state != DEV_STATE_OFFLINE && | 1257 | cdev->private->state != DEV_STATE_OFFLINE && |
@@ -1179,23 +1260,24 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1179 | return 0; | 1260 | return 0; |
1180 | } | 1261 | } |
1181 | io_subchannel_init_fields(sch); | 1262 | io_subchannel_init_fields(sch); |
1182 | /* | 1263 | rc = cio_commit_config(sch); |
1183 | * First check if a fitting device may be found amongst the | 1264 | if (rc) |
1184 | * disconnected devices or in the orphanage. | 1265 | goto out_schedule; |
1185 | */ | ||
1186 | dev_id.devno = sch->schib.pmcw.dev; | ||
1187 | dev_id.ssid = sch->schid.ssid; | ||
1188 | rc = sysfs_create_group(&sch->dev.kobj, | 1266 | rc = sysfs_create_group(&sch->dev.kobj, |
1189 | &io_subchannel_attr_group); | 1267 | &io_subchannel_attr_group); |
1190 | if (rc) | 1268 | if (rc) |
1191 | return rc; | 1269 | goto out_schedule; |
1192 | /* Allocate I/O subchannel private data. */ | 1270 | /* Allocate I/O subchannel private data. */ |
1193 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | 1271 | sch->private = kzalloc(sizeof(struct io_subchannel_private), |
1194 | GFP_KERNEL | GFP_DMA); | 1272 | GFP_KERNEL | GFP_DMA); |
1195 | if (!sch->private) { | 1273 | if (!sch->private) |
1196 | rc = -ENOMEM; | ||
1197 | goto out_err; | 1274 | goto out_err; |
1198 | } | 1275 | /* |
1276 | * First check if a fitting device may be found amongst the | ||
1277 | * disconnected devices or in the orphanage. | ||
1278 | */ | ||
1279 | dev_id.devno = sch->schib.pmcw.dev; | ||
1280 | dev_id.ssid = sch->schid.ssid; | ||
1199 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); | 1281 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); |
1200 | if (!cdev) | 1282 | if (!cdev) |
1201 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), | 1283 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), |
@@ -1213,24 +1295,21 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1213 | return 0; | 1295 | return 0; |
1214 | } | 1296 | } |
1215 | cdev = io_subchannel_create_ccwdev(sch); | 1297 | cdev = io_subchannel_create_ccwdev(sch); |
1216 | if (IS_ERR(cdev)) { | 1298 | if (IS_ERR(cdev)) |
1217 | rc = PTR_ERR(cdev); | ||
1218 | goto out_err; | 1299 | goto out_err; |
1219 | } | ||
1220 | rc = io_subchannel_recog(cdev, sch); | 1300 | rc = io_subchannel_recog(cdev, sch); |
1221 | if (rc) { | 1301 | if (rc) { |
1222 | spin_lock_irqsave(sch->lock, flags); | 1302 | spin_lock_irqsave(sch->lock, flags); |
1223 | sch_set_cdev(sch, NULL); | 1303 | io_subchannel_recog_done(cdev); |
1224 | spin_unlock_irqrestore(sch->lock, flags); | 1304 | spin_unlock_irqrestore(sch->lock, flags); |
1225 | if (cdev->dev.release) | ||
1226 | cdev->dev.release(&cdev->dev); | ||
1227 | goto out_err; | ||
1228 | } | 1305 | } |
1229 | return 0; | 1306 | return 0; |
1230 | out_err: | 1307 | out_err: |
1231 | kfree(sch->private); | 1308 | kfree(sch->private); |
1232 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | 1309 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); |
1233 | return rc; | 1310 | out_schedule: |
1311 | io_subchannel_schedule_removal(sch); | ||
1312 | return 0; | ||
1234 | } | 1313 | } |
1235 | 1314 | ||
1236 | static int | 1315 | static int |
@@ -1275,10 +1354,7 @@ static void io_subchannel_verify(struct subchannel *sch) | |||
1275 | 1354 | ||
1276 | static int check_for_io_on_path(struct subchannel *sch, int mask) | 1355 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
1277 | { | 1356 | { |
1278 | int cc; | 1357 | if (cio_update_schib(sch)) |
1279 | |||
1280 | cc = stsch(sch->schid, &sch->schib); | ||
1281 | if (cc) | ||
1282 | return 0; | 1358 | return 0; |
1283 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) | 1359 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) |
1284 | return 1; | 1360 | return 1; |
@@ -1347,15 +1423,13 @@ static int io_subchannel_chp_event(struct subchannel *sch, | |||
1347 | io_subchannel_verify(sch); | 1423 | io_subchannel_verify(sch); |
1348 | break; | 1424 | break; |
1349 | case CHP_OFFLINE: | 1425 | case CHP_OFFLINE: |
1350 | if (stsch(sch->schid, &sch->schib)) | 1426 | if (cio_update_schib(sch)) |
1351 | return -ENXIO; | ||
1352 | if (!css_sch_is_valid(&sch->schib)) | ||
1353 | return -ENODEV; | 1427 | return -ENODEV; |
1354 | io_subchannel_terminate_path(sch, mask); | 1428 | io_subchannel_terminate_path(sch, mask); |
1355 | break; | 1429 | break; |
1356 | case CHP_ONLINE: | 1430 | case CHP_ONLINE: |
1357 | if (stsch(sch->schid, &sch->schib)) | 1431 | if (cio_update_schib(sch)) |
1358 | return -ENXIO; | 1432 | return -ENODEV; |
1359 | sch->lpm |= mask & sch->opm; | 1433 | sch->lpm |= mask & sch->opm; |
1360 | io_subchannel_verify(sch); | 1434 | io_subchannel_verify(sch); |
1361 | break; | 1435 | break; |
@@ -1610,8 +1684,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1610 | spin_lock_irqsave(sch->lock, flags); | 1684 | spin_lock_irqsave(sch->lock, flags); |
1611 | 1685 | ||
1612 | /* Reset intparm to zeroes. */ | 1686 | /* Reset intparm to zeroes. */ |
1613 | sch->schib.pmcw.intparm = 0; | 1687 | sch->config.intparm = 0; |
1614 | cio_modify(sch); | 1688 | cio_commit_config(sch); |
1615 | break; | 1689 | break; |
1616 | case REPROBE: | 1690 | case REPROBE: |
1617 | ccw_device_trigger_reprobe(cdev); | 1691 | ccw_device_trigger_reprobe(cdev); |
@@ -1652,6 +1726,9 @@ static int ccw_device_console_enable(struct ccw_device *cdev, | |||
1652 | sch->private = cio_get_console_priv(); | 1726 | sch->private = cio_get_console_priv(); |
1653 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); | 1727 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); |
1654 | io_subchannel_init_fields(sch); | 1728 | io_subchannel_init_fields(sch); |
1729 | rc = cio_commit_config(sch); | ||
1730 | if (rc) | ||
1731 | return rc; | ||
1655 | sch->driver = &io_subchannel_driver; | 1732 | sch->driver = &io_subchannel_driver; |
1656 | /* Initialize the ccw_device structure. */ | 1733 | /* Initialize the ccw_device structure. */ |
1657 | cdev->dev.parent= &sch->dev; | 1734 | cdev->dev.parent= &sch->dev; |
@@ -1723,7 +1800,7 @@ __ccwdev_check_busid(struct device *dev, void *id) | |||
1723 | 1800 | ||
1724 | bus_id = id; | 1801 | bus_id = id; |
1725 | 1802 | ||
1726 | return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0); | 1803 | return (strcmp(bus_id, dev_name(dev)) == 0); |
1727 | } | 1804 | } |
1728 | 1805 | ||
1729 | 1806 | ||
@@ -1806,6 +1883,8 @@ ccw_device_remove (struct device *dev) | |||
1806 | "device 0.%x.%04x\n", | 1883 | "device 0.%x.%04x\n", |
1807 | ret, cdev->private->dev_id.ssid, | 1884 | ret, cdev->private->dev_id.ssid, |
1808 | cdev->private->dev_id.devno); | 1885 | cdev->private->dev_id.devno); |
1886 | /* Give up reference obtained in ccw_device_set_online(). */ | ||
1887 | put_device(&cdev->dev); | ||
1809 | } | 1888 | } |
1810 | ccw_device_set_timeout(cdev, 0); | 1889 | ccw_device_set_timeout(cdev, 0); |
1811 | cdev->drv = NULL; | 1890 | cdev->drv = NULL; |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 104ed669db43..0f2e63ea48de 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -76,6 +76,7 @@ extern wait_queue_head_t ccw_device_init_wq; | |||
76 | extern atomic_t ccw_device_init_count; | 76 | extern atomic_t ccw_device_init_count; |
77 | 77 | ||
78 | void io_subchannel_recog_done(struct ccw_device *cdev); | 78 | void io_subchannel_recog_done(struct ccw_device *cdev); |
79 | void io_subchannel_init_config(struct subchannel *sch); | ||
79 | 80 | ||
80 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 81 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
81 | 82 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 10bc03940fb3..8df5eaafc5ab 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -140,8 +140,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
140 | int ret; | 140 | int ret; |
141 | 141 | ||
142 | sch = to_subchannel(cdev->dev.parent); | 142 | sch = to_subchannel(cdev->dev.parent); |
143 | ret = stsch(sch->schid, &sch->schib); | 143 | if (cio_update_schib(sch)) |
144 | if (ret || !sch->schib.pmcw.dnv) | ||
145 | return -ENODEV; | 144 | return -ENODEV; |
146 | if (!sch->schib.pmcw.ena) | 145 | if (!sch->schib.pmcw.ena) |
147 | /* Not operational -> done. */ | 146 | /* Not operational -> done. */ |
@@ -245,11 +244,13 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
245 | * through ssch() and the path information is up to date. | 244 | * through ssch() and the path information is up to date. |
246 | */ | 245 | */ |
247 | old_lpm = sch->lpm; | 246 | old_lpm = sch->lpm; |
248 | stsch(sch->schid, &sch->schib); | 247 | |
249 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
250 | /* Check since device may again have become not operational. */ | 248 | /* Check since device may again have become not operational. */ |
251 | if (!sch->schib.pmcw.dnv) | 249 | if (cio_update_schib(sch)) |
252 | state = DEV_STATE_NOT_OPER; | 250 | state = DEV_STATE_NOT_OPER; |
251 | else | ||
252 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
253 | |||
253 | if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) | 254 | if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) |
254 | /* Force reprobe on all chpids. */ | 255 | /* Force reprobe on all chpids. */ |
255 | old_lpm = 0; | 256 | old_lpm = 0; |
@@ -399,9 +400,6 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
399 | ccw_device_oper_notify(cdev); | 400 | ccw_device_oper_notify(cdev); |
400 | } | 401 | } |
401 | wake_up(&cdev->private->wait_q); | 402 | wake_up(&cdev->private->wait_q); |
402 | |||
403 | if (css_init_done && state != DEV_STATE_ONLINE) | ||
404 | put_device (&cdev->dev); | ||
405 | } | 403 | } |
406 | 404 | ||
407 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) | 405 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) |
@@ -552,7 +550,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
552 | 550 | ||
553 | sch = to_subchannel(cdev->dev.parent); | 551 | sch = to_subchannel(cdev->dev.parent); |
554 | /* Update schib - pom may have changed. */ | 552 | /* Update schib - pom may have changed. */ |
555 | stsch(sch->schid, &sch->schib); | 553 | if (cio_update_schib(sch)) { |
554 | cdev->private->flags.donotify = 0; | ||
555 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | ||
556 | return; | ||
557 | } | ||
556 | /* Update lpm with verified path mask. */ | 558 | /* Update lpm with verified path mask. */ |
557 | sch->lpm = sch->vpm; | 559 | sch->lpm = sch->vpm; |
558 | /* Repeat path verification? */ | 560 | /* Repeat path verification? */ |
@@ -611,8 +613,6 @@ ccw_device_online(struct ccw_device *cdev) | |||
611 | (cdev->private->state != DEV_STATE_BOXED)) | 613 | (cdev->private->state != DEV_STATE_BOXED)) |
612 | return -EINVAL; | 614 | return -EINVAL; |
613 | sch = to_subchannel(cdev->dev.parent); | 615 | sch = to_subchannel(cdev->dev.parent); |
614 | if (css_init_done && !get_device(&cdev->dev)) | ||
615 | return -ENODEV; | ||
616 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); | 616 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
617 | if (ret != 0) { | 617 | if (ret != 0) { |
618 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 618 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
@@ -672,7 +672,7 @@ ccw_device_offline(struct ccw_device *cdev) | |||
672 | return 0; | 672 | return 0; |
673 | } | 673 | } |
674 | sch = to_subchannel(cdev->dev.parent); | 674 | sch = to_subchannel(cdev->dev.parent); |
675 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) | 675 | if (cio_update_schib(sch)) |
676 | return -ENODEV; | 676 | return -ENODEV; |
677 | if (scsw_actl(&sch->schib.scsw) != 0) | 677 | if (scsw_actl(&sch->schib.scsw) != 0) |
678 | return -EBUSY; | 678 | return -EBUSY; |
@@ -750,7 +750,10 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
750 | * Since we might not just be coming from an interrupt from the | 750 | * Since we might not just be coming from an interrupt from the |
751 | * subchannel we have to update the schib. | 751 | * subchannel we have to update the schib. |
752 | */ | 752 | */ |
753 | stsch(sch->schid, &sch->schib); | 753 | if (cio_update_schib(sch)) { |
754 | ccw_device_verify_done(cdev, -ENODEV); | ||
755 | return; | ||
756 | } | ||
754 | 757 | ||
755 | if (scsw_actl(&sch->schib.scsw) != 0 || | 758 | if (scsw_actl(&sch->schib.scsw) != 0 || |
756 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || | 759 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || |
@@ -1016,20 +1019,21 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev) | |||
1016 | 1019 | ||
1017 | sch = to_subchannel(cdev->dev.parent); | 1020 | sch = to_subchannel(cdev->dev.parent); |
1018 | /* Update some values. */ | 1021 | /* Update some values. */ |
1019 | if (stsch(sch->schid, &sch->schib)) | 1022 | if (cio_update_schib(sch)) |
1020 | return; | ||
1021 | if (!sch->schib.pmcw.dnv) | ||
1022 | return; | 1023 | return; |
1023 | /* | 1024 | /* |
1024 | * The pim, pam, pom values may not be accurate, but they are the best | 1025 | * The pim, pam, pom values may not be accurate, but they are the best |
1025 | * we have before performing device selection :/ | 1026 | * we have before performing device selection :/ |
1026 | */ | 1027 | */ |
1027 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 1028 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
1028 | /* Re-set some bits in the pmcw that were lost. */ | 1029 | /* |
1029 | sch->schib.pmcw.csense = 1; | 1030 | * Use the initial configuration since we can't be shure that the old |
1030 | sch->schib.pmcw.ena = 0; | 1031 | * paths are valid. |
1031 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1032 | */ |
1032 | sch->schib.pmcw.mp = 1; | 1033 | io_subchannel_init_config(sch); |
1034 | if (cio_commit_config(sch)) | ||
1035 | return; | ||
1036 | |||
1033 | /* We should also udate ssd info, but this has to wait. */ | 1037 | /* We should also udate ssd info, but this has to wait. */ |
1034 | /* Check if this is another device which appeared on the same sch. */ | 1038 | /* Check if this is another device which appeared on the same sch. */ |
1035 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1039 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 86bc94eb607f..fc5ca1dd52b3 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -504,7 +504,7 @@ ccw_device_verify_start(struct ccw_device *cdev) | |||
504 | sch->vpm = 0; | 504 | sch->vpm = 0; |
505 | 505 | ||
506 | /* Get current pam. */ | 506 | /* Get current pam. */ |
507 | if (stsch(sch->schid, &sch->schib)) { | 507 | if (cio_update_schib(sch)) { |
508 | ccw_device_verify_done(cdev, -ENODEV); | 508 | ccw_device_verify_done(cdev, -ENODEV); |
509 | return; | 509 | return; |
510 | } | 510 | } |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 1b03c5423be2..5814dbee2410 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -56,7 +56,8 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
56 | struct subchannel *sch; | 56 | struct subchannel *sch; |
57 | 57 | ||
58 | sch = to_subchannel(cdev->dev.parent); | 58 | sch = to_subchannel(cdev->dev.parent); |
59 | stsch (sch->schid, &sch->schib); | 59 | if (cio_update_schib(sch)) |
60 | goto doverify; | ||
60 | 61 | ||
61 | CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " | 62 | CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " |
62 | "not operational \n", __func__, | 63 | "not operational \n", __func__, |
@@ -64,6 +65,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
64 | sch->schib.pmcw.pnom); | 65 | sch->schib.pmcw.pnom); |
65 | 66 | ||
66 | sch->lpm &= ~sch->schib.pmcw.pnom; | 67 | sch->lpm &= ~sch->schib.pmcw.pnom; |
68 | doverify: | ||
67 | cdev->private->flags.doverify = 1; | 69 | cdev->private->flags.doverify = 1; |
68 | } | 70 | } |
69 | 71 | ||
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index e3ea1d5f2810..42f2b09631b6 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -10,10 +10,10 @@ | |||
10 | 10 | ||
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/schid.h> | 12 | #include <asm/schid.h> |
13 | #include <asm/debug.h> | ||
13 | #include "chsc.h" | 14 | #include "chsc.h" |
14 | 15 | ||
15 | #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ | 16 | #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ |
16 | #define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ | ||
17 | #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ | 17 | #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ |
18 | 18 | ||
19 | /* | 19 | /* |
@@ -111,12 +111,12 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, | 113 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, |
114 | int *start, int *count) | 114 | int *start, int *count, int ack) |
115 | { | 115 | { |
116 | register unsigned long _ccq asm ("0") = *count; | 116 | register unsigned long _ccq asm ("0") = *count; |
117 | register unsigned long _token asm ("1") = token; | 117 | register unsigned long _token asm ("1") = token; |
118 | unsigned long _queuestart = ((unsigned long)queue << 32) | *start; | 118 | unsigned long _queuestart = ((unsigned long)queue << 32) | *start; |
119 | unsigned long _state = 0; | 119 | unsigned long _state = (unsigned long)ack << 63; |
120 | 120 | ||
121 | asm volatile( | 121 | asm volatile( |
122 | " .insn rrf,0xB99c0000,%1,%2,0,0" | 122 | " .insn rrf,0xB99c0000,%1,%2,0,0" |
@@ -133,7 +133,7 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue, | |||
133 | static inline int do_sqbs(u64 token, unsigned char state, int queue, | 133 | static inline int do_sqbs(u64 token, unsigned char state, int queue, |
134 | int *start, int *count) { return 0; } | 134 | int *start, int *count) { return 0; } |
135 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, | 135 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, |
136 | int *start, int *count) { return 0; } | 136 | int *start, int *count, int ack) { return 0; } |
137 | #endif /* CONFIG_64BIT */ | 137 | #endif /* CONFIG_64BIT */ |
138 | 138 | ||
139 | struct qdio_irq; | 139 | struct qdio_irq; |
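The new ack argument of do_eqbs() is not a separate operand of the EQBS instruction: as the inline assembly shows, it is folded into bit 63 of the register that will receive the extracted buffer state. A sketch of a caller, modelled on qdio_do_eqbs() further down in this patch (the helper name is made up; for output queues the queue number would additionally be offset by nr_input_qs):

/* illustrative only - mirrors qdio_do_eqbs() in qdio_main.c */
static int example_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_start = start, tmp_count = count;
	unsigned int ccq;

	/*
	 * auto_ack = 1 lets EQBS acknowledge the buffers while extracting
	 * their state; 0 is used by pure status peeks such as the debugfs
	 * dump in qdio_debug.c.
	 */
	ccq = do_eqbs(q->irq_ptr->sch_token, state, q->nr,
		      &tmp_start, &tmp_count, auto_ack);
	return qdio_check_ccq(q, ccq);
}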
@@ -186,20 +186,14 @@ struct qdio_input_q { | |||
186 | /* input buffer acknowledgement flag */ | 186 | /* input buffer acknowledgement flag */ |
187 | int polling; | 187 | int polling; |
188 | 188 | ||
189 | /* how much sbals are acknowledged with qebsm */ | ||
190 | int ack_count; | ||
191 | |||
189 | /* last time of noticing incoming data */ | 192 | /* last time of noticing incoming data */ |
190 | u64 timestamp; | 193 | u64 timestamp; |
191 | |||
192 | /* lock for clearing the acknowledgement */ | ||
193 | spinlock_t lock; | ||
194 | }; | 194 | }; |
195 | 195 | ||
196 | struct qdio_output_q { | 196 | struct qdio_output_q { |
197 | /* failed siga-w attempts*/ | ||
198 | atomic_t busy_siga_counter; | ||
199 | |||
200 | /* start time of busy condition */ | ||
201 | u64 timestamp; | ||
202 | |||
203 | /* PCIs are enabled for the queue */ | 197 | /* PCIs are enabled for the queue */ |
204 | int pci_out_enabled; | 198 | int pci_out_enabled; |
205 | 199 | ||
@@ -250,6 +244,7 @@ struct qdio_q { | |||
250 | 244 | ||
251 | struct qdio_irq *irq_ptr; | 245 | struct qdio_irq *irq_ptr; |
252 | struct tasklet_struct tasklet; | 246 | struct tasklet_struct tasklet; |
247 | spinlock_t lock; | ||
253 | 248 | ||
254 | /* error condition during a data transfer */ | 249 | /* error condition during a data transfer */ |
255 | unsigned int qdio_error; | 250 | unsigned int qdio_error; |
@@ -300,11 +295,13 @@ struct qdio_irq { | |||
300 | struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; | 295 | struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; |
301 | struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; | 296 | struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; |
302 | 297 | ||
298 | debug_info_t *debug_area; | ||
303 | struct mutex setup_mutex; | 299 | struct mutex setup_mutex; |
304 | }; | 300 | }; |
305 | 301 | ||
306 | /* helper functions */ | 302 | /* helper functions */ |
307 | #define queue_type(q) q->irq_ptr->qib.qfmt | 303 | #define queue_type(q) q->irq_ptr->qib.qfmt |
304 | #define SCH_NO(q) (q->irq_ptr->schid.sch_no) | ||
308 | 305 | ||
309 | #define is_thinint_irq(irq) \ | 306 | #define is_thinint_irq(irq) \ |
310 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ | 307 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ |
@@ -348,10 +345,13 @@ static inline unsigned long long get_usecs(void) | |||
348 | ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) | 345 | ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) |
349 | #define add_buf(bufnr, inc) \ | 346 | #define add_buf(bufnr, inc) \ |
350 | ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) | 347 | ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) |
348 | #define sub_buf(bufnr, dec) \ | ||
349 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) | ||
351 | 350 | ||
352 | /* prototypes for thin interrupt */ | 351 | /* prototypes for thin interrupt */ |
353 | void qdio_sync_after_thinint(struct qdio_q *q); | 352 | void qdio_sync_after_thinint(struct qdio_q *q); |
354 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); | 353 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, |
354 | int auto_ack); | ||
355 | void qdio_check_outbound_after_thinint(struct qdio_q *q); | 355 | void qdio_check_outbound_after_thinint(struct qdio_q *q); |
356 | int qdio_inbound_q_moved(struct qdio_q *q); | 356 | int qdio_inbound_q_moved(struct qdio_q *q); |
357 | void qdio_kick_inbound_handler(struct qdio_q *q); | 357 | void qdio_kick_inbound_handler(struct qdio_q *q); |
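sub_buf() completes the wrap-around helpers next_buf()/add_buf(); all three rely on the queue being a power-of-two ring, so a single AND with QDIO_MAX_BUFFERS_MASK handles overflow in either direction. A standalone illustration (macro bodies copied from the hunk above, with the usual 128-buffer queue assumed):

#include <assert.h>

#define QDIO_MAX_BUFFERS_MASK	(128 - 1)
#define add_buf(bufnr, inc)	(((bufnr) + (inc)) & QDIO_MAX_BUFFERS_MASK)
#define sub_buf(bufnr, dec)	(((bufnr) - (dec)) & QDIO_MAX_BUFFERS_MASK)

int main(void)
{
	assert(add_buf(126, 4) == 2);	/* wraps forward past buffer 127 */
	assert(sub_buf(1, 3) == 126);	/* wraps backward, never negative */
	return 0;
}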
@@ -378,10 +378,15 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
378 | int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, | 378 | int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, |
379 | int nr_output_qs); | 379 | int nr_output_qs); |
380 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); | 380 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); |
381 | int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, | ||
382 | struct subchannel_id *schid, | ||
383 | struct qdio_ssqd_desc *data); | ||
381 | int qdio_setup_irq(struct qdio_initialize *init_data); | 384 | int qdio_setup_irq(struct qdio_initialize *init_data); |
382 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | 385 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, |
383 | struct ccw_device *cdev); | 386 | struct ccw_device *cdev); |
384 | void qdio_release_memory(struct qdio_irq *irq_ptr); | 387 | void qdio_release_memory(struct qdio_irq *irq_ptr); |
388 | int qdio_setup_create_sysfs(struct ccw_device *cdev); | ||
389 | void qdio_setup_destroy_sysfs(struct ccw_device *cdev); | ||
385 | int qdio_setup_init(void); | 390 | int qdio_setup_init(void); |
386 | void qdio_setup_exit(void); | 391 | void qdio_setup_exit(void); |
387 | 392 | ||
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f05590355be8..f8a3b6967f69 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include "qdio.h" | 14 | #include "qdio.h" |
15 | 15 | ||
16 | debug_info_t *qdio_dbf_setup; | 16 | debug_info_t *qdio_dbf_setup; |
17 | debug_info_t *qdio_dbf_trace; | 17 | debug_info_t *qdio_dbf_error; |
18 | 18 | ||
19 | static struct dentry *debugfs_root; | 19 | static struct dentry *debugfs_root; |
20 | #define MAX_DEBUGFS_QUEUES 32 | 20 | #define MAX_DEBUGFS_QUEUES 32 |
@@ -22,59 +22,33 @@ static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; | |||
22 | static DEFINE_MUTEX(debugfs_mutex); | 22 | static DEFINE_MUTEX(debugfs_mutex); |
23 | #define QDIO_DEBUGFS_NAME_LEN 40 | 23 | #define QDIO_DEBUGFS_NAME_LEN 40 |
24 | 24 | ||
25 | void qdio_allocate_do_dbf(struct qdio_initialize *init_data) | 25 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
26 | struct qdio_irq *irq_ptr) | ||
26 | { | 27 | { |
27 | char dbf_text[20]; | 28 | char text[20]; |
28 | 29 | ||
29 | sprintf(dbf_text, "qfmt:%x", init_data->q_format); | 30 | DBF_EVENT("qfmt:%1d", init_data->q_format); |
30 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 31 | DBF_HEX(init_data->adapter_name, 8); |
31 | QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); | 32 | DBF_EVENT("qpff%4x", init_data->qib_param_field_format); |
32 | sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); | 33 | DBF_HEX(&init_data->qib_param_field, sizeof(void *)); |
33 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 34 | DBF_HEX(&init_data->input_slib_elements, sizeof(void *)); |
34 | QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); | 35 | DBF_HEX(&init_data->output_slib_elements, sizeof(void *)); |
35 | QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); | 36 | DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs, |
36 | QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); | 37 | init_data->no_output_qs); |
37 | sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); | 38 | DBF_HEX(&init_data->input_handler, sizeof(void *)); |
38 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 39 | DBF_HEX(&init_data->output_handler, sizeof(void *)); |
39 | sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); | 40 | DBF_HEX(&init_data->int_parm, sizeof(long)); |
40 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 41 | DBF_HEX(&init_data->flags, sizeof(long)); |
41 | QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *)); | 42 | DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); |
42 | QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); | 43 | DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); |
43 | QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); | 44 | DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); |
44 | QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); | 45 | |
45 | QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); | 46 | /* allocate trace view for the interface */ |
46 | QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); | 47 | snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev)); |
47 | } | 48 | irq_ptr->debug_area = debug_register(text, 2, 1, 16); |
48 | 49 | debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view); | |
49 | static void qdio_unregister_dbf_views(void) | 50 | debug_set_level(irq_ptr->debug_area, DBF_WARN); |
50 | { | 51 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); |
51 | if (qdio_dbf_setup) | ||
52 | debug_unregister(qdio_dbf_setup); | ||
53 | if (qdio_dbf_trace) | ||
54 | debug_unregister(qdio_dbf_trace); | ||
55 | } | ||
56 | |||
57 | static int qdio_register_dbf_views(void) | ||
58 | { | ||
59 | qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, | ||
60 | QDIO_DBF_SETUP_NR_AREAS, | ||
61 | QDIO_DBF_SETUP_LEN); | ||
62 | if (!qdio_dbf_setup) | ||
63 | goto oom; | ||
64 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | ||
65 | debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); | ||
66 | |||
67 | qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, | ||
68 | QDIO_DBF_TRACE_NR_AREAS, | ||
69 | QDIO_DBF_TRACE_LEN); | ||
70 | if (!qdio_dbf_trace) | ||
71 | goto oom; | ||
72 | debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view); | ||
73 | debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL); | ||
74 | return 0; | ||
75 | oom: | ||
76 | qdio_unregister_dbf_views(); | ||
77 | return -ENOMEM; | ||
78 | } | 52 | } |
79 | 53 | ||
80 | static int qstat_show(struct seq_file *m, void *v) | 54 | static int qstat_show(struct seq_file *m, void *v) |
@@ -86,16 +60,18 @@ static int qstat_show(struct seq_file *m, void *v) | |||
86 | if (!q) | 60 | if (!q) |
87 | return 0; | 61 | return 0; |
88 | 62 | ||
89 | seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci); | 63 | seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); |
90 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); | 64 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); |
91 | seq_printf(m, "ftc: %d\n", q->first_to_check); | 65 | seq_printf(m, "ftc: %d\n", q->first_to_check); |
92 | seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); | 66 | seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); |
93 | seq_printf(m, "polling: %d\n", q->u.in.polling); | 67 | seq_printf(m, "polling: %d\n", q->u.in.polling); |
68 | seq_printf(m, "ack count: %d\n", q->u.in.ack_count); | ||
94 | seq_printf(m, "slsb buffer states:\n"); | 69 | seq_printf(m, "slsb buffer states:\n"); |
70 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | ||
95 | 71 | ||
96 | qdio_siga_sync_q(q); | 72 | qdio_siga_sync_q(q); |
97 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { | 73 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { |
98 | get_buf_state(q, i, &state); | 74 | get_buf_state(q, i, &state, 0); |
99 | switch (state) { | 75 | switch (state) { |
100 | case SLSB_P_INPUT_NOT_INIT: | 76 | case SLSB_P_INPUT_NOT_INIT: |
101 | case SLSB_P_OUTPUT_NOT_INIT: | 77 | case SLSB_P_OUTPUT_NOT_INIT: |
@@ -127,6 +103,7 @@ static int qstat_show(struct seq_file *m, void *v) | |||
127 | seq_printf(m, "\n"); | 103 | seq_printf(m, "\n"); |
128 | } | 104 | } |
129 | seq_printf(m, "\n"); | 105 | seq_printf(m, "\n"); |
106 | seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); | ||
130 | return 0; | 107 | return 0; |
131 | } | 108 | } |
132 | 109 | ||
@@ -223,11 +200,24 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd | |||
223 | int __init qdio_debug_init(void) | 200 | int __init qdio_debug_init(void) |
224 | { | 201 | { |
225 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); | 202 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); |
226 | return qdio_register_dbf_views(); | 203 | |
204 | qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); | ||
205 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | ||
206 | debug_set_level(qdio_dbf_setup, DBF_INFO); | ||
207 | DBF_EVENT("dbf created\n"); | ||
208 | |||
209 | qdio_dbf_error = debug_register("qdio_error", 4, 1, 16); | ||
210 | debug_register_view(qdio_dbf_error, &debug_hex_ascii_view); | ||
211 | debug_set_level(qdio_dbf_error, DBF_INFO); | ||
212 | DBF_ERROR("dbf created\n"); | ||
213 | return 0; | ||
227 | } | 214 | } |
228 | 215 | ||
229 | void qdio_debug_exit(void) | 216 | void qdio_debug_exit(void) |
230 | { | 217 | { |
231 | debugfs_remove(debugfs_root); | 218 | debugfs_remove(debugfs_root); |
232 | qdio_unregister_dbf_views(); | 219 | if (qdio_dbf_setup) |
220 | debug_unregister(qdio_dbf_setup); | ||
221 | if (qdio_dbf_error) | ||
222 | debug_unregister(qdio_dbf_error); | ||
233 | } | 223 | } |
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index 5a4d85b829ad..5d70bd162ae9 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h | |||
@@ -12,80 +12,72 @@ | |||
12 | #include <asm/qdio.h> | 12 | #include <asm/qdio.h> |
13 | #include "qdio.h" | 13 | #include "qdio.h" |
14 | 14 | ||
15 | #define QDIO_DBF_HEX(ex, name, level, addr, len) \ | 15 | /* that gives us 15 characters in the text event views */ |
16 | #define QDIO_DBF_LEN 16 | ||
17 | |||
18 | extern debug_info_t *qdio_dbf_setup; | ||
19 | extern debug_info_t *qdio_dbf_error; | ||
20 | |||
21 | /* sort out low debug levels early to avoid wasted sprints */ | ||
22 | static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) | ||
23 | { | ||
24 | return (level <= dbf_grp->level); | ||
25 | } | ||
26 | |||
27 | #define DBF_ERR 3 /* error conditions */ | ||
28 | #define DBF_WARN 4 /* warning conditions */ | ||
29 | #define DBF_INFO 6 /* informational */ | ||
30 | |||
31 | #undef DBF_EVENT | ||
32 | #undef DBF_ERROR | ||
33 | #undef DBF_DEV_EVENT | ||
34 | |||
35 | #define DBF_EVENT(text...) \ | ||
16 | do { \ | 36 | do { \ |
17 | if (ex) \ | 37 | char debug_buffer[QDIO_DBF_LEN]; \ |
18 | debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ | 38 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ |
19 | else \ | 39 | debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ |
20 | debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ | ||
21 | } while (0) | 40 | } while (0) |
22 | #define QDIO_DBF_TEXT(ex, name, level, text) \ | 41 | |
42 | #define DBF_HEX(addr, len) \ | ||
23 | do { \ | 43 | do { \ |
24 | if (ex) \ | 44 | debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \ |
25 | debug_text_exception(qdio_dbf_##name, level, text); \ | ||
26 | else \ | ||
27 | debug_text_event(qdio_dbf_##name, level, text); \ | ||
28 | } while (0) | 45 | } while (0) |
29 | 46 | ||
30 | #define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) | 47 | #define DBF_ERROR(text...) \ |
31 | #define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) | 48 | do { \ |
32 | #define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) | 49 | char debug_buffer[QDIO_DBF_LEN]; \ |
33 | 50 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ | |
34 | #ifdef CONFIG_QDIO_DEBUG | 51 | debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ |
35 | #define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) | 52 | } while (0) |
36 | #define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) | ||
37 | #define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) | ||
38 | #define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) | ||
39 | #else | ||
40 | #define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) | ||
41 | #define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) | ||
42 | #define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) | ||
43 | #define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) | ||
44 | #endif /* CONFIG_QDIO_DEBUG */ | ||
45 | |||
46 | #define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) | ||
47 | #define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) | ||
48 | #define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) | ||
49 | |||
50 | #ifdef CONFIG_QDIO_DEBUG | ||
51 | #define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) | ||
52 | #define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) | ||
53 | #define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) | ||
54 | #define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text) | ||
55 | #else | ||
56 | #define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) | ||
57 | #define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) | ||
58 | #define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) | ||
59 | #define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) | ||
60 | #endif /* CONFIG_QDIO_DEBUG */ | ||
61 | 53 | ||
62 | /* s390dbf views */ | 54 | #define DBF_ERROR_HEX(addr, len) \ |
63 | #define QDIO_DBF_SETUP_LEN 8 | 55 | do { \ |
64 | #define QDIO_DBF_SETUP_PAGES 8 | 56 | debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \ |
65 | #define QDIO_DBF_SETUP_NR_AREAS 1 | 57 | } while (0) |
66 | 58 | ||
67 | #define QDIO_DBF_TRACE_LEN 8 | ||
68 | #define QDIO_DBF_TRACE_NR_AREAS 2 | ||
69 | 59 | ||
70 | #ifdef CONFIG_QDIO_DEBUG | 60 | #define DBF_DEV_EVENT(level, device, text...) \ |
71 | #define QDIO_DBF_TRACE_PAGES 32 | 61 | do { \ |
72 | #define QDIO_DBF_SETUP_LEVEL 6 | 62 | char debug_buffer[QDIO_DBF_LEN]; \ |
73 | #define QDIO_DBF_TRACE_LEVEL 4 | 63 | if (qdio_dbf_passes(device->debug_area, level)) { \ |
74 | #else /* !CONFIG_QDIO_DEBUG */ | 64 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ |
75 | #define QDIO_DBF_TRACE_PAGES 8 | 65 | debug_text_event(device->debug_area, level, debug_buffer); \ |
76 | #define QDIO_DBF_SETUP_LEVEL 2 | 66 | } \ |
77 | #define QDIO_DBF_TRACE_LEVEL 2 | 67 | } while (0) |
78 | #endif /* CONFIG_QDIO_DEBUG */ | ||
79 | 68 | ||
80 | extern debug_info_t *qdio_dbf_setup; | 69 | #define DBF_DEV_HEX(level, device, addr, len) \ |
81 | extern debug_info_t *qdio_dbf_trace; | 70 | do { \ |
71 | debug_event(device->debug_area, level, (void*)(addr), len); \ | ||
72 | } while (0) | ||
82 | 73 | ||
83 | void qdio_allocate_do_dbf(struct qdio_initialize *init_data); | 74 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
84 | void debug_print_bstat(struct qdio_q *q); | 75 | struct qdio_irq *irq_ptr); |
85 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, | 76 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, |
86 | struct ccw_device *cdev); | 77 | struct ccw_device *cdev); |
87 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, | 78 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, |
88 | struct ccw_device *cdev); | 79 | struct ccw_device *cdev); |
89 | int qdio_debug_init(void); | 80 | int qdio_debug_init(void); |
90 | void qdio_debug_exit(void); | 81 | void qdio_debug_exit(void); |
82 | |||
91 | #endif | 83 | #endif |
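How the replacement macros are meant to be used can be read off the call sites elsewhere in this patch: DBF_EVENT()/DBF_ERROR() snprintf into a 16-byte buffer (15 visible characters) and log to the global setup and error areas, while DBF_DEV_EVENT() writes to the per-device view and checks qdio_dbf_passes() before formatting, so disabled levels stay cheap. For example (call sites taken from the qdio_debug.c and qdio_main.c hunks in this patch):

DBF_EVENT("qfmt:%1d", init_data->q_format);		/* global setup area */

DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);		/* global error area */
DBF_ERROR_HEX(irb, 64);					/* raw hex dump */

DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);	/* per-device view */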
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 7c8659151993..744f928a59ea 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -74,7 +74,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) | |||
74 | * Note: For IQDC unicast queues only the highest priority queue is processed. | 74 | * Note: For IQDC unicast queues only the highest priority queue is processed. |
75 | */ | 75 | */ |
76 | static inline int do_siga_output(unsigned long schid, unsigned long mask, | 76 | static inline int do_siga_output(unsigned long schid, unsigned long mask, |
77 | u32 *bb, unsigned int fc) | 77 | unsigned int *bb, unsigned int fc) |
78 | { | 78 | { |
79 | register unsigned long __fc asm("0") = fc; | 79 | register unsigned long __fc asm("0") = fc; |
80 | register unsigned long __schid asm("1") = schid; | 80 | register unsigned long __schid asm("1") = schid; |
@@ -95,8 +95,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, | |||
95 | 95 | ||
96 | static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | 96 | static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) |
97 | { | 97 | { |
98 | char dbf_text[15]; | ||
99 | |||
100 | /* all done or next buffer state different */ | 98 | /* all done or next buffer state different */ |
101 | if (ccq == 0 || ccq == 32) | 99 | if (ccq == 0 || ccq == 32) |
102 | return 0; | 100 | return 0; |
@@ -104,8 +102,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
104 | if (ccq == 96 || ccq == 97) | 102 | if (ccq == 96 || ccq == 97) |
105 | return 1; | 103 | return 1; |
106 | /* notify devices immediately */ | 104 | /* notify devices immediately */ |
107 | sprintf(dbf_text, "%d", ccq); | 105 | DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); |
108 | QDIO_DBF_TEXT2(1, trace, dbf_text); | ||
109 | return -EIO; | 106 | return -EIO; |
110 | } | 107 | } |
111 | 108 | ||
@@ -115,41 +112,45 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
115 | * @state: state of the extracted buffers | 112 | * @state: state of the extracted buffers |
116 | * @start: buffer number to start at | 113 | * @start: buffer number to start at |
117 | * @count: count of buffers to examine | 114 | * @count: count of buffers to examine |
115 | * @auto_ack: automatically acknowledge buffers | ||
118 | * | 116 | * |
119 | * Returns the number of successfully extracted equal buffer states. | 117 | * Returns the number of successfully extracted equal buffer states. |
120 | * Stops processing if a state is different from the last buffer's state. | 118 | * Stops processing if a state is different from the last buffer's state. |
121 | */ | 119 | */ |
122 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | 120 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, |
123 | int start, int count) | 121 | int start, int count, int auto_ack) |
124 | { | 122 | { |
125 | unsigned int ccq = 0; | 123 | unsigned int ccq = 0; |
126 | int tmp_count = count, tmp_start = start; | 124 | int tmp_count = count, tmp_start = start; |
127 | int nr = q->nr; | 125 | int nr = q->nr; |
128 | int rc; | 126 | int rc; |
129 | char dbf_text[15]; | ||
130 | 127 | ||
131 | BUG_ON(!q->irq_ptr->sch_token); | 128 | BUG_ON(!q->irq_ptr->sch_token); |
129 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); | ||
132 | 130 | ||
133 | if (!q->is_input_q) | 131 | if (!q->is_input_q) |
134 | nr += q->irq_ptr->nr_input_qs; | 132 | nr += q->irq_ptr->nr_input_qs; |
135 | again: | 133 | again: |
136 | ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); | 134 | ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, |
135 | auto_ack); | ||
137 | rc = qdio_check_ccq(q, ccq); | 136 | rc = qdio_check_ccq(q, ccq); |
138 | 137 | ||
139 | /* At least one buffer was processed, return and extract the remaining | 138 | /* At least one buffer was processed, return and extract the remaining |
140 | * buffers later. | 139 | * buffers later. |
141 | */ | 140 | */ |
142 | if ((ccq == 96) && (count != tmp_count)) | 141 | if ((ccq == 96) && (count != tmp_count)) { |
142 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); | ||
143 | return (count - tmp_count); | 143 | return (count - tmp_count); |
144 | } | ||
145 | |||
144 | if (rc == 1) { | 146 | if (rc == 1) { |
145 | QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); | 147 | DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); |
146 | goto again; | 148 | goto again; |
147 | } | 149 | } |
148 | 150 | ||
149 | if (rc < 0) { | 151 | if (rc < 0) { |
150 | QDIO_DBF_TEXT2(1, trace, "eqberr"); | 152 | DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); |
151 | sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); | 153 | DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); |
152 | QDIO_DBF_TEXT2(1, trace, dbf_text); | ||
153 | q->handler(q->irq_ptr->cdev, | 154 | q->handler(q->irq_ptr->cdev, |
154 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, | 155 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, |
155 | 0, -1, -1, q->irq_ptr->int_parm); | 156 | 0, -1, -1, q->irq_ptr->int_parm); |
@@ -176,9 +177,12 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, | |||
176 | int tmp_count = count, tmp_start = start; | 177 | int tmp_count = count, tmp_start = start; |
177 | int nr = q->nr; | 178 | int nr = q->nr; |
178 | int rc; | 179 | int rc; |
179 | char dbf_text[15]; | 180 | |
181 | if (!count) | ||
182 | return 0; | ||
180 | 183 | ||
181 | BUG_ON(!q->irq_ptr->sch_token); | 184 | BUG_ON(!q->irq_ptr->sch_token); |
185 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); | ||
182 | 186 | ||
183 | if (!q->is_input_q) | 187 | if (!q->is_input_q) |
184 | nr += q->irq_ptr->nr_input_qs; | 188 | nr += q->irq_ptr->nr_input_qs; |
@@ -186,16 +190,13 @@ again: | |||
186 | ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); | 190 | ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); |
187 | rc = qdio_check_ccq(q, ccq); | 191 | rc = qdio_check_ccq(q, ccq); |
188 | if (rc == 1) { | 192 | if (rc == 1) { |
189 | QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); | 193 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); |
194 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); | ||
190 | goto again; | 195 | goto again; |
191 | } | 196 | } |
192 | if (rc < 0) { | 197 | if (rc < 0) { |
193 | QDIO_DBF_TEXT3(1, trace, "sqberr"); | 198 | DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); |
194 | sprintf(dbf_text, "%2x,%2x", count, tmp_count); | 199 | DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); |
195 | QDIO_DBF_TEXT3(1, trace, dbf_text); | ||
196 | sprintf(dbf_text, "%d,%d", ccq, nr); | ||
197 | QDIO_DBF_TEXT3(1, trace, dbf_text); | ||
198 | |||
199 | q->handler(q->irq_ptr->cdev, | 200 | q->handler(q->irq_ptr->cdev, |
200 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, | 201 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, |
201 | 0, -1, -1, q->irq_ptr->int_parm); | 202 | 0, -1, -1, q->irq_ptr->int_parm); |
@@ -207,7 +208,8 @@ again: | |||
207 | 208 | ||
208 | /* returns number of examined buffers and their common state in *state */ | 209 | /* returns number of examined buffers and their common state in *state */ |
209 | static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | 210 | static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, |
210 | unsigned char *state, unsigned int count) | 211 | unsigned char *state, unsigned int count, |
212 | int auto_ack) | ||
211 | { | 213 | { |
212 | unsigned char __state = 0; | 214 | unsigned char __state = 0; |
213 | int i; | 215 | int i; |
@@ -216,7 +218,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
216 | BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); | 218 | BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); |
217 | 219 | ||
218 | if (is_qebsm(q)) | 220 | if (is_qebsm(q)) |
219 | return qdio_do_eqbs(q, state, bufnr, count); | 221 | return qdio_do_eqbs(q, state, bufnr, count, auto_ack); |
220 | 222 | ||
221 | for (i = 0; i < count; i++) { | 223 | for (i = 0; i < count; i++) { |
222 | if (!__state) | 224 | if (!__state) |
@@ -230,9 +232,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
230 | } | 232 | } |
231 | 233 | ||
232 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, | 234 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, |
233 | unsigned char *state) | 235 | unsigned char *state, int auto_ack) |
234 | { | 236 | { |
235 | return get_buf_states(q, bufnr, state, 1); | 237 | return get_buf_states(q, bufnr, state, 1, auto_ack); |
236 | } | 238 | } |
237 | 239 | ||
238 | /* wrap-around safe setting of slsb states, returns number of changed buffers */ | 240 | /* wrap-around safe setting of slsb states, returns number of changed buffers */ |
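With the extra auto_ack argument, get_buf_state()/get_buf_states() now distinguish two kinds of callers that used to look identical: the inbound scan, which wants EQBS to acknowledge the buffers it extracts, and pure status peeks, which must not disturb the SLSB. The convention, as used later in this patch:

/* inbound scan in get_inbound_buffer_frontier(): acknowledge via EQBS */
count = get_buf_states(q, q->first_to_check, &state, count, 1);

/* status peeks (qdio_inbound_q_done(), debugfs qstat_show()): look only */
get_buf_state(q, q->first_to_check, &state, 0);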
@@ -282,14 +284,12 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, | |||
282 | if (!need_siga_sync(q)) | 284 | if (!need_siga_sync(q)) |
283 | return 0; | 285 | return 0; |
284 | 286 | ||
287 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); | ||
285 | qdio_perf_stat_inc(&perf_stats.siga_sync); | 288 | qdio_perf_stat_inc(&perf_stats.siga_sync); |
286 | 289 | ||
287 | cc = do_siga_sync(q->irq_ptr->schid, output, input); | 290 | cc = do_siga_sync(q->irq_ptr->schid, output, input); |
288 | if (cc) { | 291 | if (cc) |
289 | QDIO_DBF_TEXT4(0, trace, "sigasync"); | 292 | DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); |
290 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
291 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | ||
292 | } | ||
293 | return cc; | 293 | return cc; |
294 | } | 294 | } |
295 | 295 | ||
@@ -311,50 +311,37 @@ static inline int qdio_siga_sync_all(struct qdio_q *q) | |||
311 | return qdio_siga_sync(q, ~0U, ~0U); | 311 | return qdio_siga_sync(q, ~0U, ~0U); |
312 | } | 312 | } |
313 | 313 | ||
314 | static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) | 314 | static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) |
315 | { | 315 | { |
316 | unsigned int fc = 0; | ||
317 | unsigned long schid; | 316 | unsigned long schid; |
317 | unsigned int fc = 0; | ||
318 | u64 start_time = 0; | ||
319 | int cc; | ||
318 | 320 | ||
319 | if (q->u.out.use_enh_siga) { | 321 | if (q->u.out.use_enh_siga) |
320 | fc = 3; | 322 | fc = 3; |
321 | } | 323 | |
322 | if (!is_qebsm(q)) | 324 | if (is_qebsm(q)) { |
323 | schid = *((u32 *)&q->irq_ptr->schid); | ||
324 | else { | ||
325 | schid = q->irq_ptr->sch_token; | 325 | schid = q->irq_ptr->sch_token; |
326 | fc |= 0x80; | 326 | fc |= 0x80; |
327 | } | 327 | } |
328 | return do_siga_output(schid, q->mask, busy_bit, fc); | 328 | else |
329 | } | 329 | schid = *((u32 *)&q->irq_ptr->schid); |
330 | |||
331 | static int qdio_siga_output(struct qdio_q *q) | ||
332 | { | ||
333 | int cc; | ||
334 | u32 busy_bit; | ||
335 | u64 start_time = 0; | ||
336 | char dbf_text[15]; | ||
337 | |||
338 | QDIO_DBF_TEXT5(0, trace, "sigaout"); | ||
339 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | ||
340 | 330 | ||
341 | qdio_perf_stat_inc(&perf_stats.siga_out); | ||
342 | again: | 331 | again: |
343 | cc = qdio_do_siga_output(q, &busy_bit); | 332 | cc = do_siga_output(schid, q->mask, busy_bit, fc); |
344 | if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { | ||
345 | sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); | ||
346 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
347 | 333 | ||
348 | if (!start_time) | 334 | /* hipersocket busy condition */ |
335 | if (*busy_bit) { | ||
336 | WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); | ||
337 | |||
338 | if (!start_time) { | ||
349 | start_time = get_usecs(); | 339 | start_time = get_usecs(); |
350 | else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | 340 | goto again; |
341 | } | ||
342 | if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | ||
351 | goto again; | 343 | goto again; |
352 | } | 344 | } |
353 | |||
354 | if (cc == 2 && busy_bit) | ||
355 | cc |= QDIO_ERROR_SIGA_BUSY; | ||
356 | if (cc) | ||
357 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | ||
358 | return cc; | 345 | return cc; |
359 | } | 346 | } |
360 | 347 | ||
@@ -362,14 +349,12 @@ static inline int qdio_siga_input(struct qdio_q *q) | |||
362 | { | 349 | { |
363 | int cc; | 350 | int cc; |
364 | 351 | ||
365 | QDIO_DBF_TEXT4(0, trace, "sigain"); | 352 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); |
366 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
367 | |||
368 | qdio_perf_stat_inc(&perf_stats.siga_in); | 353 | qdio_perf_stat_inc(&perf_stats.siga_in); |
369 | 354 | ||
370 | cc = do_siga_input(q->irq_ptr->schid, q->mask); | 355 | cc = do_siga_input(q->irq_ptr->schid, q->mask); |
371 | if (cc) | 356 | if (cc) |
372 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | 357 | DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); |
373 | return cc; | 358 | return cc; |
374 | } | 359 | } |
375 | 360 | ||
@@ -387,35 +372,91 @@ void qdio_sync_after_thinint(struct qdio_q *q) | |||
387 | 372 | ||
388 | inline void qdio_stop_polling(struct qdio_q *q) | 373 | inline void qdio_stop_polling(struct qdio_q *q) |
389 | { | 374 | { |
390 | spin_lock_bh(&q->u.in.lock); | 375 | if (!q->u.in.polling) |
391 | if (!q->u.in.polling) { | ||
392 | spin_unlock_bh(&q->u.in.lock); | ||
393 | return; | 376 | return; |
394 | } | 377 | |
395 | q->u.in.polling = 0; | 378 | q->u.in.polling = 0; |
396 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); | 379 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); |
397 | 380 | ||
398 | /* show the card that we are not polling anymore */ | 381 | /* show the card that we are not polling anymore */ |
399 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | 382 | if (is_qebsm(q)) { |
400 | spin_unlock_bh(&q->u.in.lock); | 383 | set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, |
384 | q->u.in.ack_count); | ||
385 | q->u.in.ack_count = 0; | ||
386 | } else | ||
387 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | ||
401 | } | 388 | } |
402 | 389 | ||
403 | static void announce_buffer_error(struct qdio_q *q) | 390 | static void announce_buffer_error(struct qdio_q *q, int count) |
404 | { | 391 | { |
405 | char dbf_text[15]; | 392 | q->qdio_error |= QDIO_ERROR_SLSB_STATE; |
393 | |||
394 | /* special handling for no target buffer empty */ | ||
395 | if ((!q->is_input_q && | ||
396 | (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { | ||
397 | qdio_perf_stat_inc(&perf_stats.outbound_target_full); | ||
398 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d", | ||
399 | q->first_to_check); | ||
400 | return; | ||
401 | } | ||
406 | 402 | ||
407 | if (q->is_input_q) | 403 | DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); |
408 | QDIO_DBF_TEXT3(1, trace, "inperr"); | 404 | DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); |
409 | else | 405 | DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); |
410 | QDIO_DBF_TEXT3(0, trace, "outperr"); | 406 | DBF_ERROR("F14:%2x F15:%2x", |
407 | q->sbal[q->first_to_check]->element[14].flags & 0xff, | ||
408 | q->sbal[q->first_to_check]->element[15].flags & 0xff); | ||
409 | } | ||
410 | |||
411 | static inline void inbound_primed(struct qdio_q *q, int count) | ||
412 | { | ||
413 | int new; | ||
414 | |||
415 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count); | ||
416 | |||
417 | /* for QEBSM the ACK was already set by EQBS */ | ||
418 | if (is_qebsm(q)) { | ||
419 | if (!q->u.in.polling) { | ||
420 | q->u.in.polling = 1; | ||
421 | q->u.in.ack_count = count; | ||
422 | q->last_move_ftc = q->first_to_check; | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | /* delete the previous ACKs */ | ||
427 | set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, | ||
428 | q->u.in.ack_count); | ||
429 | q->u.in.ack_count = count; | ||
430 | q->last_move_ftc = q->first_to_check; | ||
431 | return; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * ACK the newest buffer. The ACK will be removed in qdio_stop_polling | ||
436 | * or by the next inbound run. | ||
437 | */ | ||
438 | new = add_buf(q->first_to_check, count - 1); | ||
439 | if (q->u.in.polling) { | ||
440 | /* reset the previous ACK but first set the new one */ | ||
441 | set_buf_state(q, new, SLSB_P_INPUT_ACK); | ||
442 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | ||
443 | } | ||
444 | else { | ||
445 | q->u.in.polling = 1; | ||
446 | set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); | ||
447 | } | ||
411 | 448 | ||
412 | sprintf(dbf_text, "%x-%x-%x", q->first_to_check, | 449 | q->last_move_ftc = new; |
413 | q->sbal[q->first_to_check]->element[14].flags, | 450 | count--; |
414 | q->sbal[q->first_to_check]->element[15].flags); | 451 | if (!count) |
415 | QDIO_DBF_TEXT3(1, trace, dbf_text); | 452 | return; |
416 | QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); | ||
417 | 453 | ||
418 | q->qdio_error = QDIO_ERROR_SLSB_STATE; | 454 | /* |
455 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
456 | * we're loosing initiative in the thinint code. | ||
457 | */ | ||
458 | set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, | ||
459 | count); | ||
419 | } | 460 | } |
420 | 461 | ||
421 | static int get_inbound_buffer_frontier(struct qdio_q *q) | 462 | static int get_inbound_buffer_frontier(struct qdio_q *q) |
@@ -424,13 +465,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) | |||
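In the non-QEBSM branch of inbound_primed() above, the position of the software ACK is computed with the same wrap-around arithmetic as the rest of the ring handling. A worked value, assuming the usual 128-buffer queue:

/*
 * q->first_to_check == 126 and count == 4 primed buffers:
 *	new = add_buf(126, 4 - 1) = (126 + 3) & 0x7f = 1
 * so the ACK position wraps past buffer 127 back to the start of the ring.
 */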
424 | unsigned char state; | 465 | unsigned char state; |
425 | 466 | ||
426 | /* | 467 | /* |
427 | * If we still poll don't update last_move_ftc, keep the | ||
428 | * previously ACK buffer there. | ||
429 | */ | ||
430 | if (!q->u.in.polling) | ||
431 | q->last_move_ftc = q->first_to_check; | ||
432 | |||
433 | /* | ||
434 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved | 468 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved |
435 | * would return 0. | 469 | * would return 0. |
436 | */ | 470 | */ |
@@ -450,34 +484,13 @@ check_next: | |||
450 | if (q->first_to_check == stop) | 484 | if (q->first_to_check == stop) |
451 | goto out; | 485 | goto out; |
452 | 486 | ||
453 | count = get_buf_states(q, q->first_to_check, &state, count); | 487 | count = get_buf_states(q, q->first_to_check, &state, count, 1); |
454 | if (!count) | 488 | if (!count) |
455 | goto out; | 489 | goto out; |
456 | 490 | ||
457 | switch (state) { | 491 | switch (state) { |
458 | case SLSB_P_INPUT_PRIMED: | 492 | case SLSB_P_INPUT_PRIMED: |
459 | QDIO_DBF_TEXT5(0, trace, "inptprim"); | 493 | inbound_primed(q, count); |
460 | |||
461 | /* | ||
462 | * Only ACK the first buffer. The ACK will be removed in | ||
463 | * qdio_stop_polling. | ||
464 | */ | ||
465 | if (q->u.in.polling) | ||
466 | state = SLSB_P_INPUT_NOT_INIT; | ||
467 | else { | ||
468 | q->u.in.polling = 1; | ||
469 | state = SLSB_P_INPUT_ACK; | ||
470 | } | ||
471 | set_buf_state(q, q->first_to_check, state); | ||
472 | |||
473 | /* | ||
474 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
475 | * we're loosing initiative in the thinint code. | ||
476 | */ | ||
477 | if (count > 1) | ||
478 | set_buf_states(q, next_buf(q->first_to_check), | ||
479 | SLSB_P_INPUT_NOT_INIT, count - 1); | ||
480 | |||
481 | /* | 494 | /* |
482 | * No siga-sync needed for non-qebsm here, as the inbound queue | 495 | * No siga-sync needed for non-qebsm here, as the inbound queue |
483 | * will be synced on the next siga-r, resp. | 496 | * will be synced on the next siga-r, resp. |
@@ -487,7 +500,7 @@ check_next: | |||
487 | atomic_sub(count, &q->nr_buf_used); | 500 | atomic_sub(count, &q->nr_buf_used); |
488 | goto check_next; | 501 | goto check_next; |
489 | case SLSB_P_INPUT_ERROR: | 502 | case SLSB_P_INPUT_ERROR: |
490 | announce_buffer_error(q); | 503 | announce_buffer_error(q, count); |
491 | /* process the buffer, the upper layer will take care of it */ | 504 | /* process the buffer, the upper layer will take care of it */ |
492 | q->first_to_check = add_buf(q->first_to_check, count); | 505 | q->first_to_check = add_buf(q->first_to_check, count); |
493 | atomic_sub(count, &q->nr_buf_used); | 506 | atomic_sub(count, &q->nr_buf_used); |
@@ -495,13 +508,12 @@ check_next: | |||
495 | case SLSB_CU_INPUT_EMPTY: | 508 | case SLSB_CU_INPUT_EMPTY: |
496 | case SLSB_P_INPUT_NOT_INIT: | 509 | case SLSB_P_INPUT_NOT_INIT: |
497 | case SLSB_P_INPUT_ACK: | 510 | case SLSB_P_INPUT_ACK: |
498 | QDIO_DBF_TEXT5(0, trace, "inpnipro"); | 511 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); |
499 | break; | 512 | break; |
500 | default: | 513 | default: |
501 | BUG(); | 514 | BUG(); |
502 | } | 515 | } |
503 | out: | 516 | out: |
504 | QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); | ||
505 | return q->first_to_check; | 517 | return q->first_to_check; |
506 | } | 518 | } |
507 | 519 | ||
@@ -515,8 +527,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
515 | if (!need_siga_sync(q) && !pci_out_supported(q)) | 527 | if (!need_siga_sync(q) && !pci_out_supported(q)) |
516 | q->u.in.timestamp = get_usecs(); | 528 | q->u.in.timestamp = get_usecs(); |
517 | 529 | ||
518 | QDIO_DBF_TEXT4(0, trace, "inhasmvd"); | 530 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved"); |
519 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
520 | return 1; | 531 | return 1; |
521 | } else | 532 | } else |
522 | return 0; | 533 | return 0; |
@@ -524,10 +535,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
524 | 535 | ||
525 | static int qdio_inbound_q_done(struct qdio_q *q) | 536 | static int qdio_inbound_q_done(struct qdio_q *q) |
526 | { | 537 | { |
527 | unsigned char state; | 538 | unsigned char state = 0; |
528 | #ifdef CONFIG_QDIO_DEBUG | ||
529 | char dbf_text[15]; | ||
530 | #endif | ||
531 | 539 | ||
532 | if (!atomic_read(&q->nr_buf_used)) | 540 | if (!atomic_read(&q->nr_buf_used)) |
533 | return 1; | 541 | return 1; |
@@ -538,7 +546,7 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
538 | */ | 546 | */ |
539 | qdio_siga_sync_q(q); | 547 | qdio_siga_sync_q(q); |
540 | 548 | ||
541 | get_buf_state(q, q->first_to_check, &state); | 549 | get_buf_state(q, q->first_to_check, &state, 0); |
542 | if (state == SLSB_P_INPUT_PRIMED) | 550 | if (state == SLSB_P_INPUT_PRIMED) |
543 | /* we got something to do */ | 551 | /* we got something to do */ |
544 | return 0; | 552 | return 0; |
@@ -552,20 +560,12 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
552 | * has (probably) not moved (see qdio_inbound_processing). | 560 | * has (probably) not moved (see qdio_inbound_processing). |
553 | */ | 561 | */ |
554 | if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { | 562 | if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { |
555 | #ifdef CONFIG_QDIO_DEBUG | 563 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", |
556 | QDIO_DBF_TEXT4(0, trace, "inqisdon"); | 564 | q->first_to_check); |
557 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
558 | sprintf(dbf_text, "pf%02x", q->first_to_check); | ||
559 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
560 | #endif /* CONFIG_QDIO_DEBUG */ | ||
561 | return 1; | 565 | return 1; |
562 | } else { | 566 | } else { |
563 | #ifdef CONFIG_QDIO_DEBUG | 567 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d", |
564 | QDIO_DBF_TEXT4(0, trace, "inqisntd"); | 568 | q->first_to_check); |
565 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
566 | sprintf(dbf_text, "pf%02x", q->first_to_check); | ||
567 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
568 | #endif /* CONFIG_QDIO_DEBUG */ | ||
569 | return 0; | 569 | return 0; |
570 | } | 570 | } |
571 | } | 571 | } |
@@ -573,9 +573,6 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
573 | void qdio_kick_inbound_handler(struct qdio_q *q) | 573 | void qdio_kick_inbound_handler(struct qdio_q *q) |
574 | { | 574 | { |
575 | int count, start, end; | 575 | int count, start, end; |
576 | #ifdef CONFIG_QDIO_DEBUG | ||
577 | char dbf_text[15]; | ||
578 | #endif | ||
579 | 576 | ||
580 | qdio_perf_stat_inc(&perf_stats.inbound_handler); | 577 | qdio_perf_stat_inc(&perf_stats.inbound_handler); |
581 | 578 | ||
@@ -586,10 +583,7 @@ void qdio_kick_inbound_handler(struct qdio_q *q) | |||
586 | else | 583 | else |
587 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; | 584 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; |
588 | 585 | ||
589 | #ifdef CONFIG_QDIO_DEBUG | 586 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count); |
590 | sprintf(dbf_text, "s=%2xc=%2x", start, count); | ||
591 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
592 | #endif /* CONFIG_QDIO_DEBUG */ | ||
593 | 587 | ||
594 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | 588 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
595 | return; | 589 | return; |
@@ -655,14 +649,14 @@ check_next: | |||
655 | if (q->first_to_check == stop) | 649 | if (q->first_to_check == stop) |
656 | return q->first_to_check; | 650 | return q->first_to_check; |
657 | 651 | ||
658 | count = get_buf_states(q, q->first_to_check, &state, count); | 652 | count = get_buf_states(q, q->first_to_check, &state, count, 0); |
659 | if (!count) | 653 | if (!count) |
660 | return q->first_to_check; | 654 | return q->first_to_check; |
661 | 655 | ||
662 | switch (state) { | 656 | switch (state) { |
663 | case SLSB_P_OUTPUT_EMPTY: | 657 | case SLSB_P_OUTPUT_EMPTY: |
664 | /* the adapter got it */ | 658 | /* the adapter got it */ |
665 | QDIO_DBF_TEXT5(0, trace, "outpempt"); | 659 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count); |
666 | 660 | ||
667 | atomic_sub(count, &q->nr_buf_used); | 661 | atomic_sub(count, &q->nr_buf_used); |
668 | q->first_to_check = add_buf(q->first_to_check, count); | 662 | q->first_to_check = add_buf(q->first_to_check, count); |
@@ -674,14 +668,14 @@ check_next: | |||
674 | break; | 668 | break; |
675 | goto check_next; | 669 | goto check_next; |
676 | case SLSB_P_OUTPUT_ERROR: | 670 | case SLSB_P_OUTPUT_ERROR: |
677 | announce_buffer_error(q); | 671 | announce_buffer_error(q, count); |
678 | /* process the buffer, the upper layer will take care of it */ | 672 | /* process the buffer, the upper layer will take care of it */ |
679 | q->first_to_check = add_buf(q->first_to_check, count); | 673 | q->first_to_check = add_buf(q->first_to_check, count); |
680 | atomic_sub(count, &q->nr_buf_used); | 674 | atomic_sub(count, &q->nr_buf_used); |
681 | break; | 675 | break; |
682 | case SLSB_CU_OUTPUT_PRIMED: | 676 | case SLSB_CU_OUTPUT_PRIMED: |
683 | /* the adapter has not fetched the output yet */ | 677 | /* the adapter has not fetched the output yet */ |
684 | QDIO_DBF_TEXT5(0, trace, "outpprim"); | 678 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); |
685 | break; | 679 | break; |
686 | case SLSB_P_OUTPUT_NOT_INIT: | 680 | case SLSB_P_OUTPUT_NOT_INIT: |
687 | case SLSB_P_OUTPUT_HALTED: | 681 | case SLSB_P_OUTPUT_HALTED: |
@@ -706,99 +700,48 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) | |||
706 | 700 | ||
707 | if ((bufnr != q->last_move_ftc) || q->qdio_error) { | 701 | if ((bufnr != q->last_move_ftc) || q->qdio_error) { |
708 | q->last_move_ftc = bufnr; | 702 | q->last_move_ftc = bufnr; |
709 | QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); | 703 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); |
710 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
711 | return 1; | 704 | return 1; |
712 | } else | 705 | } else |
713 | return 0; | 706 | return 0; |
714 | } | 707 | } |
715 | 708 | ||
716 | /* | ||
717 | * VM could present us cc=2 and busy bit set on SIGA-write | ||
718 | * during reconfiguration of their Guest LAN (only in iqdio mode, | ||
719 | * otherwise qdio is asynchronous and cc=2 and busy bit there will take | ||
720 | * the queues down immediately). | ||
721 | * | ||
722 | * Therefore qdio_siga_output will try for a short time constantly, | ||
723 | * if such a condition occurs. If it doesn't change, it will | ||
724 | * increase the busy_siga_counter and save the timestamp, and | ||
725 | * schedule the queue for later processing. qdio_outbound_processing | ||
726 | * will check out the counter. If non-zero, it will call qdio_kick_outbound_q | ||
727 | * as often as the value of the counter. This will attempt further SIGA | ||
728 | * instructions. For each successful SIGA, the counter is | ||
729 | * decreased, for failing SIGAs the counter remains the same, after | ||
730 | * all. After some time of no movement, qdio_kick_outbound_q will | ||
731 | * finally fail and reflect corresponding error codes to call | ||
732 | * the upper layer module and have it take the queues down. | ||
733 | * | ||
734 | * Note that this is a change from the original HiperSockets design | ||
735 | * (saying cc=2 and busy bit means take the queues down), but in | ||
736 | * these days Guest LAN didn't exist... excessive cc=2 with busy bit | ||
737 | * conditions will still take the queues down, but the threshold is | ||
738 | * higher due to the Guest LAN environment. | ||
739 | * | ||
740 | * Called from outbound tasklet and do_QDIO handler. | ||
741 | */ | ||
742 | static void qdio_kick_outbound_q(struct qdio_q *q) | 709 | static void qdio_kick_outbound_q(struct qdio_q *q) |
743 | { | 710 | { |
744 | int rc; | 711 | unsigned int busy_bit; |
745 | #ifdef CONFIG_QDIO_DEBUG | 712 | int cc; |
746 | char dbf_text[15]; | ||
747 | |||
748 | QDIO_DBF_TEXT5(0, trace, "kickoutq"); | ||
749 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | ||
750 | #endif /* CONFIG_QDIO_DEBUG */ | ||
751 | 713 | ||
752 | if (!need_siga_out(q)) | 714 | if (!need_siga_out(q)) |
753 | return; | 715 | return; |
754 | 716 | ||
755 | rc = qdio_siga_output(q); | 717 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); |
756 | switch (rc) { | 718 | qdio_perf_stat_inc(&perf_stats.siga_out); |
719 | |||
720 | cc = qdio_siga_output(q, &busy_bit); | ||
721 | switch (cc) { | ||
757 | case 0: | 722 | case 0: |
758 | /* TODO: improve error handling for CC=0 case */ | ||
759 | #ifdef CONFIG_QDIO_DEBUG | ||
760 | if (q->u.out.timestamp) { | ||
761 | QDIO_DBF_TEXT3(0, trace, "cc2reslv"); | ||
762 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, | ||
763 | q->nr, | ||
764 | atomic_read(&q->u.out.busy_siga_counter)); | ||
765 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
766 | } | ||
767 | #endif /* CONFIG_QDIO_DEBUG */ | ||
768 | /* went smooth this time, reset timestamp */ | ||
769 | q->u.out.timestamp = 0; | ||
770 | break; | 723 | break; |
771 | /* cc=2 and busy bit */ | 724 | case 2: |
772 | case (2 | QDIO_ERROR_SIGA_BUSY): | 725 | if (busy_bit) { |
773 | atomic_inc(&q->u.out.busy_siga_counter); | 726 | DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); |
774 | 727 | q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; | |
775 | /* if the last siga was successful, save timestamp here */ | 728 | } else { |
776 | if (!q->u.out.timestamp) | 729 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", |
777 | q->u.out.timestamp = get_usecs(); | 730 | q->nr); |
778 | 731 | q->qdio_error = cc; | |
779 | /* if we're in time, don't touch qdio_error */ | ||
780 | if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { | ||
781 | tasklet_schedule(&q->tasklet); | ||
782 | break; | ||
783 | } | 732 | } |
784 | QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); | 733 | break; |
785 | #ifdef CONFIG_QDIO_DEBUG | 734 | case 1: |
786 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, | 735 | case 3: |
787 | atomic_read(&q->u.out.busy_siga_counter)); | 736 | DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); |
788 | QDIO_DBF_TEXT3(0, trace, dbf_text); | 737 | q->qdio_error = cc; |
789 | #endif /* CONFIG_QDIO_DEBUG */ | 738 | break; |
790 | default: | ||
791 | /* for plain cc=1, 2 or 3 */ | ||
792 | q->qdio_error = rc; | ||
793 | } | 739 | } |
794 | } | 740 | } |
795 | 741 | ||
796 | static void qdio_kick_outbound_handler(struct qdio_q *q) | 742 | static void qdio_kick_outbound_handler(struct qdio_q *q) |
797 | { | 743 | { |
798 | int start, end, count; | 744 | int start, end, count; |
799 | #ifdef CONFIG_QDIO_DEBUG | ||
800 | char dbf_text[15]; | ||
801 | #endif | ||
802 | 745 | ||
803 | start = q->first_to_kick; | 746 | start = q->first_to_kick; |
804 | end = q->last_move_ftc; | 747 | end = q->last_move_ftc; |
@@ -807,13 +750,8 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) | |||
807 | else | 750 | else |
808 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; | 751 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; |
809 | 752 | ||
810 | #ifdef CONFIG_QDIO_DEBUG | 753 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr); |
811 | QDIO_DBF_TEXT4(0, trace, "kickouth"); | 754 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count); |
812 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
813 | |||
814 | sprintf(dbf_text, "s=%2xc=%2x", start, count); | ||
815 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
816 | #endif /* CONFIG_QDIO_DEBUG */ | ||
817 | 755 | ||
818 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | 756 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
819 | return; | 757 | return; |
@@ -828,22 +766,18 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) | |||
828 | 766 | ||
829 | static void __qdio_outbound_processing(struct qdio_q *q) | 767 | static void __qdio_outbound_processing(struct qdio_q *q) |
830 | { | 768 | { |
831 | int siga_attempts; | 769 | unsigned long flags; |
832 | 770 | ||
833 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); | 771 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); |
834 | 772 | spin_lock_irqsave(&q->lock, flags); | |
835 | /* see comment in qdio_kick_outbound_q */ | ||
836 | siga_attempts = atomic_read(&q->u.out.busy_siga_counter); | ||
837 | while (siga_attempts--) { | ||
838 | atomic_dec(&q->u.out.busy_siga_counter); | ||
839 | qdio_kick_outbound_q(q); | ||
840 | } | ||
841 | 773 | ||
842 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); | 774 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); |
843 | 775 | ||
844 | if (qdio_outbound_q_moved(q)) | 776 | if (qdio_outbound_q_moved(q)) |
845 | qdio_kick_outbound_handler(q); | 777 | qdio_kick_outbound_handler(q); |
846 | 778 | ||
779 | spin_unlock_irqrestore(&q->lock, flags); | ||
780 | |||
847 | if (queue_type(q) == QDIO_ZFCP_QFMT) { | 781 | if (queue_type(q) == QDIO_ZFCP_QFMT) { |
848 | if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) | 782 | if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) |
849 | tasklet_schedule(&q->tasklet); | 783 | tasklet_schedule(&q->tasklet); |
@@ -908,27 +842,18 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
908 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, | 842 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, |
909 | enum qdio_irq_states state) | 843 | enum qdio_irq_states state) |
910 | { | 844 | { |
911 | #ifdef CONFIG_QDIO_DEBUG | 845 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); |
912 | char dbf_text[15]; | ||
913 | |||
914 | QDIO_DBF_TEXT5(0, trace, "newstate"); | ||
915 | sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); | ||
916 | QDIO_DBF_TEXT5(0, trace, dbf_text); | ||
917 | #endif /* CONFIG_QDIO_DEBUG */ | ||
918 | 846 | ||
919 | irq_ptr->state = state; | 847 | irq_ptr->state = state; |
920 | mb(); | 848 | mb(); |
921 | } | 849 | } |
922 | 850 | ||
923 | static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | 851 | static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) |
924 | { | 852 | { |
925 | char dbf_text[15]; | ||
926 | |||
927 | if (irb->esw.esw0.erw.cons) { | 853 | if (irb->esw.esw0.erw.cons) { |
928 | sprintf(dbf_text, "sens%4x", schid.sch_no); | 854 | DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); |
929 | QDIO_DBF_TEXT2(1, trace, dbf_text); | 855 | DBF_ERROR_HEX(irb, 64); |
930 | QDIO_DBF_HEX0(0, trace, irb, 64); | 856 | DBF_ERROR_HEX(irb->ecw, 64); |
931 | QDIO_DBF_HEX0(0, trace, irb->ecw, 64); | ||
932 | } | 857 | } |
933 | } | 858 | } |
934 | 859 | ||
@@ -962,14 +887,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, | |||
962 | { | 887 | { |
963 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 888 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
964 | struct qdio_q *q; | 889 | struct qdio_q *q; |
965 | char dbf_text[15]; | ||
966 | 890 | ||
967 | QDIO_DBF_TEXT2(1, trace, "ick2"); | 891 | DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); |
968 | sprintf(dbf_text, "%s", dev_name(&cdev->dev)); | 892 | DBF_ERROR("intp :%lx", intparm); |
969 | QDIO_DBF_TEXT2(1, trace, dbf_text); | 893 | DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); |
970 | QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); | ||
971 | QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); | ||
972 | QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); | ||
973 | 894 | ||
974 | if (irq_ptr->nr_input_qs) { | 895 | if (irq_ptr->nr_input_qs) { |
975 | q = irq_ptr->input_qs[0]; | 896 | q = irq_ptr->input_qs[0]; |
@@ -1022,28 +943,29 @@ static void qdio_int_error(struct ccw_device *cdev) | |||
1022 | } | 943 | } |
1023 | 944 | ||
1024 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, | 945 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, |
1025 | int dstat) | 946 | int dstat) |
1026 | { | 947 | { |
1027 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 948 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1028 | 949 | ||
1029 | if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { | 950 | if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { |
1030 | QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); | 951 | DBF_ERROR("EQ:ck con"); |
1031 | goto error; | 952 | goto error; |
1032 | } | 953 | } |
1033 | 954 | ||
1034 | if (!(dstat & DEV_STAT_DEV_END)) { | 955 | if (!(dstat & DEV_STAT_DEV_END)) { |
1035 | QDIO_DBF_TEXT2(1, setup, "eq:no de"); | 956 | DBF_ERROR("EQ:no dev"); |
1036 | goto error; | 957 | goto error; |
1037 | } | 958 | } |
1038 | 959 | ||
1039 | if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { | 960 | if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { |
1040 | QDIO_DBF_TEXT2(1, setup, "eq:badio"); | 961 | DBF_ERROR("EQ: bad io"); |
1041 | goto error; | 962 | goto error; |
1042 | } | 963 | } |
1043 | return 0; | 964 | return 0; |
1044 | error: | 965 | error: |
1045 | QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); | 966 | DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); |
1046 | QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); | 967 | DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); |
968 | |||
1047 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); | 969 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); |
1048 | return 1; | 970 | return 1; |
1049 | } | 971 | } |
@@ -1052,12 +974,8 @@ static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, | |||
1052 | int dstat) | 974 | int dstat) |
1053 | { | 975 | { |
1054 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 976 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1055 | char dbf_text[15]; | ||
1056 | |||
1057 | sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); | ||
1058 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1059 | QDIO_DBF_TEXT0(0, trace, dbf_text); | ||
1060 | 977 | ||
978 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); | ||
1061 | if (!qdio_establish_check_errors(cdev, cstat, dstat)) | 979 | if (!qdio_establish_check_errors(cdev, cstat, dstat)) |
1062 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); | 980 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); |
1063 | } | 981 | } |
@@ -1068,25 +986,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1068 | { | 986 | { |
1069 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 987 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1070 | int cstat, dstat; | 988 | int cstat, dstat; |
1071 | char dbf_text[15]; | ||
1072 | 989 | ||
1073 | qdio_perf_stat_inc(&perf_stats.qdio_int); | 990 | qdio_perf_stat_inc(&perf_stats.qdio_int); |
1074 | 991 | ||
1075 | if (!intparm || !irq_ptr) { | 992 | if (!intparm || !irq_ptr) { |
1076 | sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); | 993 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); |
1077 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1078 | return; | 994 | return; |
1079 | } | 995 | } |
1080 | 996 | ||
1081 | if (IS_ERR(irb)) { | 997 | if (IS_ERR(irb)) { |
1082 | switch (PTR_ERR(irb)) { | 998 | switch (PTR_ERR(irb)) { |
1083 | case -EIO: | 999 | case -EIO: |
1084 | sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); | 1000 | DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); |
1085 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1086 | return; | 1001 | return; |
1087 | case -ETIMEDOUT: | 1002 | case -ETIMEDOUT: |
1088 | sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); | 1003 | DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no); |
1089 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1090 | qdio_int_error(cdev); | 1004 | qdio_int_error(cdev); |
1091 | return; | 1005 | return; |
1092 | default: | 1006 | default: |
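The irb argument handled above may carry an error instead of a real interruption response block: the common I/O layer encodes an errno such as -EIO or -ETIMEDOUT in the pointer value, which IS_ERR()/PTR_ERR() decode. A small sketch of that convention, assuming only <linux/err.h> and the s390 struct irb; the wrapper name is made up:

#include <linux/err.h>
#include <asm/cio.h>            /* struct irb */

static void handle_irb_sketch(struct irb *irb)
{
        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:              /* permanent channel error, give up */
                        return;
                case -ETIMEDOUT:        /* request timed out, report and return */
                        return;
                default:
                        return;         /* unexpected error value */
                }
        }
        /* a valid IRB: inspect irb->scsw, sense data, ... */
}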
@@ -1094,7 +1008,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1094 | return; | 1008 | return; |
1095 | } | 1009 | } |
1096 | } | 1010 | } |
1097 | qdio_irq_check_sense(irq_ptr->schid, irb); | 1011 | qdio_irq_check_sense(irq_ptr, irb); |
1098 | 1012 | ||
1099 | cstat = irb->scsw.cmd.cstat; | 1013 | cstat = irb->scsw.cmd.cstat; |
1100 | dstat = irb->scsw.cmd.dstat; | 1014 | dstat = irb->scsw.cmd.dstat; |
@@ -1129,23 +1043,20 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1129 | /** | 1043 | /** |
1130 | * qdio_get_ssqd_desc - get qdio subchannel description | 1044 | * qdio_get_ssqd_desc - get qdio subchannel description |
1131 | * @cdev: ccw device to get description for | 1045 | * @cdev: ccw device to get description for |
1046 | * @data: where to store the ssqd | ||
1132 | * | 1047 | * |
1133 | * Returns a pointer to the saved qdio subchannel description, | 1048 | * Returns 0 or an error code. The results of the chsc are stored in the |
1134 | * or NULL for not setup qdio devices. | 1049 | * specified structure. |
1135 | */ | 1050 | */ |
1136 | struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) | 1051 | int qdio_get_ssqd_desc(struct ccw_device *cdev, |
1052 | struct qdio_ssqd_desc *data) | ||
1137 | { | 1053 | { |
1138 | struct qdio_irq *irq_ptr; | ||
1139 | char dbf_text[15]; | ||
1140 | |||
1141 | sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); | ||
1142 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1143 | 1054 | ||
1144 | irq_ptr = cdev->private->qdio_data; | 1055 | if (!cdev || !cdev->private) |
1145 | if (!irq_ptr) | 1056 | return -EINVAL; |
1146 | return NULL; | ||
1147 | 1057 | ||
1148 | return &irq_ptr->ssqd_desc; | 1058 | DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); |
1059 | return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); | ||
1149 | } | 1060 | } |
1150 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | 1061 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); |
1151 | 1062 | ||
@@ -1159,14 +1070,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | |||
1159 | */ | 1070 | */ |
1160 | int qdio_cleanup(struct ccw_device *cdev, int how) | 1071 | int qdio_cleanup(struct ccw_device *cdev, int how) |
1161 | { | 1072 | { |
1162 | struct qdio_irq *irq_ptr; | 1073 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1163 | char dbf_text[15]; | ||
1164 | int rc; | 1074 | int rc; |
1165 | 1075 | ||
1166 | sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); | ||
1167 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1168 | |||
1169 | irq_ptr = cdev->private->qdio_data; | ||
1170 | if (!irq_ptr) | 1076 | if (!irq_ptr) |
1171 | return -ENODEV; | 1077 | return -ENODEV; |
1172 | 1078 | ||
@@ -1199,18 +1105,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1199 | */ | 1105 | */ |
1200 | int qdio_shutdown(struct ccw_device *cdev, int how) | 1106 | int qdio_shutdown(struct ccw_device *cdev, int how) |
1201 | { | 1107 | { |
1202 | struct qdio_irq *irq_ptr; | 1108 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1203 | int rc; | 1109 | int rc; |
1204 | unsigned long flags; | 1110 | unsigned long flags; |
1205 | char dbf_text[15]; | ||
1206 | 1111 | ||
1207 | sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); | ||
1208 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1209 | |||
1210 | irq_ptr = cdev->private->qdio_data; | ||
1211 | if (!irq_ptr) | 1112 | if (!irq_ptr) |
1212 | return -ENODEV; | 1113 | return -ENODEV; |
1213 | 1114 | ||
1115 | DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); | ||
1116 | |||
1214 | mutex_lock(&irq_ptr->setup_mutex); | 1117 | mutex_lock(&irq_ptr->setup_mutex); |
1215 | /* | 1118 | /* |
1216 | * Subchannel was already shot down. We cannot prevent being called | 1119 | * Subchannel was already shot down. We cannot prevent being called |
@@ -1234,10 +1137,8 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1234 | /* default behaviour is halt */ | 1137 | /* default behaviour is halt */ |
1235 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); | 1138 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); |
1236 | if (rc) { | 1139 | if (rc) { |
1237 | sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); | 1140 | DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); |
1238 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 1141 | DBF_ERROR("rc:%4d", rc); |
1239 | sprintf(dbf_text, "rc=%d", rc); | ||
1240 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1241 | goto no_cleanup; | 1142 | goto no_cleanup; |
1242 | } | 1143 | } |
1243 | 1144 | ||
@@ -1271,17 +1172,18 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); | |||
1271 | */ | 1172 | */ |
1272 | int qdio_free(struct ccw_device *cdev) | 1173 | int qdio_free(struct ccw_device *cdev) |
1273 | { | 1174 | { |
1274 | struct qdio_irq *irq_ptr; | 1175 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1275 | char dbf_text[15]; | ||
1276 | |||
1277 | sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); | ||
1278 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1279 | 1176 | ||
1280 | irq_ptr = cdev->private->qdio_data; | ||
1281 | if (!irq_ptr) | 1177 | if (!irq_ptr) |
1282 | return -ENODEV; | 1178 | return -ENODEV; |
1283 | 1179 | ||
1180 | DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); | ||
1284 | mutex_lock(&irq_ptr->setup_mutex); | 1181 | mutex_lock(&irq_ptr->setup_mutex); |
1182 | |||
1183 | if (irq_ptr->debug_area != NULL) { | ||
1184 | debug_unregister(irq_ptr->debug_area); | ||
1185 | irq_ptr->debug_area = NULL; | ||
1186 | } | ||
1285 | cdev->private->qdio_data = NULL; | 1187 | cdev->private->qdio_data = NULL; |
1286 | mutex_unlock(&irq_ptr->setup_mutex); | 1188 | mutex_unlock(&irq_ptr->setup_mutex); |
1287 | 1189 | ||
@@ -1300,10 +1202,6 @@ EXPORT_SYMBOL_GPL(qdio_free); | |||
1300 | int qdio_initialize(struct qdio_initialize *init_data) | 1202 | int qdio_initialize(struct qdio_initialize *init_data) |
1301 | { | 1203 | { |
1302 | int rc; | 1204 | int rc; |
1303 | char dbf_text[15]; | ||
1304 | |||
1305 | sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); | ||
1306 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1307 | 1205 | ||
1308 | rc = qdio_allocate(init_data); | 1206 | rc = qdio_allocate(init_data); |
1309 | if (rc) | 1207 | if (rc) |
@@ -1323,10 +1221,8 @@ EXPORT_SYMBOL_GPL(qdio_initialize); | |||
1323 | int qdio_allocate(struct qdio_initialize *init_data) | 1221 | int qdio_allocate(struct qdio_initialize *init_data) |
1324 | { | 1222 | { |
1325 | struct qdio_irq *irq_ptr; | 1223 | struct qdio_irq *irq_ptr; |
1326 | char dbf_text[15]; | ||
1327 | 1224 | ||
1328 | sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); | 1225 | DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); |
1329 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1330 | 1226 | ||
1331 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1227 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1332 | (init_data->no_output_qs && !init_data->output_handler)) | 1228 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1340,16 +1236,13 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1340 | (!init_data->output_sbal_addr_array)) | 1236 | (!init_data->output_sbal_addr_array)) |
1341 | return -EINVAL; | 1237 | return -EINVAL; |
1342 | 1238 | ||
1343 | qdio_allocate_do_dbf(init_data); | ||
1344 | |||
1345 | /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ | 1239 | /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ |
1346 | irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1240 | irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1347 | if (!irq_ptr) | 1241 | if (!irq_ptr) |
1348 | goto out_err; | 1242 | goto out_err; |
1349 | QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); | ||
1350 | QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); | ||
1351 | 1243 | ||
1352 | mutex_init(&irq_ptr->setup_mutex); | 1244 | mutex_init(&irq_ptr->setup_mutex); |
1245 | qdio_allocate_dbf(init_data, irq_ptr); | ||
1353 | 1246 | ||
1354 | /* | 1247 | /* |
1355 | * Allocate a page for the chsc calls in qdio_establish. | 1248 | * Allocate a page for the chsc calls in qdio_establish. |
@@ -1367,9 +1260,6 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1367 | goto out_rel; | 1260 | goto out_rel; |
1368 | WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); | 1261 | WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); |
1369 | 1262 | ||
1370 | QDIO_DBF_TEXT0(0, setup, "qdr:"); | ||
1371 | QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); | ||
1372 | |||
1373 | if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, | 1263 | if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, |
1374 | init_data->no_output_qs)) | 1264 | init_data->no_output_qs)) |
1375 | goto out_rel; | 1265 | goto out_rel; |
@@ -1390,14 +1280,12 @@ EXPORT_SYMBOL_GPL(qdio_allocate); | |||
1390 | */ | 1280 | */ |
1391 | int qdio_establish(struct qdio_initialize *init_data) | 1281 | int qdio_establish(struct qdio_initialize *init_data) |
1392 | { | 1282 | { |
1393 | char dbf_text[20]; | ||
1394 | struct qdio_irq *irq_ptr; | 1283 | struct qdio_irq *irq_ptr; |
1395 | struct ccw_device *cdev = init_data->cdev; | 1284 | struct ccw_device *cdev = init_data->cdev; |
1396 | unsigned long saveflags; | 1285 | unsigned long saveflags; |
1397 | int rc; | 1286 | int rc; |
1398 | 1287 | ||
1399 | sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); | 1288 | DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); |
1400 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1401 | 1289 | ||
1402 | irq_ptr = cdev->private->qdio_data; | 1290 | irq_ptr = cdev->private->qdio_data; |
1403 | if (!irq_ptr) | 1291 | if (!irq_ptr) |
@@ -1427,10 +1315,8 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1427 | 1315 | ||
1428 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); | 1316 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); |
1429 | if (rc) { | 1317 | if (rc) { |
1430 | sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); | 1318 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); |
1431 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1319 | DBF_ERROR("rc:%4x", rc); |
1432 | sprintf(dbf_text, "eq:rc%4x", rc); | ||
1433 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1434 | } | 1320 | } |
1435 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | 1321 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); |
1436 | 1322 | ||
@@ -1451,10 +1337,8 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1451 | } | 1337 | } |
1452 | 1338 | ||
1453 | qdio_setup_ssqd_info(irq_ptr); | 1339 | qdio_setup_ssqd_info(irq_ptr); |
1454 | sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc); | 1340 | DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); |
1455 | QDIO_DBF_TEXT2(0, setup, dbf_text); | 1341 | DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); |
1456 | sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); | ||
1457 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
1458 | 1342 | ||
1459 | /* qebsm is now setup if available, initialize buffer states */ | 1343 | /* qebsm is now setup if available, initialize buffer states */ |
1460 | qdio_init_buf_states(irq_ptr); | 1344 | qdio_init_buf_states(irq_ptr); |
@@ -1475,10 +1359,8 @@ int qdio_activate(struct ccw_device *cdev) | |||
1475 | struct qdio_irq *irq_ptr; | 1359 | struct qdio_irq *irq_ptr; |
1476 | int rc; | 1360 | int rc; |
1477 | unsigned long saveflags; | 1361 | unsigned long saveflags; |
1478 | char dbf_text[20]; | ||
1479 | 1362 | ||
1480 | sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); | 1363 | DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); |
1481 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1482 | 1364 | ||
1483 | irq_ptr = cdev->private->qdio_data; | 1365 | irq_ptr = cdev->private->qdio_data; |
1484 | if (!irq_ptr) | 1366 | if (!irq_ptr) |
@@ -1504,10 +1386,8 @@ int qdio_activate(struct ccw_device *cdev) | |||
1504 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, | 1386 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, |
1505 | 0, DOIO_DENY_PREFETCH); | 1387 | 0, DOIO_DENY_PREFETCH); |
1506 | if (rc) { | 1388 | if (rc) { |
1507 | sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); | 1389 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); |
1508 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1390 | DBF_ERROR("rc:%4x", rc); |
1509 | sprintf(dbf_text, "aq:rc%4x", rc); | ||
1510 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1511 | } | 1391 | } |
1512 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | 1392 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); |
1513 | 1393 | ||
@@ -1565,23 +1445,38 @@ static inline int buf_in_between(int bufnr, int start, int count) | |||
1565 | static void handle_inbound(struct qdio_q *q, unsigned int callflags, | 1445 | static void handle_inbound(struct qdio_q *q, unsigned int callflags, |
1566 | int bufnr, int count) | 1446 | int bufnr, int count) |
1567 | { | 1447 | { |
1568 | unsigned long flags; | 1448 | int used, cc, diff; |
1569 | int used, rc; | ||
1570 | 1449 | ||
1571 | /* | 1450 | if (!q->u.in.polling) |
1572 | * do_QDIO could run in parallel with the queue tasklet so the | 1451 | goto set; |
1573 | * upper-layer programm could empty the ACK'ed buffer here. | 1452 | |
1574 | * If that happens we must clear the polling flag, otherwise | 1453 | /* protect against stop polling setting an ACK for an emptied slsb */ |
1575 | * qdio_stop_polling() could set the buffer to NOT_INIT after | 1454 | if (count == QDIO_MAX_BUFFERS_PER_Q) { |
1576 | * it was set to EMPTY which would kill us. | 1455 | /* overwriting everything, just delete polling status */ |
1577 | */ | 1456 | q->u.in.polling = 0; |
1578 | spin_lock_irqsave(&q->u.in.lock, flags); | 1457 | q->u.in.ack_count = 0; |
1579 | if (q->u.in.polling) | 1458 | goto set; |
1580 | if (buf_in_between(q->last_move_ftc, bufnr, count)) | 1459 | } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { |
1460 | if (is_qebsm(q)) { | ||
1461 | /* partial overwrite, just update last_move_ftc */ | ||
1462 | diff = add_buf(bufnr, count); | ||
1463 | diff = sub_buf(diff, q->last_move_ftc); | ||
1464 | q->u.in.ack_count -= diff; | ||
1465 | if (q->u.in.ack_count <= 0) { | ||
1466 | q->u.in.polling = 0; | ||
1467 | q->u.in.ack_count = 0; | ||
1468 | /* TODO: must we set last_move_ftc to something meaningful? */ | ||
1469 | goto set; | ||
1470 | } | ||
1471 | q->last_move_ftc = add_buf(q->last_move_ftc, diff); | ||
1472 | } | ||
1473 | else | ||
1474 | /* the only ACK will be deleted, so stop polling */ | ||
1581 | q->u.in.polling = 0; | 1475 | q->u.in.polling = 0; |
1476 | } | ||
1582 | 1477 | ||
1478 | set: | ||
1583 | count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); | 1479 | count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); |
1584 | spin_unlock_irqrestore(&q->u.in.lock, flags); | ||
1585 | 1480 | ||
1586 | used = atomic_add_return(count, &q->nr_buf_used) - count; | 1481 | used = atomic_add_return(count, &q->nr_buf_used) - count; |
1587 | BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); | 1482 | BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); |
@@ -1591,9 +1486,9 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags, | |||
1591 | return; | 1486 | return; |
1592 | 1487 | ||
1593 | if (need_siga_in(q)) { | 1488 | if (need_siga_in(q)) { |
1594 | rc = qdio_siga_input(q); | 1489 | cc = qdio_siga_input(q); |
1595 | if (rc) | 1490 | if (cc) |
1596 | q->qdio_error = rc; | 1491 | q->qdio_error = cc; |
1597 | } | 1492 | } |
1598 | } | 1493 | } |
1599 | 1494 | ||
@@ -1640,6 +1535,10 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1640 | while (count--) | 1535 | while (count--) |
1641 | qdio_kick_outbound_q(q); | 1536 | qdio_kick_outbound_q(q); |
1642 | } | 1537 | } |
1538 | |||
1539 | /* report CC=2 conditions synchronously */ | ||
1540 | if (q->qdio_error) | ||
1541 | __qdio_outbound_processing(q); | ||
1643 | goto out; | 1542 | goto out; |
1644 | } | 1543 | } |
1645 | 1544 | ||
@@ -1649,11 +1548,11 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1649 | } | 1548 | } |
1650 | 1549 | ||
1651 | /* try to fast requeue buffers */ | 1550 | /* try to fast requeue buffers */ |
1652 | get_buf_state(q, prev_buf(bufnr), &state); | 1551 | get_buf_state(q, prev_buf(bufnr), &state, 0); |
1653 | if (state != SLSB_CU_OUTPUT_PRIMED) | 1552 | if (state != SLSB_CU_OUTPUT_PRIMED) |
1654 | qdio_kick_outbound_q(q); | 1553 | qdio_kick_outbound_q(q); |
1655 | else { | 1554 | else { |
1656 | QDIO_DBF_TEXT5(0, trace, "fast-req"); | 1555 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); |
1657 | qdio_perf_stat_inc(&perf_stats.fast_requeue); | 1556 | qdio_perf_stat_inc(&perf_stats.fast_requeue); |
1658 | } | 1557 | } |
1659 | out: | 1558 | out: |
@@ -1673,12 +1572,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1673 | int q_nr, int bufnr, int count) | 1572 | int q_nr, int bufnr, int count) |
1674 | { | 1573 | { |
1675 | struct qdio_irq *irq_ptr; | 1574 | struct qdio_irq *irq_ptr; |
1676 | #ifdef CONFIG_QDIO_DEBUG | ||
1677 | char dbf_text[20]; | ||
1678 | |||
1679 | sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); | ||
1680 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1681 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1682 | 1575 | ||
1683 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || | 1576 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || |
1684 | (count > QDIO_MAX_BUFFERS_PER_Q) || | 1577 | (count > QDIO_MAX_BUFFERS_PER_Q) || |
@@ -1692,33 +1585,24 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1692 | if (!irq_ptr) | 1585 | if (!irq_ptr) |
1693 | return -ENODEV; | 1586 | return -ENODEV; |
1694 | 1587 | ||
1695 | #ifdef CONFIG_QDIO_DEBUG | ||
1696 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1588 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1697 | QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], | 1589 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input"); |
1698 | sizeof(void *)); | ||
1699 | else | 1590 | else |
1700 | QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], | 1591 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output"); |
1701 | sizeof(void *)); | 1592 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags); |
1702 | 1593 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count); | |
1703 | sprintf(dbf_text, "flag%04x", callflags); | ||
1704 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1705 | sprintf(dbf_text, "qi%02xct%02x", bufnr, count); | ||
1706 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1707 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1708 | 1594 | ||
1709 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) | 1595 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) |
1710 | return -EBUSY; | 1596 | return -EBUSY; |
1711 | 1597 | ||
1712 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1598 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1713 | handle_inbound(irq_ptr->input_qs[q_nr], | 1599 | handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, |
1714 | callflags, bufnr, count); | 1600 | count); |
1715 | else if (callflags & QDIO_FLAG_SYNC_OUTPUT) | 1601 | else if (callflags & QDIO_FLAG_SYNC_OUTPUT) |
1716 | handle_outbound(irq_ptr->output_qs[q_nr], | 1602 | handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, |
1717 | callflags, bufnr, count); | 1603 | count); |
1718 | else { | 1604 | else |
1719 | QDIO_DBF_TEXT3(1, trace, "doQD:inv"); | ||
1720 | return -EINVAL; | 1605 | return -EINVAL; |
1721 | } | ||
1722 | return 0; | 1606 | return 0; |
1723 | } | 1607 | } |
1724 | EXPORT_SYMBOL_GPL(do_QDIO); | 1608 | EXPORT_SYMBOL_GPL(do_QDIO); |
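For reference, the entry point cleaned up above is what upper-layer drivers call to hand buffers back to the queues. A minimal, hypothetical caller; queue number 0 and the helper name are illustrative, the flag and prototype are from the patch:

static int give_back_inbound_buffers(struct ccw_device *cdev,
                                     int bufnr, int count)
{
        /* return emptied buffers of input queue 0 to the device */
        return do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
}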
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c index ec5c4a414235..136d0f0b1e93 100644 --- a/drivers/s390/cio/qdio_perf.c +++ b/drivers/s390/cio/qdio_perf.c | |||
@@ -74,12 +74,20 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v) | |||
74 | seq_printf(m, "\n"); | 74 | seq_printf(m, "\n"); |
75 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", | 75 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", |
76 | (long)atomic_long_read(&perf_stats.fast_requeue)); | 76 | (long)atomic_long_read(&perf_stats.fast_requeue)); |
77 | seq_printf(m, "Number of outbound target full condition\t: %li\n", | ||
78 | (long)atomic_long_read(&perf_stats.outbound_target_full)); | ||
77 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", | 79 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", |
78 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); | 80 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); |
79 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", | 81 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", |
80 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); | 82 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); |
81 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", | 83 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", |
82 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); | 84 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); |
85 | seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", | ||
86 | (long)atomic_long_read(&perf_stats.debug_eqbs_all), | ||
87 | (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); | ||
88 | seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", | ||
89 | (long)atomic_long_read(&perf_stats.debug_sqbs_all), | ||
90 | (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); | ||
83 | seq_printf(m, "\n"); | 91 | seq_printf(m, "\n"); |
84 | return 0; | 92 | return 0; |
85 | } | 93 | } |
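The new /proc lines read counters added to struct qdio_perf_stats in the header below. A sketch of the counter pattern, assuming qdio_perf_stat_inc() reduces to an atomic_long_inc() when statistics are enabled; the counter mirrors one added by the patch, the helper names are illustrative:

#include <asm/atomic.h>

static atomic_long_t outbound_target_full;       /* stand-in for perf_stats.outbound_target_full */

static inline void note_target_full(void)
{
        atomic_long_inc(&outbound_target_full);   /* hot path: lockless increment */
}

static long show_target_full(void)
{
        return atomic_long_read(&outbound_target_full);   /* /proc read side */
}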
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h index 5c406a8b7387..7821ac4fa517 100644 --- a/drivers/s390/cio/qdio_perf.h +++ b/drivers/s390/cio/qdio_perf.h | |||
@@ -36,10 +36,15 @@ struct qdio_perf_stats { | |||
36 | atomic_long_t inbound_handler; | 36 | atomic_long_t inbound_handler; |
37 | atomic_long_t outbound_handler; | 37 | atomic_long_t outbound_handler; |
38 | atomic_long_t fast_requeue; | 38 | atomic_long_t fast_requeue; |
39 | atomic_long_t outbound_target_full; | ||
39 | 40 | ||
40 | /* for debugging */ | 41 | /* for debugging */ |
41 | atomic_long_t debug_tl_out_timer; | 42 | atomic_long_t debug_tl_out_timer; |
42 | atomic_long_t debug_stop_polling; | 43 | atomic_long_t debug_stop_polling; |
44 | atomic_long_t debug_eqbs_all; | ||
45 | atomic_long_t debug_eqbs_incomplete; | ||
46 | atomic_long_t debug_sqbs_all; | ||
47 | atomic_long_t debug_sqbs_incomplete; | ||
43 | }; | 48 | }; |
44 | 49 | ||
45 | extern struct qdio_perf_stats perf_stats; | 50 | extern struct qdio_perf_stats perf_stats; |
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index a0b6b46e7466..c08356b95bf5 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -117,17 +117,16 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, | |||
117 | q->mask = 1 << (31 - i); | 117 | q->mask = 1 << (31 - i); |
118 | q->nr = i; | 118 | q->nr = i; |
119 | q->handler = handler; | 119 | q->handler = handler; |
120 | spin_lock_init(&q->lock); | ||
120 | } | 121 | } |
121 | 122 | ||
122 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | 123 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, |
123 | void **sbals_array, char *dbf_text, int i) | 124 | void **sbals_array, int i) |
124 | { | 125 | { |
125 | struct qdio_q *prev; | 126 | struct qdio_q *prev; |
126 | int j; | 127 | int j; |
127 | 128 | ||
128 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 129 | DBF_HEX(&q, sizeof(void *)); |
129 | QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); | ||
130 | |||
131 | q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); | 130 | q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); |
132 | 131 | ||
133 | /* fill in sbal */ | 132 | /* fill in sbal */ |
@@ -150,31 +149,26 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | |||
150 | for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) | 149 | for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) |
151 | q->sl->element[j].sbal = (unsigned long)q->sbal[j]; | 150 | q->sl->element[j].sbal = (unsigned long)q->sbal[j]; |
152 | 151 | ||
153 | QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); | 152 | DBF_EVENT("sl-slsb-sbal"); |
154 | QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); | 153 | DBF_HEX(q->sl, sizeof(void *)); |
155 | QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); | 154 | DBF_HEX(&q->slsb, sizeof(void *)); |
156 | QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); | 155 | DBF_HEX(q->sbal, sizeof(void *)); |
157 | } | 156 | } |
158 | 157 | ||
159 | static void setup_queues(struct qdio_irq *irq_ptr, | 158 | static void setup_queues(struct qdio_irq *irq_ptr, |
160 | struct qdio_initialize *qdio_init) | 159 | struct qdio_initialize *qdio_init) |
161 | { | 160 | { |
162 | char dbf_text[20]; | ||
163 | struct qdio_q *q; | 161 | struct qdio_q *q; |
164 | void **input_sbal_array = qdio_init->input_sbal_addr_array; | 162 | void **input_sbal_array = qdio_init->input_sbal_addr_array; |
165 | void **output_sbal_array = qdio_init->output_sbal_addr_array; | 163 | void **output_sbal_array = qdio_init->output_sbal_addr_array; |
166 | int i; | 164 | int i; |
167 | 165 | ||
168 | sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no); | ||
169 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
170 | |||
171 | for_each_input_queue(irq_ptr, q, i) { | 166 | for_each_input_queue(irq_ptr, q, i) { |
172 | sprintf(dbf_text, "in-q%4x", i); | 167 | DBF_EVENT("in-q:%1d", i); |
173 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); | 168 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); |
174 | 169 | ||
175 | q->is_input_q = 1; | 170 | q->is_input_q = 1; |
176 | spin_lock_init(&q->u.in.lock); | 171 | setup_storage_lists(q, irq_ptr, input_sbal_array, i); |
177 | setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); | ||
178 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 172 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
179 | 173 | ||
180 | if (is_thinint_irq(irq_ptr)) | 174 | if (is_thinint_irq(irq_ptr)) |
@@ -186,12 +180,11 @@ static void setup_queues(struct qdio_irq *irq_ptr, | |||
186 | } | 180 | } |
187 | 181 | ||
188 | for_each_output_queue(irq_ptr, q, i) { | 182 | for_each_output_queue(irq_ptr, q, i) { |
189 | sprintf(dbf_text, "outq%4x", i); | 183 | DBF_EVENT("outq:%1d", i); |
190 | setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); | 184 | setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); |
191 | 185 | ||
192 | q->is_input_q = 0; | 186 | q->is_input_q = 0; |
193 | setup_storage_lists(q, irq_ptr, output_sbal_array, | 187 | setup_storage_lists(q, irq_ptr, output_sbal_array, i); |
194 | dbf_text, i); | ||
195 | output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 188 | output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
196 | 189 | ||
197 | tasklet_init(&q->tasklet, qdio_outbound_processing, | 190 | tasklet_init(&q->tasklet, qdio_outbound_processing, |
@@ -222,8 +215,6 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) | |||
222 | static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, | 215 | static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, |
223 | unsigned char qdioac, unsigned long token) | 216 | unsigned char qdioac, unsigned long token) |
224 | { | 217 | { |
225 | char dbf_text[15]; | ||
226 | |||
227 | if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) | 218 | if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) |
228 | goto no_qebsm; | 219 | goto no_qebsm; |
229 | if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || | 220 | if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || |
@@ -232,33 +223,41 @@ static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, | |||
232 | 223 | ||
233 | irq_ptr->sch_token = token; | 224 | irq_ptr->sch_token = token; |
234 | 225 | ||
235 | QDIO_DBF_TEXT0(0, setup, "V=V:1"); | 226 | DBF_EVENT("V=V:1"); |
236 | sprintf(dbf_text, "%8lx", irq_ptr->sch_token); | 227 | DBF_EVENT("%8lx", irq_ptr->sch_token); |
237 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
238 | return; | 228 | return; |
239 | 229 | ||
240 | no_qebsm: | 230 | no_qebsm: |
241 | irq_ptr->sch_token = 0; | 231 | irq_ptr->sch_token = 0; |
242 | irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; | 232 | irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; |
243 | QDIO_DBF_TEXT0(0, setup, "noV=V"); | 233 | DBF_EVENT("noV=V"); |
244 | } | 234 | } |
245 | 235 | ||
246 | static int __get_ssqd_info(struct qdio_irq *irq_ptr) | 236 | /* |
237 | * If there is a qdio_irq we use the chsc_page and store the information | ||
238 | * in the qdio_irq, otherwise we copy it to the specified structure. | ||
239 | */ | ||
240 | int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, | ||
241 | struct subchannel_id *schid, | ||
242 | struct qdio_ssqd_desc *data) | ||
247 | { | 243 | { |
248 | struct chsc_ssqd_area *ssqd; | 244 | struct chsc_ssqd_area *ssqd; |
249 | int rc; | 245 | int rc; |
250 | 246 | ||
251 | QDIO_DBF_TEXT0(0, setup, "getssqd"); | 247 | DBF_EVENT("getssqd:%4x", schid->sch_no); |
252 | ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; | 248 | if (irq_ptr != NULL) |
249 | ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; | ||
250 | else | ||
251 | ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); | ||
253 | memset(ssqd, 0, PAGE_SIZE); | 252 | memset(ssqd, 0, PAGE_SIZE); |
254 | 253 | ||
255 | ssqd->request = (struct chsc_header) { | 254 | ssqd->request = (struct chsc_header) { |
256 | .length = 0x0010, | 255 | .length = 0x0010, |
257 | .code = 0x0024, | 256 | .code = 0x0024, |
258 | }; | 257 | }; |
259 | ssqd->first_sch = irq_ptr->schid.sch_no; | 258 | ssqd->first_sch = schid->sch_no; |
260 | ssqd->last_sch = irq_ptr->schid.sch_no; | 259 | ssqd->last_sch = schid->sch_no; |
261 | ssqd->ssid = irq_ptr->schid.ssid; | 260 | ssqd->ssid = schid->ssid; |
262 | 261 | ||
263 | if (chsc(ssqd)) | 262 | if (chsc(ssqd)) |
264 | return -EIO; | 263 | return -EIO; |
@@ -268,27 +267,29 @@ static int __get_ssqd_info(struct qdio_irq *irq_ptr) | |||
268 | 267 | ||
269 | if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || | 268 | if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || |
270 | !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || | 269 | !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || |
271 | (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no)) | 270 | (ssqd->qdio_ssqd.sch != schid->sch_no)) |
272 | return -EINVAL; | 271 | return -EINVAL; |
273 | 272 | ||
274 | memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, | 273 | if (irq_ptr != NULL) |
275 | sizeof(struct qdio_ssqd_desc)); | 274 | memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, |
275 | sizeof(struct qdio_ssqd_desc)); | ||
276 | else { | ||
277 | memcpy(data, &ssqd->qdio_ssqd, | ||
278 | sizeof(struct qdio_ssqd_desc)); | ||
279 | free_page((unsigned long)ssqd); | ||
280 | } | ||
276 | return 0; | 281 | return 0; |
277 | } | 282 | } |
278 | 283 | ||
279 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | 284 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) |
280 | { | 285 | { |
281 | unsigned char qdioac; | 286 | unsigned char qdioac; |
282 | char dbf_text[15]; | ||
283 | int rc; | 287 | int rc; |
284 | 288 | ||
285 | rc = __get_ssqd_info(irq_ptr); | 289 | rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL); |
286 | if (rc) { | 290 | if (rc) { |
287 | QDIO_DBF_TEXT2(0, setup, "ssqdasig"); | 291 | DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); |
288 | sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no); | 292 | DBF_ERROR("rc:%x", rc); |
289 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
290 | sprintf(dbf_text, "rc:%d", rc); | ||
291 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
292 | /* all flags set, worst case */ | 293 | /* all flags set, worst case */ |
293 | qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | | 294 | qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | |
294 | AC1_SIGA_SYNC_NEEDED; | 295 | AC1_SIGA_SYNC_NEEDED; |
@@ -297,9 +298,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | |||
297 | 298 | ||
298 | check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); | 299 | check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); |
299 | process_ac_flags(irq_ptr, qdioac); | 300 | process_ac_flags(irq_ptr, qdioac); |
300 | 301 | DBF_EVENT("qdioac:%4x", qdioac); | |
301 | sprintf(dbf_text, "qdioac%2x", qdioac); | ||
302 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
303 | } | 302 | } |
304 | 303 | ||
305 | void qdio_release_memory(struct qdio_irq *irq_ptr) | 304 | void qdio_release_memory(struct qdio_irq *irq_ptr) |
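qdio_setup_get_ssqd() above now serves two callers: the establish path, which owns a preallocated chsc_page, and the exported qdio_get_ssqd_desc(), which passes irq_ptr == NULL. A sketch of that buffer strategy under the same assumptions; the function name is made up and, unlike the patch, the sketch also checks the temporary page allocation:

static int run_chsc_on_page(struct qdio_irq *irq_ptr)
{
        void *page;
        int rc;

        if (irq_ptr)
                page = irq_ptr->chsc_page;              /* reuse the per-device page */
        else
                page = (void *) __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        memset(page, 0, PAGE_SIZE);
        rc = 0;         /* ... build the request and issue chsc() here ... */

        if (!irq_ptr)
                free_page((unsigned long) page);        /* temporary page only */
        return rc;
}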
@@ -419,7 +418,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
419 | /* get qdio commands */ | 418 | /* get qdio commands */ |
420 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); | 419 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); |
421 | if (!ciw) { | 420 | if (!ciw) { |
422 | QDIO_DBF_TEXT2(1, setup, "no eq"); | 421 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); |
423 | rc = -EINVAL; | 422 | rc = -EINVAL; |
424 | goto out_err; | 423 | goto out_err; |
425 | } | 424 | } |
@@ -427,7 +426,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
427 | 426 | ||
428 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); | 427 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); |
429 | if (!ciw) { | 428 | if (!ciw) { |
430 | QDIO_DBF_TEXT2(1, setup, "no aq"); | 429 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); |
431 | rc = -EINVAL; | 430 | rc = -EINVAL; |
432 | goto out_err; | 431 | goto out_err; |
433 | } | 432 | } |
@@ -447,56 +446,38 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 446 | { |
448 | char s[80]; | 447 | char s[80]; |
449 | 448 | ||
450 | sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); | 449 | snprintf(s, 80, "qdio: %s %s on SC %x using " |
451 | switch (irq_ptr->qib.qfmt) { | 450 | "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", |
452 | case QDIO_QETH_QFMT: | 451 | dev_name(&cdev->dev), |
453 | sprintf(s + strlen(s), "OSA "); | 452 | (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : |
454 | break; | 453 | ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), |
455 | case QDIO_ZFCP_QFMT: | 454 | irq_ptr->schid.sch_no, |
456 | sprintf(s + strlen(s), "ZFCP "); | 455 | is_thinint_irq(irq_ptr), |
457 | break; | 456 | (irq_ptr->sch_token) ? 1 : 0, |
458 | case QDIO_IQDIO_QFMT: | 457 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0, |
459 | sprintf(s + strlen(s), "HS "); | 458 | css_general_characteristics.aif_tdd, |
460 | break; | 459 | (irq_ptr->siga_flag.input) ? "R" : " ", |
461 | } | 460 | (irq_ptr->siga_flag.output) ? "W" : " ", |
462 | sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); | 461 | (irq_ptr->siga_flag.sync) ? "S" : " ", |
463 | sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); | 462 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", |
464 | sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); | 463 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", |
465 | sprintf(s + strlen(s), "PCI:%d ", | 464 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); |
466 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); | ||
467 | sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); | ||
468 | sprintf(s + strlen(s), "SIGA:"); | ||
469 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); | ||
470 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); | ||
471 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); | ||
472 | sprintf(s + strlen(s), "%s", | ||
473 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); | ||
474 | sprintf(s + strlen(s), "%s", | ||
475 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); | ||
476 | sprintf(s + strlen(s), "%s", | ||
477 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); | ||
478 | sprintf(s + strlen(s), "\n"); | ||
479 | printk(KERN_INFO "%s", s); | 465 | printk(KERN_INFO "%s", s); |
480 | } | 466 | } |
481 | 467 | ||
482 | int __init qdio_setup_init(void) | 468 | int __init qdio_setup_init(void) |
483 | { | 469 | { |
484 | char dbf_text[15]; | ||
485 | |||
486 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), | 470 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), |
487 | 256, 0, NULL); | 471 | 256, 0, NULL); |
488 | if (!qdio_q_cache) | 472 | if (!qdio_q_cache) |
489 | return -ENOMEM; | 473 | return -ENOMEM; |
490 | 474 | ||
491 | /* Check for OSA/FCP thin interrupts (bit 67). */ | 475 | /* Check for OSA/FCP thin interrupts (bit 67). */ |
492 | sprintf(dbf_text, "thini%1x", | 476 | DBF_EVENT("thinint:%1d", |
493 | (css_general_characteristics.aif_osa) ? 1 : 0); | 477 | (css_general_characteristics.aif_osa) ? 1 : 0); |
494 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
495 | 478 | ||
496 | /* Check for QEBSM support in general (bit 58). */ | 479 | /* Check for QEBSM support in general (bit 58). */ |
497 | sprintf(dbf_text, "cssQBS:%1x", | 480 | DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0); |
498 | (qebsm_possible()) ? 1 : 0); | ||
499 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
500 | return 0; | 481 | return 0; |
501 | } | 482 | } |
502 | 483 | ||
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index ea7f61400267..8e90e147b746 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
@@ -125,13 +125,13 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | |||
125 | 125 | ||
126 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) | 126 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) |
127 | { | 127 | { |
128 | unsigned char state; | 128 | unsigned char state = 0; |
129 | 129 | ||
130 | if (!atomic_read(&q->nr_buf_used)) | 130 | if (!atomic_read(&q->nr_buf_used)) |
131 | return 1; | 131 | return 1; |
132 | 132 | ||
133 | qdio_siga_sync_q(q); | 133 | qdio_siga_sync_q(q); |
134 | get_buf_state(q, q->first_to_check, &state); | 134 | get_buf_state(q, q->first_to_check, &state, 0); |
135 | 135 | ||
136 | if (state == SLSB_P_INPUT_PRIMED) | 136 | if (state == SLSB_P_INPUT_PRIMED) |
137 | /* more work coming */ | 137 | /* more work coming */ |
@@ -258,8 +258,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data) | |||
258 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | 258 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) |
259 | { | 259 | { |
260 | struct scssc_area *scssc_area; | 260 | struct scssc_area *scssc_area; |
261 | char dbf_text[15]; | ||
262 | void *ptr; | ||
263 | int rc; | 261 | int rc; |
264 | 262 | ||
265 | scssc_area = (struct scssc_area *)irq_ptr->chsc_page; | 263 | scssc_area = (struct scssc_area *)irq_ptr->chsc_page; |
@@ -294,19 +292,15 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | |||
294 | 292 | ||
295 | rc = chsc_error_from_response(scssc_area->response.code); | 293 | rc = chsc_error_from_response(scssc_area->response.code); |
296 | if (rc) { | 294 | if (rc) { |
297 | sprintf(dbf_text, "sidR%4x", scssc_area->response.code); | 295 | DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, |
298 | QDIO_DBF_TEXT1(0, trace, dbf_text); | 296 | scssc_area->response.code); |
299 | QDIO_DBF_TEXT1(0, setup, dbf_text); | 297 | DBF_ERROR_HEX(&scssc_area->response, sizeof(void *)); |
300 | ptr = &scssc_area->response; | ||
301 | QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); | ||
302 | return rc; | 298 | return rc; |
303 | } | 299 | } |
304 | 300 | ||
305 | QDIO_DBF_TEXT2(0, setup, "setscind"); | 301 | DBF_EVENT("setscind"); |
306 | QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, | 302 | DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long)); |
307 | sizeof(unsigned long)); | 303 | DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long)); |
308 | QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, | ||
309 | sizeof(unsigned long)); | ||
310 | return 0; | 304 | return 0; |
311 | } | 305 | } |
312 | 306 | ||
@@ -327,14 +321,11 @@ void tiqdio_free_memory(void) | |||
327 | 321 | ||
328 | int __init tiqdio_register_thinints(void) | 322 | int __init tiqdio_register_thinints(void) |
329 | { | 323 | { |
330 | char dbf_text[20]; | ||
331 | |||
332 | isc_register(QDIO_AIRQ_ISC); | 324 | isc_register(QDIO_AIRQ_ISC); |
333 | tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, | 325 | tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, |
334 | NULL, QDIO_AIRQ_ISC); | 326 | NULL, QDIO_AIRQ_ISC); |
335 | if (IS_ERR(tiqdio_alsi)) { | 327 | if (IS_ERR(tiqdio_alsi)) { |
336 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); | 328 | DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi)); |
337 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
338 | tiqdio_alsi = NULL; | 329 | tiqdio_alsi = NULL; |
339 | isc_unregister(QDIO_AIRQ_ISC); | 330 | isc_unregister(QDIO_AIRQ_ISC); |
340 | return -ENOMEM; | 331 | return -ENOMEM; |
@@ -360,7 +351,7 @@ void qdio_setup_thinint(struct qdio_irq *irq_ptr) | |||
360 | if (!is_thinint_irq(irq_ptr)) | 351 | if (!is_thinint_irq(irq_ptr)) |
361 | return; | 352 | return; |
362 | irq_ptr->dsci = get_indicator(); | 353 | irq_ptr->dsci = get_indicator(); |
363 | QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); | 354 | DBF_HEX(&irq_ptr->dsci, sizeof(void *)); |
364 | } | 355 | } |
365 | 356 | ||
366 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) | 357 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e3fe6838293a..1f5f5d2d87d9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
8 | * Felix Beck <felix.beck@de.ibm.com> | ||
8 | * | 9 | * |
9 | * Adjunct processor bus. | 10 | * Adjunct processor bus. |
10 | * | 11 | * |
@@ -23,6 +24,9 @@ | |||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
24 | */ | 25 | */ |
25 | 26 | ||
27 | #define KMSG_COMPONENT "ap" | ||
28 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
29 | |||
26 | #include <linux/module.h> | 30 | #include <linux/module.h> |
27 | #include <linux/init.h> | 31 | #include <linux/init.h> |
28 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
@@ -34,6 +38,10 @@ | |||
34 | #include <linux/mutex.h> | 38 | #include <linux/mutex.h> |
35 | #include <asm/s390_rdev.h> | 39 | #include <asm/s390_rdev.h> |
36 | #include <asm/reset.h> | 40 | #include <asm/reset.h> |
41 | #include <asm/airq.h> | ||
42 | #include <asm/atomic.h> | ||
43 | #include <asm/system.h> | ||
44 | #include <asm/isc.h> | ||
37 | #include <linux/hrtimer.h> | 45 | #include <linux/hrtimer.h> |
38 | #include <linux/ktime.h> | 46 | #include <linux/ktime.h> |
39 | 47 | ||
@@ -46,6 +54,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); | |||
46 | static int ap_poll_thread_start(void); | 54 | static int ap_poll_thread_start(void); |
47 | static void ap_poll_thread_stop(void); | 55 | static void ap_poll_thread_stop(void); |
48 | static void ap_request_timeout(unsigned long); | 56 | static void ap_request_timeout(unsigned long); |
57 | static inline void ap_schedule_poll_timer(void); | ||
49 | 58 | ||
50 | /* | 59 | /* |
51 | * Module description. | 60 | * Module description. |
@@ -68,7 +77,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000); | |||
68 | MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); | 77 | MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); |
69 | 78 | ||
70 | static struct device *ap_root_device = NULL; | 79 | static struct device *ap_root_device = NULL; |
71 | static DEFINE_SPINLOCK(ap_device_lock); | 80 | static DEFINE_SPINLOCK(ap_device_list_lock); |
72 | static LIST_HEAD(ap_device_list); | 81 | static LIST_HEAD(ap_device_list); |
73 | 82 | ||
74 | /* | 83 | /* |
@@ -80,19 +89,29 @@ static int ap_config_time = AP_CONFIG_TIME; | |||
80 | static DECLARE_WORK(ap_config_work, ap_scan_bus); | 89 | static DECLARE_WORK(ap_config_work, ap_scan_bus); |
81 | 90 | ||
82 | /* | 91 | /* |
83 | * Tasklet & timer for AP request polling. | 92 | * Tasklet & timer for AP request polling and interrupts |
84 | */ | 93 | */ |
85 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); | 94 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); |
86 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); | 95 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); |
87 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); | 96 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); |
88 | static struct task_struct *ap_poll_kthread = NULL; | 97 | static struct task_struct *ap_poll_kthread = NULL; |
89 | static DEFINE_MUTEX(ap_poll_thread_mutex); | 98 | static DEFINE_MUTEX(ap_poll_thread_mutex); |
99 | static void *ap_interrupt_indicator; | ||
90 | static struct hrtimer ap_poll_timer; | 100 | static struct hrtimer ap_poll_timer; |
91 | /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. | 101 | /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. |
92 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ | 102 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ |
93 | static unsigned long long poll_timeout = 250000; | 103 | static unsigned long long poll_timeout = 250000; |
94 | 104 | ||
95 | /** | 105 | /** |
106 | * ap_using_interrupts() - Returns non-zero if interrupt support is | ||
107 | * available. | ||
108 | */ | ||
109 | static inline int ap_using_interrupts(void) | ||
110 | { | ||
111 | return ap_interrupt_indicator != NULL; | ||
112 | } | ||
113 | |||
114 | /** | ||
96 | * ap_intructions_available() - Test if AP instructions are available. | 115 | * ap_intructions_available() - Test if AP instructions are available. |
97 | * | 116 | * |
98 | * Returns 0 if the AP instructions are installed. | 117 | * Returns 0 if the AP instructions are installed. |
@@ -113,6 +132,23 @@ static inline int ap_instructions_available(void) | |||
113 | } | 132 | } |
114 | 133 | ||
115 | /** | 134 | /** |
135 | * ap_interrupts_available(): Test if AP interrupts are available. | ||
136 | * | ||
137 | * Returns 1 if AP interrupts are available. | ||
138 | */ | ||
139 | static int ap_interrupts_available(void) | ||
140 | { | ||
141 | unsigned long long facility_bits[2]; | ||
142 | |||
143 | if (stfle(facility_bits, 2) <= 1) | ||
144 | return 0; | ||
145 | if (!(facility_bits[0] & (1ULL << 61)) || | ||
146 | !(facility_bits[1] & (1ULL << 62))) | ||
147 | return 0; | ||
148 | return 1; | ||
149 | } | ||
150 | |||
151 | /** | ||
116 | * ap_test_queue(): Test adjunct processor queue. | 152 | * ap_test_queue(): Test adjunct processor queue. |
117 | * @qid: The AP queue number | 153 | * @qid: The AP queue number |
118 | * @queue_depth: Pointer to queue depth value | 154 | * @queue_depth: Pointer to queue depth value |
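ap_interrupts_available() above probes the store-facility-list-extended output. Facility bits are numbered from the left, so facility N sits in doubleword N / 64 under mask 1ULL << (63 - N % 64); by that convention the masks used above test facility bits 2 and 65. A small illustrative helper (not from the patch) expressing the same test:

static int test_facility_bit(unsigned long long *facilities, int nr_dwords,
                             int nr)
{
        if (nr >= nr_dwords * 64)
                return 0;
        return (facilities[nr / 64] >> (63 - (nr % 64))) & 1;
}

/* e.g. the check above amounts to:
 *      test_facility_bit(facility_bits, 2, 2) &&
 *      test_facility_bit(facility_bits, 2, 65)
 */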
@@ -152,6 +188,80 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) | |||
152 | return reg1; | 188 | return reg1; |
153 | } | 189 | } |
154 | 190 | ||
191 | #ifdef CONFIG_64BIT | ||
192 | /** | ||
193 | * ap_queue_interruption_control(): Enable interruption for a specific AP. | ||
194 | * @qid: The AP queue number | ||
195 | * @ind: The notification indicator byte | ||
196 | * | ||
197 | * Returns AP queue status. | ||
198 | */ | ||
199 | static inline struct ap_queue_status | ||
200 | ap_queue_interruption_control(ap_qid_t qid, void *ind) | ||
201 | { | ||
202 | register unsigned long reg0 asm ("0") = qid | 0x03000000UL; | ||
203 | register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; | ||
204 | register struct ap_queue_status reg1_out asm ("1"); | ||
205 | register void *reg2 asm ("2") = ind; | ||
206 | asm volatile( | ||
207 | ".long 0xb2af0000" /* PQAP(RAPQ) */ | ||
208 | : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) | ||
209 | : | ||
210 | : "cc" ); | ||
211 | return reg1_out; | ||
212 | } | ||
213 | #endif | ||
214 | |||
215 | /** | ||
216 | * ap_queue_enable_interruption(): Enable interruption on an AP. | ||
217 | * @qid: The AP queue number | ||
218 | * @ind: the notification indicator byte | ||
219 | * | ||
220 | * Enables interruption on AP queue via ap_queue_interruption_control(). Based | ||
221 | * on the return value it waits a while and tests the AP queue if interrupts | ||
222 | * have been switched on using ap_test_queue(). | ||
223 | */ | ||
224 | static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) | ||
225 | { | ||
226 | #ifdef CONFIG_64BIT | ||
227 | struct ap_queue_status status; | ||
228 | int t_depth, t_device_type, rc, i; | ||
229 | |||
230 | rc = -EBUSY; | ||
231 | status = ap_queue_interruption_control(qid, ind); | ||
232 | |||
233 | for (i = 0; i < AP_MAX_RESET; i++) { | ||
234 | switch (status.response_code) { | ||
235 | case AP_RESPONSE_NORMAL: | ||
236 | if (status.int_enabled) | ||
237 | return 0; | ||
238 | break; | ||
239 | case AP_RESPONSE_RESET_IN_PROGRESS: | ||
240 | case AP_RESPONSE_BUSY: | ||
241 | break; | ||
242 | case AP_RESPONSE_Q_NOT_AVAIL: | ||
243 | case AP_RESPONSE_DECONFIGURED: | ||
244 | case AP_RESPONSE_CHECKSTOPPED: | ||
245 | case AP_RESPONSE_INVALID_ADDRESS: | ||
246 | return -ENODEV; | ||
247 | case AP_RESPONSE_OTHERWISE_CHANGED: | ||
248 | if (status.int_enabled) | ||
249 | return 0; | ||
250 | break; | ||
251 | default: | ||
252 | break; | ||
253 | } | ||
254 | if (i < AP_MAX_RESET - 1) { | ||
255 | udelay(5); | ||
256 | status = ap_test_queue(qid, &t_depth, &t_device_type); | ||
257 | } | ||
258 | } | ||
259 | return rc; | ||
260 | #else | ||
261 | return -EINVAL; | ||
262 | #endif | ||
263 | } | ||
264 | |||
155 | /** | 265 | /** |
156 | * __ap_send(): Send message to adjunct processor queue. | 266 | * __ap_send(): Send message to adjunct processor queue. |
157 | * @qid: The AP queue number | 267 | * @qid: The AP queue number |
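ap_queue_enable_interruption() above issues the interruption-control PQAP and then retries up to AP_MAX_RESET times, re-testing the queue after a 5 microsecond delay, until the status reports interrupts enabled. A sketch of how the bring-up path consumes it (compare ap_init_queue() further down in this patch); the wrapper name is hypothetical:

static int bring_up_ap_queue(ap_qid_t qid, void *ind)
{
        int rc = 0;

        if (ap_using_interrupts())
                rc = ap_queue_enable_interruption(qid, ind);
        /* non-zero rc: this AP queue is discarded instead of registered */
        return rc;
}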
@@ -295,6 +405,11 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) | |||
295 | case AP_RESPONSE_CHECKSTOPPED: | 405 | case AP_RESPONSE_CHECKSTOPPED: |
296 | rc = -ENODEV; | 406 | rc = -ENODEV; |
297 | break; | 407 | break; |
408 | case AP_RESPONSE_INVALID_ADDRESS: | ||
409 | rc = -ENODEV; | ||
410 | break; | ||
411 | case AP_RESPONSE_OTHERWISE_CHANGED: | ||
412 | break; | ||
298 | case AP_RESPONSE_BUSY: | 413 | case AP_RESPONSE_BUSY: |
299 | break; | 414 | break; |
300 | default: | 415 | default: |
@@ -345,6 +460,15 @@ static int ap_init_queue(ap_qid_t qid) | |||
345 | status = ap_test_queue(qid, &dummy, &dummy); | 460 | status = ap_test_queue(qid, &dummy, &dummy); |
346 | } | 461 | } |
347 | } | 462 | } |
463 | if (rc == 0 && ap_using_interrupts()) { | ||
464 | rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator); | ||
465 | /* If interruption mode is supported by the machine, | ||
466 | 		 * but an AP cannot be enabled for interruption, then | ||
467 | * the AP will be discarded. */ | ||
468 | if (rc) | ||
469 | pr_err("Registering adapter interrupts for " | ||
470 | "AP %d failed\n", AP_QID_DEVICE(qid)); | ||
471 | } | ||
348 | return rc; | 472 | return rc; |
349 | } | 473 | } |
350 | 474 | ||
@@ -397,16 +521,16 @@ static ssize_t ap_hwtype_show(struct device *dev, | |||
397 | struct ap_device *ap_dev = to_ap_dev(dev); | 521 | struct ap_device *ap_dev = to_ap_dev(dev); |
398 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); | 522 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); |
399 | } | 523 | } |
400 | static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); | ||
401 | 524 | ||
525 | static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); | ||
402 | static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, | 526 | static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, |
403 | char *buf) | 527 | char *buf) |
404 | { | 528 | { |
405 | struct ap_device *ap_dev = to_ap_dev(dev); | 529 | struct ap_device *ap_dev = to_ap_dev(dev); |
406 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); | 530 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); |
407 | } | 531 | } |
408 | static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); | ||
409 | 532 | ||
533 | static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); | ||
410 | static ssize_t ap_request_count_show(struct device *dev, | 534 | static ssize_t ap_request_count_show(struct device *dev, |
411 | struct device_attribute *attr, | 535 | struct device_attribute *attr, |
412 | char *buf) | 536 | char *buf) |
@@ -509,9 +633,9 @@ static int ap_device_probe(struct device *dev) | |||
509 | ap_dev->drv = ap_drv; | 633 | ap_dev->drv = ap_drv; |
510 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 634 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
511 | if (!rc) { | 635 | if (!rc) { |
512 | spin_lock_bh(&ap_device_lock); | 636 | spin_lock_bh(&ap_device_list_lock); |
513 | list_add(&ap_dev->list, &ap_device_list); | 637 | list_add(&ap_dev->list, &ap_device_list); |
514 | spin_unlock_bh(&ap_device_lock); | 638 | spin_unlock_bh(&ap_device_list_lock); |
515 | } | 639 | } |
516 | return rc; | 640 | return rc; |
517 | } | 641 | } |
@@ -553,9 +677,9 @@ static int ap_device_remove(struct device *dev) | |||
553 | 677 | ||
554 | ap_flush_queue(ap_dev); | 678 | ap_flush_queue(ap_dev); |
555 | del_timer_sync(&ap_dev->timeout); | 679 | del_timer_sync(&ap_dev->timeout); |
556 | spin_lock_bh(&ap_device_lock); | 680 | spin_lock_bh(&ap_device_list_lock); |
557 | list_del_init(&ap_dev->list); | 681 | list_del_init(&ap_dev->list); |
558 | spin_unlock_bh(&ap_device_lock); | 682 | spin_unlock_bh(&ap_device_list_lock); |
559 | if (ap_drv->remove) | 683 | if (ap_drv->remove) |
560 | ap_drv->remove(ap_dev); | 684 | ap_drv->remove(ap_dev); |
561 | spin_lock_bh(&ap_dev->lock); | 685 | spin_lock_bh(&ap_dev->lock); |
@@ -599,6 +723,14 @@ static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) | |||
599 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); | 723 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); |
600 | } | 724 | } |
601 | 725 | ||
726 | static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) | ||
727 | { | ||
728 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
729 | ap_using_interrupts() ? 1 : 0); | ||
730 | } | ||
731 | |||
732 | static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL); | ||
733 | |||
602 | static ssize_t ap_config_time_store(struct bus_type *bus, | 734 | static ssize_t ap_config_time_store(struct bus_type *bus, |
603 | const char *buf, size_t count) | 735 | const char *buf, size_t count) |
604 | { | 736 | { |
@@ -653,7 +785,8 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, | |||
653 | ktime_t hr_time; | 785 | ktime_t hr_time; |
654 | 786 | ||
655 | /* 120 seconds = maximum poll interval */ | 787 | /* 120 seconds = maximum poll interval */ |
656 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) | 788 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || |
789 | time > 120000000000ULL) | ||
657 | return -EINVAL; | 790 | return -EINVAL; |
658 | poll_timeout = time; | 791 | poll_timeout = time; |
659 | hr_time = ktime_set(0, poll_timeout); | 792 | hr_time = ktime_set(0, poll_timeout); |
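Editor's note: the only substantive change in this hunk is the ULL suffix on the 120-second bound. On 31-bit builds 120000000000 does not fit a 32-bit long, so without the suffix the compiler has to promote the constant to a wider type and warns about it. A trivial stand-alone sketch of the idiom; the macro and helper names are illustrative:

#include <stdio.h>

/* Maximum poll interval in nanoseconds (120 s).  The ULL suffix keeps the
 * constant well-defined even where 'long' is only 32 bits wide. */
#define POLL_TIMEOUT_MAX_NS 120000000000ULL

static int poll_timeout_valid(unsigned long long ns)
{
	return ns >= 1 && ns <= POLL_TIMEOUT_MAX_NS;
}

int main(void)
{
	printf("%d %d\n", poll_timeout_valid(1000),
	       poll_timeout_valid(POLL_TIMEOUT_MAX_NS + 1));
	return 0;
}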
@@ -672,6 +805,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { | |||
672 | &bus_attr_ap_domain, | 805 | &bus_attr_ap_domain, |
673 | &bus_attr_config_time, | 806 | &bus_attr_config_time, |
674 | &bus_attr_poll_thread, | 807 | &bus_attr_poll_thread, |
808 | &bus_attr_ap_interrupts, | ||
675 | &bus_attr_poll_timeout, | 809 | &bus_attr_poll_timeout, |
676 | NULL, | 810 | NULL, |
677 | }; | 811 | }; |
@@ -814,6 +948,11 @@ out: | |||
814 | return rc; | 948 | return rc; |
815 | } | 949 | } |
816 | 950 | ||
951 | static void ap_interrupt_handler(void *unused1, void *unused2) | ||
952 | { | ||
953 | tasklet_schedule(&ap_tasklet); | ||
954 | } | ||
955 | |||
817 | /** | 956 | /** |
818 | * __ap_scan_bus(): Scan the AP bus. | 957 | * __ap_scan_bus(): Scan the AP bus. |
819 | * @dev: Pointer to device | 958 | * @dev: Pointer to device |
@@ -928,6 +1067,8 @@ ap_config_timeout(unsigned long ptr) | |||
928 | */ | 1067 | */ |
929 | static inline void ap_schedule_poll_timer(void) | 1068 | static inline void ap_schedule_poll_timer(void) |
930 | { | 1069 | { |
1070 | if (ap_using_interrupts()) | ||
1071 | return; | ||
931 | if (hrtimer_is_queued(&ap_poll_timer)) | 1072 | if (hrtimer_is_queued(&ap_poll_timer)) |
932 | return; | 1073 | return; |
933 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), | 1074 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), |
@@ -1181,7 +1322,7 @@ static void ap_reset(struct ap_device *ap_dev) | |||
1181 | ap_dev->unregistered = 1; | 1322 | ap_dev->unregistered = 1; |
1182 | } | 1323 | } |
1183 | 1324 | ||
1184 | static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) | 1325 | static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) |
1185 | { | 1326 | { |
1186 | spin_lock(&ap_dev->lock); | 1327 | spin_lock(&ap_dev->lock); |
1187 | if (!ap_dev->unregistered) { | 1328 | if (!ap_dev->unregistered) { |
@@ -1207,13 +1348,19 @@ static void ap_poll_all(unsigned long dummy) | |||
1207 | unsigned long flags; | 1348 | unsigned long flags; |
1208 | struct ap_device *ap_dev; | 1349 | struct ap_device *ap_dev; |
1209 | 1350 | ||
1351 | /* Reset the indicator if interrupts are used. Thus new interrupts can | ||
1352 | 	 * be received. Doing this at the beginning of the tasklet is therefore | ||
1353 | 	 * important so that no requests on any AP get lost. | ||
1354 | */ | ||
1355 | if (ap_using_interrupts()) | ||
1356 | xchg((u8 *)ap_interrupt_indicator, 0); | ||
1210 | do { | 1357 | do { |
1211 | flags = 0; | 1358 | flags = 0; |
1212 | spin_lock(&ap_device_lock); | 1359 | spin_lock(&ap_device_list_lock); |
1213 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1360 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1214 | __ap_poll_all(ap_dev, &flags); | 1361 | __ap_poll_device(ap_dev, &flags); |
1215 | } | 1362 | } |
1216 | spin_unlock(&ap_device_lock); | 1363 | spin_unlock(&ap_device_list_lock); |
1217 | } while (flags & 1); | 1364 | } while (flags & 1); |
1218 | if (flags & 2) | 1365 | if (flags & 2) |
1219 | ap_schedule_poll_timer(); | 1366 | ap_schedule_poll_timer(); |
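Editor's note: the new preamble in ap_poll_all() clears the adapter-interrupt indicator with xchg() before the device scan starts, so a reply posted while the scan is running sets the indicator again and re-fires the handler instead of being missed. A portable C11 sketch of that ordering; the indicator variable, interrupt_source() and scan_all_queues() are illustrative names, not kernel APIs:

#include <stdatomic.h>

static atomic_uchar indicator;			/* summary indicator byte */

static void interrupt_source(void)		/* analogous to the adapter IRQ */
{
	atomic_store(&indicator, 1);
	/* ...and schedule the poll tasklet here */
}

static void poll_all(void (*scan_all_queues)(void))
{
	/*
	 * Clear the indicator before scanning.  Anything posted during the
	 * scan sets it again, so no wake-up is lost between the scan and
	 * the re-arm.
	 */
	atomic_exchange(&indicator, 0);
	scan_all_queues();
}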
@@ -1253,11 +1400,11 @@ static int ap_poll_thread(void *data) | |||
1253 | remove_wait_queue(&ap_poll_wait, &wait); | 1400 | remove_wait_queue(&ap_poll_wait, &wait); |
1254 | 1401 | ||
1255 | flags = 0; | 1402 | flags = 0; |
1256 | spin_lock_bh(&ap_device_lock); | 1403 | spin_lock_bh(&ap_device_list_lock); |
1257 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1404 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1258 | __ap_poll_all(ap_dev, &flags); | 1405 | __ap_poll_device(ap_dev, &flags); |
1259 | } | 1406 | } |
1260 | spin_unlock_bh(&ap_device_lock); | 1407 | spin_unlock_bh(&ap_device_list_lock); |
1261 | } | 1408 | } |
1262 | set_current_state(TASK_RUNNING); | 1409 | set_current_state(TASK_RUNNING); |
1263 | remove_wait_queue(&ap_poll_wait, &wait); | 1410 | remove_wait_queue(&ap_poll_wait, &wait); |
@@ -1268,6 +1415,8 @@ static int ap_poll_thread_start(void) | |||
1268 | { | 1415 | { |
1269 | int rc; | 1416 | int rc; |
1270 | 1417 | ||
1418 | if (ap_using_interrupts()) | ||
1419 | return 0; | ||
1271 | mutex_lock(&ap_poll_thread_mutex); | 1420 | mutex_lock(&ap_poll_thread_mutex); |
1272 | if (!ap_poll_kthread) { | 1421 | if (!ap_poll_kthread) { |
1273 | ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); | 1422 | ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); |
@@ -1301,8 +1450,12 @@ static void ap_request_timeout(unsigned long data) | |||
1301 | { | 1450 | { |
1302 | struct ap_device *ap_dev = (struct ap_device *) data; | 1451 | struct ap_device *ap_dev = (struct ap_device *) data; |
1303 | 1452 | ||
1304 | if (ap_dev->reset == AP_RESET_ARMED) | 1453 | if (ap_dev->reset == AP_RESET_ARMED) { |
1305 | ap_dev->reset = AP_RESET_DO; | 1454 | ap_dev->reset = AP_RESET_DO; |
1455 | |||
1456 | if (ap_using_interrupts()) | ||
1457 | tasklet_schedule(&ap_tasklet); | ||
1458 | } | ||
1306 | } | 1459 | } |
1307 | 1460 | ||
1308 | static void ap_reset_domain(void) | 1461 | static void ap_reset_domain(void) |
@@ -1337,14 +1490,25 @@ int __init ap_module_init(void) | |||
1337 | int rc, i; | 1490 | int rc, i; |
1338 | 1491 | ||
1339 | if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { | 1492 | if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { |
1340 | printk(KERN_WARNING "Invalid param: domain = %d. " | 1493 | pr_warning("%d is not a valid cryptographic domain\n", |
1341 | " Not loading.\n", ap_domain_index); | 1494 | ap_domain_index); |
1342 | return -EINVAL; | 1495 | return -EINVAL; |
1343 | } | 1496 | } |
1344 | if (ap_instructions_available() != 0) { | 1497 | if (ap_instructions_available() != 0) { |
1345 | printk(KERN_WARNING "AP instructions not installed.\n"); | 1498 | pr_warning("The hardware system does not support " |
1499 | "AP instructions\n"); | ||
1346 | return -ENODEV; | 1500 | return -ENODEV; |
1347 | } | 1501 | } |
1502 | if (ap_interrupts_available()) { | ||
1503 | isc_register(AP_ISC); | ||
1504 | ap_interrupt_indicator = s390_register_adapter_interrupt( | ||
1505 | &ap_interrupt_handler, NULL, AP_ISC); | ||
1506 | if (IS_ERR(ap_interrupt_indicator)) { | ||
1507 | ap_interrupt_indicator = NULL; | ||
1508 | isc_unregister(AP_ISC); | ||
1509 | } | ||
1510 | } | ||
1511 | |||
1348 | register_reset_call(&ap_reset_call); | 1512 | register_reset_call(&ap_reset_call); |
1349 | 1513 | ||
1350 | /* Create /sys/bus/ap. */ | 1514 | /* Create /sys/bus/ap. */ |
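Editor's note: ap_module_init() now registers AP_ISC and an adapter-interrupt indicator and simply falls back to polling when either step fails, because every interrupt-dependent path in the file is guarded by ap_using_interrupts(). That helper is not part of the hunks shown here; the definition below is only an assumption about its likely shape (a NULL test on the indicator), included to make the fallback logic easier to follow:

/* Assumed shape of the guard used throughout this file; the real definition
 * lives outside the hunks shown in this diff. */
static void *ap_interrupt_indicator;

static inline int ap_using_interrupts(void)
{
	return ap_interrupt_indicator != NULL;
}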
@@ -1408,6 +1572,10 @@ out_bus: | |||
1408 | bus_unregister(&ap_bus_type); | 1572 | bus_unregister(&ap_bus_type); |
1409 | out: | 1573 | out: |
1410 | unregister_reset_call(&ap_reset_call); | 1574 | unregister_reset_call(&ap_reset_call); |
1575 | if (ap_using_interrupts()) { | ||
1576 | s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); | ||
1577 | isc_unregister(AP_ISC); | ||
1578 | } | ||
1411 | return rc; | 1579 | return rc; |
1412 | } | 1580 | } |
1413 | 1581 | ||
@@ -1443,6 +1611,10 @@ void ap_module_exit(void) | |||
1443 | bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); | 1611 | bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); |
1444 | bus_unregister(&ap_bus_type); | 1612 | bus_unregister(&ap_bus_type); |
1445 | unregister_reset_call(&ap_reset_call); | 1613 | unregister_reset_call(&ap_reset_call); |
1614 | if (ap_using_interrupts()) { | ||
1615 | s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); | ||
1616 | isc_unregister(AP_ISC); | ||
1617 | } | ||
1446 | } | 1618 | } |
1447 | 1619 | ||
1448 | #ifndef CONFIG_ZCRYPT_MONOLITHIC | 1620 | #ifndef CONFIG_ZCRYPT_MONOLITHIC |
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 446378b308fc..a35362241805 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
8 | * Felix Beck <felix.beck@de.ibm.com> | ||
8 | * | 9 | * |
9 | * Adjunct processor bus header file. | 10 | * Adjunct processor bus header file. |
10 | * | 11 | * |
@@ -67,7 +68,8 @@ struct ap_queue_status { | |||
67 | unsigned int queue_empty : 1; | 68 | unsigned int queue_empty : 1; |
68 | unsigned int replies_waiting : 1; | 69 | unsigned int replies_waiting : 1; |
69 | unsigned int queue_full : 1; | 70 | unsigned int queue_full : 1; |
70 | unsigned int pad1 : 5; | 71 | unsigned int pad1 : 4; |
72 | unsigned int int_enabled : 1; | ||
71 | unsigned int response_code : 8; | 73 | unsigned int response_code : 8; |
72 | unsigned int pad2 : 16; | 74 | unsigned int pad2 : 16; |
73 | }; | 75 | }; |
@@ -78,6 +80,8 @@ struct ap_queue_status { | |||
78 | #define AP_RESPONSE_DECONFIGURED 0x03 | 80 | #define AP_RESPONSE_DECONFIGURED 0x03 |
79 | #define AP_RESPONSE_CHECKSTOPPED 0x04 | 81 | #define AP_RESPONSE_CHECKSTOPPED 0x04 |
80 | #define AP_RESPONSE_BUSY 0x05 | 82 | #define AP_RESPONSE_BUSY 0x05 |
83 | #define AP_RESPONSE_INVALID_ADDRESS 0x06 | ||
84 | #define AP_RESPONSE_OTHERWISE_CHANGED 0x07 | ||
81 | #define AP_RESPONSE_Q_FULL 0x10 | 85 | #define AP_RESPONSE_Q_FULL 0x10 |
82 | #define AP_RESPONSE_NO_PENDING_REPLY 0x10 | 86 | #define AP_RESPONSE_NO_PENDING_REPLY 0x10 |
83 | #define AP_RESPONSE_INDEX_TOO_BIG 0x11 | 87 | #define AP_RESPONSE_INDEX_TOO_BIG 0x11 |
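Editor's note: the header change carves the new int_enabled bit out of the former 5-bit pad1 field, so the 32-bit status word is now laid out as 1+1+1+4+1+8+16 bits, and adds the two response codes handled in ap_query_queue() and in the interruption path. As a cross-check of that layout, here is a small stand-alone decoder that unpacks a raw status word by hand, assuming the MSB-first bitfield allocation used on s390; the helper name and sample value are illustrative:

#include <stdio.h>
#include <stdint.h>

struct decoded_status {
	unsigned queue_empty, replies_waiting, queue_full, int_enabled, response_code;
};

static struct decoded_status decode_status(uint32_t w)
{
	struct decoded_status s = {
		.queue_empty	 = (w >> 31) & 1,
		.replies_waiting = (w >> 30) & 1,
		.queue_full	 = (w >> 29) & 1,
		/* bits 28..25 are pad1 (4 bits after this patch) */
		.int_enabled	 = (w >> 24) & 1,
		.response_code	 = (w >> 16) & 0xff,
	};
	return s;
}

int main(void)
{
	/* queue empty, interrupts off, response code 0x05 (AP_RESPONSE_BUSY) */
	struct decoded_status s = decode_status(0x80050000u);

	printf("empty=%u int=%u rc=0x%02x\n",
	       s.queue_empty, s.int_enabled, s.response_code);
	return 0;
}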
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 54f4cbc3be9e..326ea08f67c9 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c | |||
@@ -264,17 +264,21 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev, | |||
264 | .type = TYPE82_RSP_CODE, | 264 | .type = TYPE82_RSP_CODE, |
265 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 265 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
266 | }; | 266 | }; |
267 | struct type80_hdr *t80h = reply->message; | 267 | struct type80_hdr *t80h; |
268 | int length; | 268 | int length; |
269 | 269 | ||
270 | /* Copy the reply message to the request message buffer. */ | 270 | /* Copy the reply message to the request message buffer. */ |
271 | if (IS_ERR(reply)) | 271 | if (IS_ERR(reply)) { |
272 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 272 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
273 | else if (t80h->type == TYPE80_RSP_CODE) { | 273 | goto out; |
274 | } | ||
275 | t80h = reply->message; | ||
276 | if (t80h->type == TYPE80_RSP_CODE) { | ||
274 | length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); | 277 | length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); |
275 | memcpy(msg->message, reply->message, length); | 278 | memcpy(msg->message, reply->message, length); |
276 | } else | 279 | } else |
277 | memcpy(msg->message, reply->message, sizeof error_reply); | 280 | memcpy(msg->message, reply->message, sizeof error_reply); |
281 | out: | ||
278 | complete((struct completion *) msg->private); | 282 | complete((struct completion *) msg->private); |
279 | } | 283 | } |
280 | 284 | ||
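Editor's note: this receive callback, and the three below it in zcrypt_pcica.c, zcrypt_pcicc.c and zcrypt_pcixcc.c, all get the same fix: reply may be an ERR_PTR-encoded error, so the old initializer read reply->message before the IS_ERR() check. A self-contained sketch of the safe ordering, with userspace stand-ins for the kernel's ERR_PTR helpers; struct reply, receive() and the buffer handling are invented for illustration:

#include <errno.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err)     { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct reply { const char *message; };

static void receive(struct reply *reply, char *buf, size_t len)
{
	/* Check IS_ERR() first; an ERR_PTR value must never be dereferenced,
	 * which is what the old 't80h = reply->message' initializer did. */
	if (IS_ERR(reply)) {
		snprintf(buf, len, "machine failure (%ld)", PTR_ERR(reply));
		return;
	}
	snprintf(buf, len, "%s", reply->message);
}

int main(void)
{
	char buf[64];
	struct reply ok = { "type 80 response" };

	receive(&ok, buf, sizeof(buf));
	puts(buf);
	receive(ERR_PTR(-ENODEV), buf, sizeof(buf));
	puts(buf);
	return 0;
}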
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 12da4815ba8e..17ba81b58c78 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
@@ -247,17 +247,21 @@ static void zcrypt_pcica_receive(struct ap_device *ap_dev, | |||
247 | .type = TYPE82_RSP_CODE, | 247 | .type = TYPE82_RSP_CODE, |
248 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 248 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
249 | }; | 249 | }; |
250 | struct type84_hdr *t84h = reply->message; | 250 | struct type84_hdr *t84h; |
251 | int length; | 251 | int length; |
252 | 252 | ||
253 | /* Copy the reply message to the request message buffer. */ | 253 | /* Copy the reply message to the request message buffer. */ |
254 | if (IS_ERR(reply)) | 254 | if (IS_ERR(reply)) { |
255 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 255 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
256 | else if (t84h->code == TYPE84_RSP_CODE) { | 256 | goto out; |
257 | } | ||
258 | t84h = reply->message; | ||
259 | if (t84h->code == TYPE84_RSP_CODE) { | ||
257 | length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); | 260 | length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); |
258 | memcpy(msg->message, reply->message, length); | 261 | memcpy(msg->message, reply->message, length); |
259 | } else | 262 | } else |
260 | memcpy(msg->message, reply->message, sizeof error_reply); | 263 | memcpy(msg->message, reply->message, sizeof error_reply); |
264 | out: | ||
261 | complete((struct completion *) msg->private); | 265 | complete((struct completion *) msg->private); |
262 | } | 266 | } |
263 | 267 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 779952cb19fc..f4b0c4795434 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
@@ -447,19 +447,23 @@ static void zcrypt_pcicc_receive(struct ap_device *ap_dev, | |||
447 | .type = TYPE82_RSP_CODE, | 447 | .type = TYPE82_RSP_CODE, |
448 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 448 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
449 | }; | 449 | }; |
450 | struct type86_reply *t86r = reply->message; | 450 | struct type86_reply *t86r; |
451 | int length; | 451 | int length; |
452 | 452 | ||
453 | /* Copy the reply message to the request message buffer. */ | 453 | /* Copy the reply message to the request message buffer. */ |
454 | if (IS_ERR(reply)) | 454 | if (IS_ERR(reply)) { |
455 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 455 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
456 | else if (t86r->hdr.type == TYPE86_RSP_CODE && | 456 | goto out; |
457 | } | ||
458 | t86r = reply->message; | ||
459 | if (t86r->hdr.type == TYPE86_RSP_CODE && | ||
457 | t86r->cprb.cprb_ver_id == 0x01) { | 460 | t86r->cprb.cprb_ver_id == 0x01) { |
458 | length = sizeof(struct type86_reply) + t86r->length - 2; | 461 | length = sizeof(struct type86_reply) + t86r->length - 2; |
459 | length = min(PCICC_MAX_RESPONSE_SIZE, length); | 462 | length = min(PCICC_MAX_RESPONSE_SIZE, length); |
460 | memcpy(msg->message, reply->message, length); | 463 | memcpy(msg->message, reply->message, length); |
461 | } else | 464 | } else |
462 | memcpy(msg->message, reply->message, sizeof error_reply); | 465 | memcpy(msg->message, reply->message, sizeof error_reply); |
466 | out: | ||
463 | complete((struct completion *) msg->private); | 467 | complete((struct completion *) msg->private); |
464 | } | 468 | } |
465 | 469 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index d8ad36f81540..e7a1e22e77ac 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -635,13 +635,16 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
635 | }; | 635 | }; |
636 | struct response_type *resp_type = | 636 | struct response_type *resp_type = |
637 | (struct response_type *) msg->private; | 637 | (struct response_type *) msg->private; |
638 | struct type86x_reply *t86r = reply->message; | 638 | struct type86x_reply *t86r; |
639 | int length; | 639 | int length; |
640 | 640 | ||
641 | /* Copy the reply message to the request message buffer. */ | 641 | /* Copy the reply message to the request message buffer. */ |
642 | if (IS_ERR(reply)) | 642 | if (IS_ERR(reply)) { |
643 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 643 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
644 | else if (t86r->hdr.type == TYPE86_RSP_CODE && | 644 | goto out; |
645 | } | ||
646 | t86r = reply->message; | ||
647 | if (t86r->hdr.type == TYPE86_RSP_CODE && | ||
645 | t86r->cprbx.cprb_ver_id == 0x02) { | 648 | t86r->cprbx.cprb_ver_id == 0x02) { |
646 | switch (resp_type->type) { | 649 | switch (resp_type->type) { |
647 | case PCIXCC_RESPONSE_TYPE_ICA: | 650 | case PCIXCC_RESPONSE_TYPE_ICA: |
@@ -660,6 +663,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
660 | } | 663 | } |
661 | } else | 664 | } else |
662 | memcpy(msg->message, reply->message, sizeof error_reply); | 665 | memcpy(msg->message, reply->message, sizeof error_reply); |
666 | out: | ||
663 | complete(&(resp_type->work)); | 667 | complete(&(resp_type->work)); |
664 | } | 668 | } |
665 | 669 | ||
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 42776550acfd..f29c7086fc19 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -13,6 +13,9 @@ | |||
13 | #undef DEBUGDATA | 13 | #undef DEBUGDATA |
14 | #undef DEBUGCCW | 14 | #undef DEBUGCCW |
15 | 15 | ||
16 | #define KMSG_COMPONENT "ctcm" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
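Editor's note: ctcm_fsms.c (and, further down, ctcm_main.c, ctcm_mpc.c, ctcm_sysfs.c, lcs.c and netiucv.c) now define KMSG_COMPONENT and pr_fmt before the first include, which gives every pr_*() call in the file a uniform component prefix and lets the hand-rolled ctcm_pr_*/PRINT_* wrappers go away. A userspace sketch of what the macro buys; the simplified pr_err/pr_info definitions below are stand-ins, not the kernel's printk plumbing:

#include <stdio.h>

#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

/* Simplified stand-ins: the kernel's pr_err()/pr_info() expand to
 * printk(KERN_ERR pr_fmt(fmt), ...) and pick up the prefix the same way. */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("CTCM driver initialized\n");	/* -> "ctcm: CTCM driver initialized" */
	pr_err("%s: The communication peer is busy\n", "0.0.f000");
	return 0;
}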
@@ -190,21 +193,22 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); | |||
190 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) | 193 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) |
191 | { | 194 | { |
192 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 195 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
193 | "%s(%s): %s: %04x\n", | 196 | "%s(%s): %s: %04x\n", |
194 | CTCM_FUNTAIL, ch->id, msg, rc); | 197 | CTCM_FUNTAIL, ch->id, msg, rc); |
195 | switch (rc) { | 198 | switch (rc) { |
196 | case -EBUSY: | 199 | case -EBUSY: |
197 | ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg); | 200 | pr_info("%s: The communication peer is busy\n", |
201 | ch->id); | ||
198 | fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); | 202 | fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); |
199 | break; | 203 | break; |
200 | case -ENODEV: | 204 | case -ENODEV: |
201 | ctcm_pr_emerg("%s (%s): Invalid device called for IO\n", | 205 | pr_err("%s: The specified target device is not valid\n", |
202 | ch->id, msg); | 206 | ch->id); |
203 | fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); | 207 | fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); |
204 | break; | 208 | break; |
205 | default: | 209 | default: |
206 | ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n", | 210 | pr_err("An I/O operation resulted in error %04x\n", |
207 | ch->id, msg, rc); | 211 | rc); |
208 | fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); | 212 | fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); |
209 | } | 213 | } |
210 | } | 214 | } |
@@ -886,8 +890,15 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) | |||
886 | fsm_newstate(fi, CTC_STATE_RXERR); | 890 | fsm_newstate(fi, CTC_STATE_RXERR); |
887 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 891 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
888 | } | 892 | } |
889 | } else | 893 | } else { |
890 | ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name); | 894 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
895 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, | ||
896 | ctc_ch_event_names[event], fsm_getstate_str(fi)); | ||
897 | |||
898 | dev_warn(&dev->dev, | ||
899 | "Initialization failed with RX/TX init handshake " | ||
900 | "error %s\n", ctc_ch_event_names[event]); | ||
901 | } | ||
891 | } | 902 | } |
892 | 903 | ||
893 | /** | 904 | /** |
@@ -969,7 +980,9 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) | |||
969 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, | 980 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, |
970 | ctc_ch_event_names[event], fsm_getstate_str(fi)); | 981 | ctc_ch_event_names[event], fsm_getstate_str(fi)); |
971 | 982 | ||
972 | ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name); | 983 | dev_warn(&dev->dev, |
984 | "Initialization failed with RX/TX init handshake " | ||
985 | "error %s\n", ctc_ch_event_names[event]); | ||
973 | } | 986 | } |
974 | } | 987 | } |
975 | 988 | ||
@@ -2101,14 +2114,11 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) | |||
2101 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); | 2114 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); |
2102 | 2115 | ||
2103 | if (IS_MPC(priv)) { | 2116 | if (IS_MPC(priv)) { |
2104 | ctcm_pr_info("ctcm: %s Restarting Device and " | ||
2105 | "MPC Group in 5 seconds\n", | ||
2106 | dev->name); | ||
2107 | restart_timer = CTCM_TIME_1_SEC; | 2117 | restart_timer = CTCM_TIME_1_SEC; |
2108 | } else { | 2118 | } else { |
2109 | ctcm_pr_info("%s: Restarting\n", dev->name); | ||
2110 | restart_timer = CTCM_TIME_5_SEC; | 2119 | restart_timer = CTCM_TIME_5_SEC; |
2111 | } | 2120 | } |
2121 | dev_info(&dev->dev, "Restarting device\n"); | ||
2112 | 2122 | ||
2113 | dev_action_stop(fi, event, arg); | 2123 | dev_action_stop(fi, event, arg); |
2114 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); | 2124 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); |
@@ -2150,16 +2160,16 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) | |||
2150 | case DEV_STATE_STARTWAIT_RX: | 2160 | case DEV_STATE_STARTWAIT_RX: |
2151 | if (event == DEV_EVENT_RXUP) { | 2161 | if (event == DEV_EVENT_RXUP) { |
2152 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2162 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2153 | ctcm_pr_info("%s: connected with remote side\n", | 2163 | dev_info(&dev->dev, |
2154 | dev->name); | 2164 | "Connected with remote side\n"); |
2155 | ctcm_clear_busy(dev); | 2165 | ctcm_clear_busy(dev); |
2156 | } | 2166 | } |
2157 | break; | 2167 | break; |
2158 | case DEV_STATE_STARTWAIT_TX: | 2168 | case DEV_STATE_STARTWAIT_TX: |
2159 | if (event == DEV_EVENT_TXUP) { | 2169 | if (event == DEV_EVENT_TXUP) { |
2160 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2170 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2161 | ctcm_pr_info("%s: connected with remote side\n", | 2171 | dev_info(&dev->dev, |
2162 | dev->name); | 2172 | "Connected with remote side\n"); |
2163 | ctcm_clear_busy(dev); | 2173 | ctcm_clear_busy(dev); |
2164 | } | 2174 | } |
2165 | break; | 2175 | break; |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index a4e29836a2aa..2678573becec 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -21,6 +21,9 @@ | |||
21 | #undef DEBUGDATA | 21 | #undef DEBUGDATA |
22 | #undef DEBUGCCW | 22 | #undef DEBUGCCW |
23 | 23 | ||
24 | #define KMSG_COMPONENT "ctcm" | ||
25 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
26 | |||
24 | #include <linux/module.h> | 27 | #include <linux/module.h> |
25 | #include <linux/init.h> | 28 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
@@ -281,14 +284,16 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
281 | 284 | ||
282 | switch (PTR_ERR(irb)) { | 285 | switch (PTR_ERR(irb)) { |
283 | case -EIO: | 286 | case -EIO: |
284 | ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev)); | 287 | dev_err(&cdev->dev, |
288 | "An I/O-error occurred on the CTCM device\n"); | ||
285 | break; | 289 | break; |
286 | case -ETIMEDOUT: | 290 | case -ETIMEDOUT: |
287 | ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev)); | 291 | dev_err(&cdev->dev, |
292 | "An adapter hardware operation timed out\n"); | ||
288 | break; | 293 | break; |
289 | default: | 294 | default: |
290 | ctcm_pr_warn("unknown error %ld on device %s\n", | 295 | dev_err(&cdev->dev, |
291 | PTR_ERR(irb), dev_name(&cdev->dev)); | 296 | "An error occurred on the adapter hardware\n"); |
292 | } | 297 | } |
293 | return PTR_ERR(irb); | 298 | return PTR_ERR(irb); |
294 | } | 299 | } |
@@ -309,15 +314,17 @@ static inline void ccw_unit_check(struct channel *ch, __u8 sense) | |||
309 | if (sense & SNS0_INTERVENTION_REQ) { | 314 | if (sense & SNS0_INTERVENTION_REQ) { |
310 | if (sense & 0x01) { | 315 | if (sense & 0x01) { |
311 | if (ch->sense_rc != 0x01) { | 316 | if (ch->sense_rc != 0x01) { |
312 | ctcm_pr_debug("%s: Interface disc. or Sel. " | 317 | pr_notice( |
313 | "reset (remote)\n", ch->id); | 318 | "%s: The communication peer has " |
319 | "disconnected\n", ch->id); | ||
314 | ch->sense_rc = 0x01; | 320 | ch->sense_rc = 0x01; |
315 | } | 321 | } |
316 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); | 322 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); |
317 | } else { | 323 | } else { |
318 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { | 324 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { |
319 | ctcm_pr_debug("%s: System reset (remote)\n", | 325 | pr_notice( |
320 | ch->id); | 326 | "%s: The remote operating system is " |
327 | "not available\n", ch->id); | ||
321 | ch->sense_rc = SNS0_INTERVENTION_REQ; | 328 | ch->sense_rc = SNS0_INTERVENTION_REQ; |
322 | } | 329 | } |
323 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); | 330 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); |
@@ -1194,8 +1201,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1194 | 1201 | ||
1195 | /* Check for unsolicited interrupts. */ | 1202 | /* Check for unsolicited interrupts. */ |
1196 | if (cgdev == NULL) { | 1203 | if (cgdev == NULL) { |
1197 | ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n", | 1204 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR, |
1198 | cstat, dstat); | 1205 | "%s(%s) unsolicited irq: c-%02x d-%02x\n", |
1206 | CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat); | ||
1207 | dev_warn(&cdev->dev, | ||
1208 | "The adapter received a non-specific IRQ\n"); | ||
1199 | return; | 1209 | return; |
1200 | } | 1210 | } |
1201 | 1211 | ||
@@ -1207,31 +1217,34 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1207 | else if (priv->channel[WRITE]->cdev == cdev) | 1217 | else if (priv->channel[WRITE]->cdev == cdev) |
1208 | ch = priv->channel[WRITE]; | 1218 | ch = priv->channel[WRITE]; |
1209 | else { | 1219 | else { |
1210 | ctcm_pr_err("ctcm: Can't determine channel for interrupt, " | 1220 | dev_err(&cdev->dev, |
1211 | "device %s\n", dev_name(&cdev->dev)); | 1221 | "%s: Internal error: Can't determine channel for " |
1222 | "interrupt device %s\n", | ||
1223 | __func__, dev_name(&cdev->dev)); | ||
1224 | /* Explain: inconsistent internal structures */ | ||
1212 | return; | 1225 | return; |
1213 | } | 1226 | } |
1214 | 1227 | ||
1215 | dev = ch->netdev; | 1228 | dev = ch->netdev; |
1216 | if (dev == NULL) { | 1229 | if (dev == NULL) { |
1217 | ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", | 1230 | dev_err(&cdev->dev, |
1218 | __func__, dev_name(&cdev->dev), ch); | 1231 | "%s Internal error: net_device is NULL, ch = 0x%p\n", |
1232 | __func__, ch); | ||
1233 | /* Explain: inconsistent internal structures */ | ||
1219 | return; | 1234 | return; |
1220 | } | 1235 | } |
1221 | 1236 | ||
1222 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, | ||
1223 | "%s(%s): int. for %s: cstat=%02x dstat=%02x", | ||
1224 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); | ||
1225 | |||
1226 | /* Copy interruption response block. */ | 1237 | /* Copy interruption response block. */ |
1227 | memcpy(ch->irb, irb, sizeof(struct irb)); | 1238 | memcpy(ch->irb, irb, sizeof(struct irb)); |
1228 | 1239 | ||
1240 | /* Issue error message and return on subchannel error code */ | ||
1229 | if (irb->scsw.cmd.cstat) { | 1241 | if (irb->scsw.cmd.cstat) { |
1230 | /* Check for good subchannel return code, otherwise error message */ | ||
1231 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); | 1242 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); |
1232 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", | 1243 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
1233 | dev->name, ch->id, irb->scsw.cmd.cstat, | 1244 | "%s(%s): sub-ch check %s: cs=%02x ds=%02x", |
1234 | irb->scsw.cmd.dstat); | 1245 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); |
1246 | dev_warn(&cdev->dev, | ||
1247 | "A check occurred on the subchannel\n"); | ||
1235 | return; | 1248 | return; |
1236 | } | 1249 | } |
1237 | 1250 | ||
@@ -1239,7 +1252,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1239 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | 1252 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
1240 | if ((irb->ecw[0] & ch->sense_rc) == 0) | 1253 | if ((irb->ecw[0] & ch->sense_rc) == 0) |
1241 | /* print it only once */ | 1254 | /* print it only once */ |
1242 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, | 1255 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
1243 | "%s(%s): sense=%02x, ds=%02x", | 1256 | "%s(%s): sense=%02x, ds=%02x", |
1244 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); | 1257 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); |
1245 | ccw_unit_check(ch, irb->ecw[0]); | 1258 | ccw_unit_check(ch, irb->ecw[0]); |
@@ -1574,6 +1587,11 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1574 | 1587 | ||
1575 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); | 1588 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); |
1576 | 1589 | ||
1590 | dev_info(&dev->dev, | ||
1591 | "setup OK : r/w = %s/%s, protocol : %d\n", | ||
1592 | priv->channel[READ]->id, | ||
1593 | priv->channel[WRITE]->id, priv->protocol); | ||
1594 | |||
1577 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1595 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1578 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, | 1596 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, |
1579 | priv->channel[READ]->id, | 1597 | priv->channel[READ]->id, |
@@ -1687,7 +1705,7 @@ static void __exit ctcm_exit(void) | |||
1687 | { | 1705 | { |
1688 | unregister_cu3088_discipline(&ctcm_group_driver); | 1706 | unregister_cu3088_discipline(&ctcm_group_driver); |
1689 | ctcm_unregister_dbf_views(); | 1707 | ctcm_unregister_dbf_views(); |
1690 | ctcm_pr_info("CTCM driver unloaded\n"); | 1708 | pr_info("CTCM driver unloaded\n"); |
1691 | } | 1709 | } |
1692 | 1710 | ||
1693 | /* | 1711 | /* |
@@ -1695,7 +1713,7 @@ static void __exit ctcm_exit(void) | |||
1695 | */ | 1713 | */ |
1696 | static void print_banner(void) | 1714 | static void print_banner(void) |
1697 | { | 1715 | { |
1698 | printk(KERN_INFO "CTCM driver initialized\n"); | 1716 | pr_info("CTCM driver initialized\n"); |
1699 | } | 1717 | } |
1700 | 1718 | ||
1701 | /** | 1719 | /** |
@@ -1717,8 +1735,8 @@ static int __init ctcm_init(void) | |||
1717 | ret = register_cu3088_discipline(&ctcm_group_driver); | 1735 | ret = register_cu3088_discipline(&ctcm_group_driver); |
1718 | if (ret) { | 1736 | if (ret) { |
1719 | ctcm_unregister_dbf_views(); | 1737 | ctcm_unregister_dbf_views(); |
1720 | ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline " | 1738 | pr_err("%s / register_cu3088_discipline failed, ret = %d\n", |
1721 | "(rc = %d)\n", ret); | 1739 | __func__, ret); |
1722 | return ret; | 1740 | return ret; |
1723 | } | 1741 | } |
1724 | print_banner(); | 1742 | print_banner(); |
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index d77cce3fe4d4..d925e732b7d8 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h | |||
@@ -41,12 +41,6 @@ | |||
41 | #define LOG_FLAG_NOMEM 8 | 41 | #define LOG_FLAG_NOMEM 8 |
42 | 42 | ||
43 | #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) | 43 | #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) |
44 | #define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg) | ||
45 | #define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg) | ||
46 | #define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg) | ||
47 | #define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg) | ||
48 | #define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) | ||
49 | #define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg) | ||
50 | 44 | ||
51 | #define CTCM_PR_DEBUG(fmt, arg...) \ | 45 | #define CTCM_PR_DEBUG(fmt, arg...) \ |
52 | do { \ | 46 | do { \ |
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 19f5d5ed85e0..3db5f846bbf6 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -19,6 +19,9 @@ | |||
19 | #undef DEBUGDATA | 19 | #undef DEBUGDATA |
20 | #undef DEBUGCCW | 20 | #undef DEBUGCCW |
21 | 21 | ||
22 | #define KMSG_COMPONENT "ctcm" | ||
23 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
24 | |||
22 | #include <linux/module.h> | 25 | #include <linux/module.h> |
23 | #include <linux/init.h> | 26 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
@@ -386,7 +389,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) | |||
386 | if (grp->allocchan_callback_retries < 4) { | 389 | if (grp->allocchan_callback_retries < 4) { |
387 | if (grp->allochanfunc) | 390 | if (grp->allochanfunc) |
388 | grp->allochanfunc(grp->port_num, | 391 | grp->allochanfunc(grp->port_num, |
389 | grp->group_max_buflen); | 392 | grp->group_max_buflen); |
390 | } else { | 393 | } else { |
391 | /* there are problems...bail out */ | 394 | /* there are problems...bail out */ |
392 | /* there may be a state mismatch so restart */ | 395 | /* there may be a state mismatch so restart */ |
@@ -1232,8 +1235,9 @@ done: | |||
1232 | 1235 | ||
1233 | dev_kfree_skb_any(pskb); | 1236 | dev_kfree_skb_any(pskb); |
1234 | if (sendrc == NET_RX_DROP) { | 1237 | if (sendrc == NET_RX_DROP) { |
1235 | printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED" | 1238 | dev_warn(&dev->dev, |
1236 | " - PACKET DROPPED\n", dev->name, __func__); | 1239 | "The network backlog for %s is exceeded, " |
1240 | "package dropped\n", __func__); | ||
1237 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1241 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1238 | } | 1242 | } |
1239 | 1243 | ||
@@ -1670,10 +1674,11 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) | |||
1670 | CTCM_FUNTAIL, ch->id); | 1674 | CTCM_FUNTAIL, ch->id); |
1671 | } | 1675 | } |
1672 | } | 1676 | } |
1673 | |||
1674 | done: | 1677 | done: |
1675 | if (rc) { | 1678 | if (rc) { |
1676 | ctcm_pr_info("ctcmpc : %s() failed\n", __func__); | 1679 | dev_warn(&dev->dev, |
1680 | "The XID used in the MPC protocol is not valid, " | ||
1681 | "rc = %d\n", rc); | ||
1677 | priv->xid->xid2_flag2 = 0x40; | 1682 | priv->xid->xid2_flag2 = 0x40; |
1678 | grp->saved_xid2->xid2_flag2 = 0x40; | 1683 | grp->saved_xid2->xid2_flag2 = 0x40; |
1679 | } | 1684 | } |
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index bb2d13721d34..8452bb052d68 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c | |||
@@ -10,6 +10,9 @@ | |||
10 | #undef DEBUGDATA | 10 | #undef DEBUGDATA |
11 | #undef DEBUGCCW | 11 | #undef DEBUGCCW |
12 | 12 | ||
13 | #define KMSG_COMPONENT "ctcm" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/sysfs.h> | 16 | #include <linux/sysfs.h> |
14 | #include "ctcm_main.h" | 17 | #include "ctcm_main.h" |
15 | 18 | ||
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 0825be87e5a0..fb6c70cec253 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -26,6 +26,9 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define KMSG_COMPONENT "lcs" | ||
30 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
31 | |||
29 | #include <linux/module.h> | 32 | #include <linux/module.h> |
30 | #include <linux/if.h> | 33 | #include <linux/if.h> |
31 | #include <linux/netdevice.h> | 34 | #include <linux/netdevice.h> |
@@ -54,8 +57,6 @@ | |||
54 | #error Cannot compile lcs.c without some net devices switched on. | 57 | #error Cannot compile lcs.c without some net devices switched on. |
55 | #endif | 58 | #endif |
56 | 59 | ||
57 | #define PRINTK_HEADER " lcs: " | ||
58 | |||
59 | /** | 60 | /** |
60 | * initialization string for output | 61 | * initialization string for output |
61 | */ | 62 | */ |
@@ -96,7 +97,7 @@ lcs_register_debug_facility(void) | |||
96 | lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); | 97 | lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); |
97 | lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); | 98 | lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); |
98 | if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { | 99 | if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { |
99 | PRINT_ERR("Not enough memory for debug facility.\n"); | 100 | pr_err("Not enough memory for debug facility.\n"); |
100 | lcs_unregister_debug_facility(); | 101 | lcs_unregister_debug_facility(); |
101 | return -ENOMEM; | 102 | return -ENOMEM; |
102 | } | 103 | } |
@@ -503,7 +504,9 @@ lcs_start_channel(struct lcs_channel *channel) | |||
503 | if (rc) { | 504 | if (rc) { |
504 | LCS_DBF_TEXT_(4,trace,"essh%s", | 505 | LCS_DBF_TEXT_(4,trace,"essh%s", |
505 | dev_name(&channel->ccwdev->dev)); | 506 | dev_name(&channel->ccwdev->dev)); |
506 | PRINT_ERR("Error in starting channel, rc=%d!\n", rc); | 507 | dev_err(&channel->ccwdev->dev, |
508 | "Starting an LCS device resulted in an error," | ||
509 | " rc=%d!\n", rc); | ||
507 | } | 510 | } |
508 | return rc; | 511 | return rc; |
509 | } | 512 | } |
@@ -640,7 +643,9 @@ __lcs_resume_channel(struct lcs_channel *channel) | |||
640 | if (rc) { | 643 | if (rc) { |
641 | LCS_DBF_TEXT_(4, trace, "ersc%s", | 644 | LCS_DBF_TEXT_(4, trace, "ersc%s", |
642 | dev_name(&channel->ccwdev->dev)); | 645 | dev_name(&channel->ccwdev->dev)); |
643 | PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); | 646 | dev_err(&channel->ccwdev->dev, |
647 | "Sending data from the LCS device to the LAN failed" | ||
648 | " with rc=%d\n",rc); | ||
644 | } else | 649 | } else |
645 | channel->state = LCS_CH_STATE_RUNNING; | 650 | channel->state = LCS_CH_STATE_RUNNING; |
646 | return rc; | 651 | return rc; |
@@ -1086,7 +1091,7 @@ lcs_check_multicast_support(struct lcs_card *card) | |||
1086 | cmd->cmd.lcs_qipassist.num_ip_pairs = 1; | 1091 | cmd->cmd.lcs_qipassist.num_ip_pairs = 1; |
1087 | rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); | 1092 | rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); |
1088 | if (rc != 0) { | 1093 | if (rc != 0) { |
1089 | PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n"); | 1094 | pr_err("Query IPAssist failed. Assuming unsupported!\n"); |
1090 | return -EOPNOTSUPP; | 1095 | return -EOPNOTSUPP; |
1091 | } | 1096 | } |
1092 | if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) | 1097 | if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) |
@@ -1119,8 +1124,8 @@ list_modified: | |||
1119 | rc = lcs_send_setipm(card, ipm); | 1124 | rc = lcs_send_setipm(card, ipm); |
1120 | spin_lock_irqsave(&card->ipm_lock, flags); | 1125 | spin_lock_irqsave(&card->ipm_lock, flags); |
1121 | if (rc) { | 1126 | if (rc) { |
1122 | PRINT_INFO("Adding multicast address failed. " | 1127 | pr_info("Adding multicast address failed." |
1123 | "Table possibly full!\n"); | 1128 | " Table possibly full!\n"); |
1124 | /* store ipm in failed list -> will be added | 1129 | /* store ipm in failed list -> will be added |
1125 | * to ipm_list again, so a retry will be done | 1130 | * to ipm_list again, so a retry will be done |
1126 | * during the next call of this function */ | 1131 | * during the next call of this function */ |
@@ -1231,8 +1236,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) | |||
1231 | ipm = (struct lcs_ipm_list *) | 1236 | ipm = (struct lcs_ipm_list *) |
1232 | kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); | 1237 | kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); |
1233 | if (ipm == NULL) { | 1238 | if (ipm == NULL) { |
1234 | PRINT_INFO("Not enough memory to add " | 1239 | pr_info("Not enough memory to add" |
1235 | "new multicast entry!\n"); | 1240 | " new multicast entry!\n"); |
1236 | break; | 1241 | break; |
1237 | } | 1242 | } |
1238 | memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); | 1243 | memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); |
@@ -1306,18 +1311,21 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
1306 | 1311 | ||
1307 | switch (PTR_ERR(irb)) { | 1312 | switch (PTR_ERR(irb)) { |
1308 | case -EIO: | 1313 | case -EIO: |
1309 | PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); | 1314 | dev_warn(&cdev->dev, |
1315 | "An I/O-error occurred on the LCS device\n"); | ||
1310 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1316 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1311 | LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); | 1317 | LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); |
1312 | break; | 1318 | break; |
1313 | case -ETIMEDOUT: | 1319 | case -ETIMEDOUT: |
1314 | PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); | 1320 | dev_warn(&cdev->dev, |
1321 | "A command timed out on the LCS device\n"); | ||
1315 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1322 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1316 | LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); | 1323 | LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); |
1317 | break; | 1324 | break; |
1318 | default: | 1325 | default: |
1319 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 1326 | dev_warn(&cdev->dev, |
1320 | dev_name(&cdev->dev)); | 1327 | "An error occurred on the LCS device, rc=%ld\n", |
1328 | PTR_ERR(irb)); | ||
1321 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1329 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1322 | LCS_DBF_TEXT(2, trace, " rc???"); | 1330 | LCS_DBF_TEXT(2, trace, " rc???"); |
1323 | } | 1331 | } |
@@ -1403,8 +1411,10 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1403 | /* Check for channel and device errors presented */ | 1411 | /* Check for channel and device errors presented */ |
1404 | rc = lcs_get_problem(cdev, irb); | 1412 | rc = lcs_get_problem(cdev, irb); |
1405 | if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { | 1413 | if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { |
1406 | PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", | 1414 | dev_warn(&cdev->dev, |
1407 | dev_name(&cdev->dev), dstat, cstat); | 1415 | "The LCS device stopped because of an error," |
1416 | " dstat=0x%X, cstat=0x%X \n", | ||
1417 | dstat, cstat); | ||
1408 | if (rc) { | 1418 | if (rc) { |
1409 | channel->state = LCS_CH_STATE_ERROR; | 1419 | channel->state = LCS_CH_STATE_ERROR; |
1410 | } | 1420 | } |
@@ -1761,8 +1771,8 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) | |||
1761 | lcs_schedule_recovery(card); | 1771 | lcs_schedule_recovery(card); |
1762 | break; | 1772 | break; |
1763 | case LCS_CMD_STOPLAN: | 1773 | case LCS_CMD_STOPLAN: |
1764 | PRINT_WARN("Stoplan for %s initiated by LGW.\n", | 1774 | pr_warning("Stoplan for %s initiated by LGW.\n", |
1765 | card->dev->name); | 1775 | card->dev->name); |
1766 | if (card->dev) | 1776 | if (card->dev) |
1767 | netif_carrier_off(card->dev); | 1777 | netif_carrier_off(card->dev); |
1768 | break; | 1778 | break; |
@@ -1790,7 +1800,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) | |||
1790 | 1800 | ||
1791 | skb = dev_alloc_skb(skb_len); | 1801 | skb = dev_alloc_skb(skb_len); |
1792 | if (skb == NULL) { | 1802 | if (skb == NULL) { |
1793 | PRINT_ERR("LCS: alloc_skb failed for device=%s\n", | 1803 | dev_err(&card->dev->dev, |
1804 | " Allocating a socket buffer to interface %s failed\n", | ||
1794 | card->dev->name); | 1805 | card->dev->name); |
1795 | card->stats.rx_dropped++; | 1806 | card->stats.rx_dropped++; |
1796 | return; | 1807 | return; |
@@ -1886,7 +1897,8 @@ lcs_stop_device(struct net_device *dev) | |||
1886 | (card->write.state != LCS_CH_STATE_RUNNING)); | 1897 | (card->write.state != LCS_CH_STATE_RUNNING)); |
1887 | rc = lcs_stopcard(card); | 1898 | rc = lcs_stopcard(card); |
1888 | if (rc) | 1899 | if (rc) |
1889 | PRINT_ERR("Try it again!\n "); | 1900 | dev_err(&card->dev->dev, |
1901 | " Shutting down the LCS device failed\n "); | ||
1890 | return rc; | 1902 | return rc; |
1891 | } | 1903 | } |
1892 | 1904 | ||
@@ -1905,7 +1917,7 @@ lcs_open_device(struct net_device *dev) | |||
1905 | /* initialize statistics */ | 1917 | /* initialize statistics */ |
1906 | rc = lcs_detect(card); | 1918 | rc = lcs_detect(card); |
1907 | if (rc) { | 1919 | if (rc) { |
1908 | PRINT_ERR("LCS:Error in opening device!\n"); | 1920 | pr_err("Error in opening device!\n"); |
1909 | 1921 | ||
1910 | } else { | 1922 | } else { |
1911 | dev->flags |= IFF_UP; | 1923 | dev->flags |= IFF_UP; |
@@ -2113,8 +2125,9 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2113 | rc = lcs_detect(card); | 2125 | rc = lcs_detect(card); |
2114 | if (rc) { | 2126 | if (rc) { |
2115 | LCS_DBF_TEXT(2, setup, "dtctfail"); | 2127 | LCS_DBF_TEXT(2, setup, "dtctfail"); |
2116 | PRINT_WARN("Detection of LCS card failed with return code " | 2128 | dev_err(&card->dev->dev, |
2117 | "%d (0x%x)\n", rc, rc); | 2129 | "Detecting a network adapter for LCS devices" |
2130 | " failed with rc=%d (0x%x)\n", rc, rc); | ||
2118 | lcs_stopcard(card); | 2131 | lcs_stopcard(card); |
2119 | goto out; | 2132 | goto out; |
2120 | } | 2133 | } |
@@ -2144,7 +2157,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2144 | #endif | 2157 | #endif |
2145 | default: | 2158 | default: |
2146 | LCS_DBF_TEXT(3, setup, "errinit"); | 2159 | LCS_DBF_TEXT(3, setup, "errinit"); |
2147 | PRINT_ERR("LCS: Initialization failed\n"); | 2160 | pr_err(" Initialization failed\n"); |
2148 | goto out; | 2161 | goto out; |
2149 | } | 2162 | } |
2150 | if (!dev) | 2163 | if (!dev) |
@@ -2176,13 +2189,13 @@ netdev_out: | |||
2176 | goto out; | 2189 | goto out; |
2177 | 2190 | ||
2178 | /* Print out supported assists: IPv6 */ | 2191 | /* Print out supported assists: IPv6 */ |
2179 | PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name, | 2192 | pr_info("LCS device %s %s IPv6 support\n", card->dev->name, |
2180 | (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? | 2193 | (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? |
2181 | "with" : "without"); | 2194 | "with" : "without"); |
2182 | /* Print out supported assist: Multicast */ | 2195 | /* Print out supported assist: Multicast */ |
2183 | PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name, | 2196 | pr_info("LCS device %s %s Multicast support\n", card->dev->name, |
2184 | (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? | 2197 | (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? |
2185 | "with" : "without"); | 2198 | "with" : "without"); |
2186 | return 0; | 2199 | return 0; |
2187 | out: | 2200 | out: |
2188 | 2201 | ||
@@ -2248,15 +2261,16 @@ lcs_recovery(void *ptr) | |||
2248 | return 0; | 2261 | return 0; |
2249 | LCS_DBF_TEXT(4, trace, "recover2"); | 2262 | LCS_DBF_TEXT(4, trace, "recover2"); |
2250 | gdev = card->gdev; | 2263 | gdev = card->gdev; |
2251 | PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev)); | 2264 | dev_warn(&gdev->dev, |
2265 | "A recovery process has been started for the LCS device\n"); | ||
2252 | rc = __lcs_shutdown_device(gdev, 1); | 2266 | rc = __lcs_shutdown_device(gdev, 1); |
2253 | rc = lcs_new_device(gdev); | 2267 | rc = lcs_new_device(gdev); |
2254 | if (!rc) | 2268 | if (!rc) |
2255 | PRINT_INFO("Device %s successfully recovered!\n", | 2269 | pr_info("Device %s successfully recovered!\n", |
2256 | card->dev->name); | 2270 | card->dev->name); |
2257 | else | 2271 | else |
2258 | PRINT_INFO("Device %s could not be recovered!\n", | 2272 | pr_info("Device %s could not be recovered!\n", |
2259 | card->dev->name); | 2273 | card->dev->name); |
2260 | lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); | 2274 | lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); |
2261 | return 0; | 2275 | return 0; |
2262 | } | 2276 | } |
@@ -2308,17 +2322,17 @@ __init lcs_init_module(void) | |||
2308 | { | 2322 | { |
2309 | int rc; | 2323 | int rc; |
2310 | 2324 | ||
2311 | PRINT_INFO("Loading %s\n",version); | 2325 | pr_info("Loading %s\n", version); |
2312 | rc = lcs_register_debug_facility(); | 2326 | rc = lcs_register_debug_facility(); |
2313 | LCS_DBF_TEXT(0, setup, "lcsinit"); | 2327 | LCS_DBF_TEXT(0, setup, "lcsinit"); |
2314 | if (rc) { | 2328 | if (rc) { |
2315 | PRINT_ERR("Initialization failed\n"); | 2329 | pr_err("Initialization failed\n"); |
2316 | return rc; | 2330 | return rc; |
2317 | } | 2331 | } |
2318 | 2332 | ||
2319 | rc = register_cu3088_discipline(&lcs_group_driver); | 2333 | rc = register_cu3088_discipline(&lcs_group_driver); |
2320 | if (rc) { | 2334 | if (rc) { |
2321 | PRINT_ERR("Initialization failed\n"); | 2335 | pr_err("Initialization failed\n"); |
2322 | return rc; | 2336 | return rc; |
2323 | } | 2337 | } |
2324 | return 0; | 2338 | return 0; |
@@ -2331,7 +2345,7 @@ __init lcs_init_module(void) | |||
2331 | static void | 2345 | static void |
2332 | __exit lcs_cleanup_module(void) | 2346 | __exit lcs_cleanup_module(void) |
2333 | { | 2347 | { |
2334 | PRINT_INFO("Terminating lcs module.\n"); | 2348 | pr_info("Terminating lcs module.\n"); |
2335 | LCS_DBF_TEXT(0, trace, "cleanup"); | 2349 | LCS_DBF_TEXT(0, trace, "cleanup"); |
2336 | unregister_cu3088_discipline(&lcs_group_driver); | 2350 | unregister_cu3088_discipline(&lcs_group_driver); |
2337 | lcs_unregister_debug_facility(); | 2351 | lcs_unregister_debug_facility(); |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 0fea51e34b57..930e2fc2a011 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -31,6 +31,9 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define KMSG_COMPONENT "netiucv" | ||
35 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
36 | |||
34 | #undef DEBUG | 37 | #undef DEBUG |
35 | 38 | ||
36 | #include <linux/module.h> | 39 | #include <linux/module.h> |
@@ -846,7 +849,8 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) | |||
846 | 849 | ||
847 | fsm_deltimer(&conn->timer); | 850 | fsm_deltimer(&conn->timer); |
848 | iucv_path_sever(conn->path, NULL); | 851 | iucv_path_sever(conn->path, NULL); |
849 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); | 852 | dev_info(privptr->dev, "The peer interface of the IUCV device" |
853 | " has closed the connection\n"); | ||
850 | IUCV_DBF_TEXT(data, 2, | 854 | IUCV_DBF_TEXT(data, 2, |
851 | "conn_action_connsever: Remote dropped connection\n"); | 855 | "conn_action_connsever: Remote dropped connection\n"); |
852 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 856 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
@@ -856,13 +860,15 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) | |||
856 | static void conn_action_start(fsm_instance *fi, int event, void *arg) | 860 | static void conn_action_start(fsm_instance *fi, int event, void *arg) |
857 | { | 861 | { |
858 | struct iucv_connection *conn = arg; | 862 | struct iucv_connection *conn = arg; |
863 | struct net_device *netdev = conn->netdev; | ||
864 | struct netiucv_priv *privptr = netdev_priv(netdev); | ||
859 | int rc; | 865 | int rc; |
860 | 866 | ||
861 | IUCV_DBF_TEXT(trace, 3, __func__); | 867 | IUCV_DBF_TEXT(trace, 3, __func__); |
862 | 868 | ||
863 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 869 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
864 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", | 870 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", |
865 | conn->netdev->name, conn->userid); | 871 | netdev->name, conn->userid); |
866 | 872 | ||
867 | /* | 873 | /* |
868 | * We must set the state before calling iucv_connect because the | 874 | * We must set the state before calling iucv_connect because the |
@@ -876,41 +882,45 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) | |||
876 | NULL, iucvMagic, conn); | 882 | NULL, iucvMagic, conn); |
877 | switch (rc) { | 883 | switch (rc) { |
878 | case 0: | 884 | case 0: |
879 | conn->netdev->tx_queue_len = conn->path->msglim; | 885 | netdev->tx_queue_len = conn->path->msglim; |
880 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, | 886 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, |
881 | CONN_EVENT_TIMER, conn); | 887 | CONN_EVENT_TIMER, conn); |
882 | return; | 888 | return; |
883 | case 11: | 889 | case 11: |
884 | PRINT_INFO("%s: User %s is currently not available.\n", | 890 | dev_warn(privptr->dev, |
885 | conn->netdev->name, | 891 | "The IUCV device failed to connect to z/VM guest %s\n", |
886 | netiucv_printname(conn->userid)); | 892 | netiucv_printname(conn->userid)); |
887 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 893 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
888 | break; | 894 | break; |
889 | case 12: | 895 | case 12: |
890 | PRINT_INFO("%s: User %s is currently not ready.\n", | 896 | dev_warn(privptr->dev, |
891 | conn->netdev->name, | 897 | "The IUCV device failed to connect to the peer on z/VM" |
892 | netiucv_printname(conn->userid)); | 898 | " guest %s\n", netiucv_printname(conn->userid)); |
893 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 899 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
894 | break; | 900 | break; |
895 | case 13: | 901 | case 13: |
896 | PRINT_WARN("%s: Too many IUCV connections.\n", | 902 | dev_err(privptr->dev, |
897 | conn->netdev->name); | 903 | "Connecting the IUCV device would exceed the maximum" |
904 | " number of IUCV connections\n"); | ||
898 | fsm_newstate(fi, CONN_STATE_CONNERR); | 905 | fsm_newstate(fi, CONN_STATE_CONNERR); |
899 | break; | 906 | break; |
900 | case 14: | 907 | case 14: |
901 | PRINT_WARN("%s: User %s has too many IUCV connections.\n", | 908 | dev_err(privptr->dev, |
902 | conn->netdev->name, | 909 | "z/VM guest %s has too many IUCV connections" |
903 | netiucv_printname(conn->userid)); | 910 | " to connect with the IUCV device\n", |
911 | netiucv_printname(conn->userid)); | ||
904 | fsm_newstate(fi, CONN_STATE_CONNERR); | 912 | fsm_newstate(fi, CONN_STATE_CONNERR); |
905 | break; | 913 | break; |
906 | case 15: | 914 | case 15: |
907 | PRINT_WARN("%s: No IUCV authorization in CP directory.\n", | 915 | dev_err(privptr->dev, |
908 | conn->netdev->name); | 916 | "The IUCV device cannot connect to a z/VM guest with no" |
917 | " IUCV authorization\n"); | ||
909 | fsm_newstate(fi, CONN_STATE_CONNERR); | 918 | fsm_newstate(fi, CONN_STATE_CONNERR); |
910 | break; | 919 | break; |
911 | default: | 920 | default: |
912 | PRINT_WARN("%s: iucv_connect returned error %d\n", | 921 | dev_err(privptr->dev, |
913 | conn->netdev->name, rc); | 922 | "Connecting the IUCV device failed with error %d\n", |
923 | rc); | ||
914 | fsm_newstate(fi, CONN_STATE_CONNERR); | 924 | fsm_newstate(fi, CONN_STATE_CONNERR); |
915 | break; | 925 | break; |
916 | } | 926 | } |
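The conn_action_start() hunk shows the general shape of the whole cleanup: user-visible messages move to dev_info()/dev_warn()/dev_err() keyed to the IUCV device's struct device (so the device name is added automatically), while the verbose per-return-code details stay in the s390 debug feature. A hedged sketch of that split, with an invented helper and messages that are not the netiucv ones:

/* Illustrative only: dev_* messaging against a struct device; the kernel
 * prefixes each line with the driver and device name.
 */
#include <linux/device.h>

static void report_connect_result(struct device *dev, int rc)
{
	switch (rc) {
	case 0:
		dev_info(dev, "connection established\n");
		break;
	case 13:
		dev_err(dev, "too many connections\n");
		break;
	default:
		dev_err(dev, "connect failed with error %d\n", rc);
		break;
	}
}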
@@ -1059,8 +1069,9 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) | |||
1059 | switch (fsm_getstate(fi)) { | 1069 | switch (fsm_getstate(fi)) { |
1060 | case DEV_STATE_STARTWAIT: | 1070 | case DEV_STATE_STARTWAIT: |
1061 | fsm_newstate(fi, DEV_STATE_RUNNING); | 1071 | fsm_newstate(fi, DEV_STATE_RUNNING); |
1062 | PRINT_INFO("%s: connected with remote side %s\n", | 1072 | dev_info(privptr->dev, |
1063 | dev->name, privptr->conn->userid); | 1073 | "The IUCV device has been connected" |
1074 | " successfully to %s\n", privptr->conn->userid); | ||
1064 | IUCV_DBF_TEXT(setup, 3, | 1075 | IUCV_DBF_TEXT(setup, 3, |
1065 | "connection is up and running\n"); | 1076 | "connection is up and running\n"); |
1066 | break; | 1077 | break; |
@@ -1982,6 +1993,8 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1982 | if (rc) | 1993 | if (rc) |
1983 | goto out_unreg; | 1994 | goto out_unreg; |
1984 | 1995 | ||
1996 | dev_info(priv->dev, "The IUCV interface to %s has been" | ||
1997 | " established successfully\n", netiucv_printname(username)); | ||
1985 | 1998 | ||
1986 | return count; | 1999 | return count; |
1987 | 2000 | ||
@@ -2027,10 +2040,9 @@ static ssize_t remove_write (struct device_driver *drv, | |||
2027 | continue; | 2040 | continue; |
2028 | read_unlock_bh(&iucv_connection_rwlock); | 2041 | read_unlock_bh(&iucv_connection_rwlock); |
2029 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 2042 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
2030 | PRINT_WARN("netiucv: net device %s active with peer " | 2043 | dev_warn(dev, "The IUCV device is connected" |
2031 | "%s\n", ndev->name, priv->conn->userid); | 2044 | " to %s and cannot be removed\n", |
2032 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2045 | priv->conn->userid); |
2033 | ndev->name); | ||
2034 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2046 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
2035 | return -EPERM; | 2047 | return -EPERM; |
2036 | } | 2048 | } |
@@ -2062,7 +2074,7 @@ static struct attribute_group *netiucv_drv_attr_groups[] = { | |||
2062 | 2074 | ||
2063 | static void netiucv_banner(void) | 2075 | static void netiucv_banner(void) |
2064 | { | 2076 | { |
2065 | PRINT_INFO("NETIUCV driver initialized\n"); | 2077 | pr_info("driver initialized\n"); |
2066 | } | 2078 | } |
2067 | 2079 | ||
2068 | static void __exit netiucv_exit(void) | 2080 | static void __exit netiucv_exit(void) |
@@ -2088,7 +2100,7 @@ static void __exit netiucv_exit(void) | |||
2088 | iucv_unregister(&netiucv_handler, 1); | 2100 | iucv_unregister(&netiucv_handler, 1); |
2089 | iucv_unregister_dbf_views(); | 2101 | iucv_unregister_dbf_views(); |
2090 | 2102 | ||
2091 | PRINT_INFO("NETIUCV driver unloaded\n"); | 2103 | pr_info("driver unloaded\n"); |
2092 | return; | 2104 | return; |
2093 | } | 2105 | } |
2094 | 2106 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index af6d60458513..d5ccce1643e4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -31,11 +31,10 @@ | |||
31 | #include <asm/qdio.h> | 31 | #include <asm/qdio.h> |
32 | #include <asm/ccwdev.h> | 32 | #include <asm/ccwdev.h> |
33 | #include <asm/ccwgroup.h> | 33 | #include <asm/ccwgroup.h> |
34 | #include <asm/sysinfo.h> | ||
34 | 35 | ||
35 | #include "qeth_core_mpc.h" | 36 | #include "qeth_core_mpc.h" |
36 | 37 | ||
37 | #define KMSG_COMPONENT "qeth" | ||
38 | |||
39 | /** | 38 | /** |
40 | * Debug Facility stuff | 39 | * Debug Facility stuff |
41 | */ | 40 | */ |
@@ -74,11 +73,6 @@ struct qeth_dbf_info { | |||
74 | #define QETH_DBF_TEXT_(name, level, text...) \ | 73 | #define QETH_DBF_TEXT_(name, level, text...) \ |
75 | qeth_dbf_longtext(QETH_DBF_##name, level, text) | 74 | qeth_dbf_longtext(QETH_DBF_##name, level, text) |
76 | 75 | ||
77 | /** | ||
78 | * some more debug stuff | ||
79 | */ | ||
80 | #define PRINTK_HEADER "qeth: " | ||
81 | |||
82 | #define SENSE_COMMAND_REJECT_BYTE 0 | 76 | #define SENSE_COMMAND_REJECT_BYTE 0 |
83 | #define SENSE_COMMAND_REJECT_FLAG 0x80 | 77 | #define SENSE_COMMAND_REJECT_FLAG 0x80 |
84 | #define SENSE_RESETTING_EVENT_BYTE 1 | 78 | #define SENSE_RESETTING_EVENT_BYTE 1 |
@@ -733,6 +727,7 @@ struct qeth_card { | |||
733 | struct qeth_osn_info osn_info; | 727 | struct qeth_osn_info osn_info; |
734 | struct qeth_discipline discipline; | 728 | struct qeth_discipline discipline; |
735 | atomic_t force_alloc_skb; | 729 | atomic_t force_alloc_skb; |
730 | struct service_level qeth_service_level; | ||
736 | }; | 731 | }; |
737 | 732 | ||
738 | struct qeth_card_list_struct { | 733 | struct qeth_card_list_struct { |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 52d26592c72c..e783644a2105 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -319,7 +322,10 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
319 | return -EIO; | 322 | return -EIO; |
320 | iob = qeth_get_buffer(&card->read); | 323 | iob = qeth_get_buffer(&card->read); |
321 | if (!iob) { | 324 | if (!iob) { |
322 | PRINT_WARN("issue_next_read failed: no iob available!\n"); | 325 | dev_warn(&card->gdev->dev, "The qeth device driver " |
326 | "failed to recover an error on the device\n"); | ||
327 | QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " | ||
328 | "available\n", dev_name(&card->gdev->dev)); | ||
323 | return -ENOMEM; | 329 | return -ENOMEM; |
324 | } | 330 | } |
325 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); | 331 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); |
@@ -327,7 +333,8 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
327 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, | 333 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, |
328 | (addr_t) iob, 0, 0); | 334 | (addr_t) iob, 0, 0); |
329 | if (rc) { | 335 | if (rc) { |
330 | PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc); | 336 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " |
337 | "rc=%i\n", dev_name(&card->gdev->dev), rc); | ||
331 | atomic_set(&card->read.irq_pending, 0); | 338 | atomic_set(&card->read.irq_pending, 0); |
332 | qeth_schedule_recovery(card); | 339 | qeth_schedule_recovery(card); |
333 | wake_up(&card->wait_q); | 340 | wake_up(&card->wait_q); |
@@ -393,10 +400,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
393 | } else { | 400 | } else { |
394 | switch (cmd->hdr.command) { | 401 | switch (cmd->hdr.command) { |
395 | case IPA_CMD_STOPLAN: | 402 | case IPA_CMD_STOPLAN: |
396 | PRINT_WARN("Link failure on %s (CHPID 0x%X) - " | 403 | dev_warn(&card->gdev->dev, |
397 | "there is a network problem or " | 404 | "The link for interface %s on CHPID" |
398 | "someone pulled the cable or " | 405 | " 0x%X failed\n", |
399 | "disabled the port.\n", | ||
400 | QETH_CARD_IFNAME(card), | 406 | QETH_CARD_IFNAME(card), |
401 | card->info.chpid); | 407 | card->info.chpid); |
402 | card->lan_online = 0; | 408 | card->lan_online = 0; |
@@ -404,9 +410,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
404 | netif_carrier_off(card->dev); | 410 | netif_carrier_off(card->dev); |
405 | return NULL; | 411 | return NULL; |
406 | case IPA_CMD_STARTLAN: | 412 | case IPA_CMD_STARTLAN: |
407 | PRINT_INFO("Link reestablished on %s " | 413 | dev_info(&card->gdev->dev, |
408 | "(CHPID 0x%X). Scheduling " | 414 | "The link for %s on CHPID 0x%X has" |
409 | "IP address reset.\n", | 415 | " been restored\n", |
410 | QETH_CARD_IFNAME(card), | 416 | QETH_CARD_IFNAME(card), |
411 | card->info.chpid); | 417 | card->info.chpid); |
412 | netif_carrier_on(card->dev); | 418 | netif_carrier_on(card->dev); |
@@ -458,7 +464,7 @@ static int qeth_check_idx_response(unsigned char *buffer) | |||
458 | 464 | ||
459 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); | 465 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); |
460 | if ((buffer[2] & 0xc0) == 0xc0) { | 466 | if ((buffer[2] & 0xc0) == 0xc0) { |
461 | PRINT_WARN("received an IDX TERMINATE " | 467 | QETH_DBF_MESSAGE(2, "received an IDX TERMINATE " |
462 | "with cause code 0x%02x%s\n", | 468 | "with cause code 0x%02x%s\n", |
463 | buffer[4], | 469 | buffer[4], |
464 | ((buffer[4] == 0x22) ? | 470 | ((buffer[4] == 0x22) ? |
@@ -744,8 +750,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
744 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 750 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
745 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { | 751 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { |
746 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); | 752 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); |
747 | PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", | 753 | dev_warn(&cdev->dev, "The qeth device driver " |
748 | dev_name(&cdev->dev), dstat, cstat); | 754 | "failed to recover an error on the device\n"); |
755 | QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ", | ||
756 | dev_name(&cdev->dev), dstat, cstat); | ||
749 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, | 757 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, |
750 | 16, 1, irb, 64, 1); | 758 | 16, 1, irb, 64, 1); |
751 | return 1; | 759 | return 1; |
@@ -784,12 +792,14 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
784 | 792 | ||
785 | switch (PTR_ERR(irb)) { | 793 | switch (PTR_ERR(irb)) { |
786 | case -EIO: | 794 | case -EIO: |
787 | PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); | 795 | QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", |
796 | dev_name(&cdev->dev)); | ||
788 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 797 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
789 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); | 798 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); |
790 | break; | 799 | break; |
791 | case -ETIMEDOUT: | 800 | case -ETIMEDOUT: |
792 | PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); | 801 | dev_warn(&cdev->dev, "A hardware operation timed out" |
802 | " on the device\n"); | ||
793 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 803 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
794 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); | 804 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); |
795 | if (intparm == QETH_RCD_PARM) { | 805 | if (intparm == QETH_RCD_PARM) { |
@@ -802,8 +812,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
802 | } | 812 | } |
803 | break; | 813 | break; |
804 | default: | 814 | default: |
805 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 815 | QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", |
806 | dev_name(&cdev->dev)); | 816 | dev_name(&cdev->dev), PTR_ERR(irb)); |
807 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 817 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
808 | QETH_DBF_TEXT(TRACE, 2, " rc???"); | 818 | QETH_DBF_TEXT(TRACE, 2, " rc???"); |
809 | } | 819 | } |
@@ -869,10 +879,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
869 | (dstat & DEV_STAT_UNIT_CHECK) || | 879 | (dstat & DEV_STAT_UNIT_CHECK) || |
870 | (cstat)) { | 880 | (cstat)) { |
871 | if (irb->esw.esw0.erw.cons) { | 881 | if (irb->esw.esw0.erw.cons) { |
872 | /* TODO: we should make this s390dbf */ | 882 | dev_warn(&channel->ccwdev->dev, |
873 | PRINT_WARN("sense data available on channel %s.\n", | 883 | "The qeth device driver failed to recover " |
874 | CHANNEL_ID(channel)); | 884 | "an error on the device\n"); |
875 | PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat); | 885 | QETH_DBF_MESSAGE(2, "%s sense data available. cstat " |
886 | "0x%X dstat 0x%X\n", | ||
887 | dev_name(&channel->ccwdev->dev), cstat, dstat); | ||
876 | print_hex_dump(KERN_WARNING, "qeth: irb ", | 888 | print_hex_dump(KERN_WARNING, "qeth: irb ", |
877 | DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); | 889 | DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); |
878 | print_hex_dump(KERN_WARNING, "qeth: sense data ", | 890 | print_hex_dump(KERN_WARNING, "qeth: sense data ", |
@@ -1138,6 +1150,14 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1138 | return 0; | 1150 | return 0; |
1139 | } | 1151 | } |
1140 | 1152 | ||
1153 | static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) | ||
1154 | { | ||
1155 | struct qeth_card *card = container_of(slr, struct qeth_card, | ||
1156 | qeth_service_level); | ||
1157 | seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card), | ||
1158 | card->info.mcl_level); | ||
1159 | } | ||
1160 | |||
1141 | static struct qeth_card *qeth_alloc_card(void) | 1161 | static struct qeth_card *qeth_alloc_card(void) |
1142 | { | 1162 | { |
1143 | struct qeth_card *card; | 1163 | struct qeth_card *card; |
@@ -1157,6 +1177,8 @@ static struct qeth_card *qeth_alloc_card(void) | |||
1157 | return NULL; | 1177 | return NULL; |
1158 | } | 1178 | } |
1159 | card->options.layer2 = -1; | 1179 | card->options.layer2 = -1; |
1180 | card->qeth_service_level.seq_print = qeth_core_sl_print; | ||
1181 | register_service_level(&card->qeth_service_level); | ||
1160 | return card; | 1182 | return card; |
1161 | } | 1183 | } |
1162 | 1184 | ||
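The two hunks above wire the qeth card into the s390 service-level reporting added elsewhere in this series: register_service_level() links the embedded struct service_level into /proc/service_levels, and the seq_print callback recovers the card via container_of(). A minimal sketch of the same pattern, assuming a hypothetical driver-private structure (the "mydev"/"mydrv" names and firmware string are placeholders):

/* Sketch of the s390 service-level hook; undo with unregister_service_level()
 * when the object is freed, as qeth_core_free_card() does below.
 */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>	/* struct service_level, (un)register_service_level() */

struct mydev {
	char fw_level[16];
	struct service_level sl;
};

static void mydev_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct mydev *d = container_of(slr, struct mydev, sl);

	seq_printf(m, "mydrv: firmware level %s\n", d->fw_level);
}

static void mydev_register_sl(struct mydev *d)
{
	d->sl.seq_print = mydev_sl_print;
	register_service_level(&d->sl);
}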
@@ -1175,8 +1197,8 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1175 | card->qdio.no_out_queues = known_devices[i][8]; | 1197 | card->qdio.no_out_queues = known_devices[i][8]; |
1176 | card->info.is_multicast_different = known_devices[i][9]; | 1198 | card->info.is_multicast_different = known_devices[i][9]; |
1177 | if (qeth_is_1920_device(card)) { | 1199 | if (qeth_is_1920_device(card)) { |
1178 | PRINT_INFO("Priority Queueing not able " | 1200 | dev_info(&card->gdev->dev, |
1179 | "due to hardware limitations!\n"); | 1201 | "Priority Queueing not supported\n"); |
1180 | card->qdio.no_out_queues = 1; | 1202 | card->qdio.no_out_queues = 1; |
1181 | card->qdio.default_out_queue = 0; | 1203 | card->qdio.default_out_queue = 0; |
1182 | } | 1204 | } |
@@ -1185,7 +1207,8 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1185 | i++; | 1207 | i++; |
1186 | } | 1208 | } |
1187 | card->info.type = QETH_CARD_TYPE_UNKNOWN; | 1209 | card->info.type = QETH_CARD_TYPE_UNKNOWN; |
1188 | PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card)); | 1210 | dev_err(&card->gdev->dev, "The adapter hardware is of an " |
1211 | "unknown type\n"); | ||
1189 | return -ENOENT; | 1212 | return -ENOENT; |
1190 | } | 1213 | } |
1191 | 1214 | ||
@@ -1368,8 +1391,8 @@ static int qeth_get_unitaddr(struct qeth_card *card) | |||
1368 | QETH_DBF_TEXT(SETUP, 2, "getunit"); | 1391 | QETH_DBF_TEXT(SETUP, 2, "getunit"); |
1369 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); | 1392 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); |
1370 | if (rc) { | 1393 | if (rc) { |
1371 | PRINT_ERR("qeth_read_conf_data for device %s returned %i\n", | 1394 | QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", |
1372 | CARD_DDEV_ID(card), rc); | 1395 | dev_name(&card->gdev->dev), rc); |
1373 | return rc; | 1396 | return rc; |
1374 | } | 1397 | } |
1375 | card->info.chpid = prcd[30]; | 1398 | card->info.chpid = prcd[30]; |
@@ -1519,7 +1542,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1519 | if (rc == -ERESTARTSYS) | 1542 | if (rc == -ERESTARTSYS) |
1520 | return rc; | 1543 | return rc; |
1521 | if (channel->state != CH_STATE_ACTIVATING) { | 1544 | if (channel->state != CH_STATE_ACTIVATING) { |
1522 | PRINT_WARN("IDX activate timed out!\n"); | 1545 | dev_warn(&channel->ccwdev->dev, "The qeth device driver" |
1546 | " failed to recover an error on the device\n"); | ||
1547 | QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", | ||
1548 | dev_name(&channel->ccwdev->dev)); | ||
1523 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); | 1549 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); |
1524 | qeth_clear_cmd_buffers(channel); | 1550 | qeth_clear_cmd_buffers(channel); |
1525 | return -ETIME; | 1551 | return -ETIME; |
@@ -1552,20 +1578,21 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, | |||
1552 | 1578 | ||
1553 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { | 1579 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { |
1554 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) | 1580 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) |
1555 | PRINT_ERR("IDX_ACTIVATE on write channel device %s: " | 1581 | dev_err(&card->write.ccwdev->dev, |
1556 | "adapter exclusively used by another host\n", | 1582 | "The adapter is used exclusively by another " |
1557 | CARD_WDEV_ID(card)); | 1583 | "host\n"); |
1558 | else | 1584 | else |
1559 | PRINT_ERR("IDX_ACTIVATE on write channel device %s: " | 1585 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" |
1560 | "negative reply\n", CARD_WDEV_ID(card)); | 1586 | " negative reply\n", |
1587 | dev_name(&card->write.ccwdev->dev)); | ||
1561 | goto out; | 1588 | goto out; |
1562 | } | 1589 | } |
1563 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); | 1590 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); |
1564 | if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { | 1591 | if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { |
1565 | PRINT_WARN("IDX_ACTIVATE on write channel device %s: " | 1592 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " |
1566 | "function level mismatch " | 1593 | "function level mismatch (sent: 0x%x, received: " |
1567 | "(sent: 0x%x, received: 0x%x)\n", | 1594 | "0x%x)\n", dev_name(&card->write.ccwdev->dev), |
1568 | CARD_WDEV_ID(card), card->info.func_level, temp); | 1595 | card->info.func_level, temp); |
1569 | goto out; | 1596 | goto out; |
1570 | } | 1597 | } |
1571 | channel->state = CH_STATE_UP; | 1598 | channel->state = CH_STATE_UP; |
@@ -1591,12 +1618,13 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1591 | 1618 | ||
1592 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { | 1619 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { |
1593 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) | 1620 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) |
1594 | PRINT_ERR("IDX_ACTIVATE on read channel device %s: " | 1621 | dev_err(&card->write.ccwdev->dev, |
1595 | "adapter exclusively used by another host\n", | 1622 | "The adapter is used exclusively by another " |
1596 | CARD_RDEV_ID(card)); | 1623 | "host\n"); |
1597 | else | 1624 | else |
1598 | PRINT_ERR("IDX_ACTIVATE on read channel device %s: " | 1625 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" |
1599 | "negative reply\n", CARD_RDEV_ID(card)); | 1626 | " negative reply\n", |
1627 | dev_name(&card->read.ccwdev->dev)); | ||
1600 | goto out; | 1628 | goto out; |
1601 | } | 1629 | } |
1602 | 1630 | ||
@@ -1610,9 +1638,10 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1610 | 1638 | ||
1611 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); | 1639 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); |
1612 | if (temp != qeth_peer_func_level(card->info.func_level)) { | 1640 | if (temp != qeth_peer_func_level(card->info.func_level)) { |
1613 | PRINT_WARN("IDX_ACTIVATE on read channel device %s: function " | 1641 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " |
1614 | "level mismatch (sent: 0x%x, received: 0x%x)\n", | 1642 | "level mismatch (sent: 0x%x, received: 0x%x)\n", |
1615 | CARD_RDEV_ID(card), card->info.func_level, temp); | 1643 | dev_name(&card->read.ccwdev->dev), |
1644 | card->info.func_level, temp); | ||
1616 | goto out; | 1645 | goto out; |
1617 | } | 1646 | } |
1618 | memcpy(&card->token.issuer_rm_r, | 1647 | memcpy(&card->token.issuer_rm_r, |
@@ -1686,8 +1715,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1686 | (addr_t) iob, 0, 0); | 1715 | (addr_t) iob, 0, 0); |
1687 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); | 1716 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); |
1688 | if (rc) { | 1717 | if (rc) { |
1689 | PRINT_WARN("qeth_send_control_data: " | 1718 | QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " |
1690 | "ccw_device_start rc = %i\n", rc); | 1719 | "ccw_device_start rc = %i\n", |
1720 | dev_name(&card->write.ccwdev->dev), rc); | ||
1691 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1721 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1692 | spin_lock_irqsave(&card->lock, flags); | 1722 | spin_lock_irqsave(&card->lock, flags); |
1693 | list_del_init(&reply->list); | 1723 | list_del_init(&reply->list); |
@@ -2170,11 +2200,8 @@ static void qeth_print_status_with_portname(struct qeth_card *card) | |||
2170 | dbf_text[i] = | 2200 | dbf_text[i] = |
2171 | (char) _ebcasc[(__u8) dbf_text[i]]; | 2201 | (char) _ebcasc[(__u8) dbf_text[i]]; |
2172 | dbf_text[8] = 0; | 2202 | dbf_text[8] = 0; |
2173 | PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n" | 2203 | dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" |
2174 | "with link type %s (portname: %s)\n", | 2204 | "with link type %s (portname: %s)\n", |
2175 | CARD_RDEV_ID(card), | ||
2176 | CARD_WDEV_ID(card), | ||
2177 | CARD_DDEV_ID(card), | ||
2178 | qeth_get_cardname(card), | 2205 | qeth_get_cardname(card), |
2179 | (card->info.mcl_level[0]) ? " (level: " : "", | 2206 | (card->info.mcl_level[0]) ? " (level: " : "", |
2180 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2207 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
@@ -2187,23 +2214,17 @@ static void qeth_print_status_with_portname(struct qeth_card *card) | |||
2187 | static void qeth_print_status_no_portname(struct qeth_card *card) | 2214 | static void qeth_print_status_no_portname(struct qeth_card *card) |
2188 | { | 2215 | { |
2189 | if (card->info.portname[0]) | 2216 | if (card->info.portname[0]) |
2190 | PRINT_INFO("Device %s/%s/%s is a%s " | 2217 | dev_info(&card->gdev->dev, "Device is a%s " |
2191 | "card%s%s%s\nwith link type %s " | 2218 | "card%s%s%s\nwith link type %s " |
2192 | "(no portname needed by interface).\n", | 2219 | "(no portname needed by interface).\n", |
2193 | CARD_RDEV_ID(card), | ||
2194 | CARD_WDEV_ID(card), | ||
2195 | CARD_DDEV_ID(card), | ||
2196 | qeth_get_cardname(card), | 2220 | qeth_get_cardname(card), |
2197 | (card->info.mcl_level[0]) ? " (level: " : "", | 2221 | (card->info.mcl_level[0]) ? " (level: " : "", |
2198 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2222 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
2199 | (card->info.mcl_level[0]) ? ")" : "", | 2223 | (card->info.mcl_level[0]) ? ")" : "", |
2200 | qeth_get_cardname_short(card)); | 2224 | qeth_get_cardname_short(card)); |
2201 | else | 2225 | else |
2202 | PRINT_INFO("Device %s/%s/%s is a%s " | 2226 | dev_info(&card->gdev->dev, "Device is a%s " |
2203 | "card%s%s%s\nwith link type %s.\n", | 2227 | "card%s%s%s\nwith link type %s.\n", |
2204 | CARD_RDEV_ID(card), | ||
2205 | CARD_WDEV_ID(card), | ||
2206 | CARD_DDEV_ID(card), | ||
2207 | qeth_get_cardname(card), | 2228 | qeth_get_cardname(card), |
2208 | (card->info.mcl_level[0]) ? " (level: " : "", | 2229 | (card->info.mcl_level[0]) ? " (level: " : "", |
2209 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2230 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
@@ -2325,7 +2346,6 @@ static int qeth_init_input_buffer(struct qeth_card *card, | |||
2325 | * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off | 2346 | * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off |
2326 | * buffers | 2347 | * buffers |
2327 | */ | 2348 | */ |
2328 | BUG_ON(!pool_entry); | ||
2329 | 2349 | ||
2330 | buf->pool_entry = pool_entry; | 2350 | buf->pool_entry = pool_entry; |
2331 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { | 2351 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { |
@@ -2630,9 +2650,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2630 | qeth_get_micros() - | 2650 | qeth_get_micros() - |
2631 | card->perf_stats.inbound_do_qdio_start_time; | 2651 | card->perf_stats.inbound_do_qdio_start_time; |
2632 | if (rc) { | 2652 | if (rc) { |
2633 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " | 2653 | dev_warn(&card->gdev->dev, |
2634 | "return %i (device %s).\n", | 2654 | "QDIO reported an error, rc=%i\n", rc); |
2635 | rc, CARD_DDEV_ID(card)); | ||
2636 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); | 2655 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); |
2637 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | 2656 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
2638 | } | 2657 | } |
@@ -3730,6 +3749,7 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
3730 | free_netdev(card->dev); | 3749 | free_netdev(card->dev); |
3731 | kfree(card->ip_tbd_list); | 3750 | kfree(card->ip_tbd_list); |
3732 | qeth_free_qdio_buffers(card); | 3751 | qeth_free_qdio_buffers(card); |
3752 | unregister_service_level(&card->qeth_service_level); | ||
3733 | kfree(card); | 3753 | kfree(card); |
3734 | } | 3754 | } |
3735 | 3755 | ||
@@ -3757,7 +3777,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, | |||
3757 | 3777 | ||
3758 | int qeth_core_hardsetup_card(struct qeth_card *card) | 3778 | int qeth_core_hardsetup_card(struct qeth_card *card) |
3759 | { | 3779 | { |
3760 | struct qdio_ssqd_desc *qdio_ssqd; | 3780 | struct qdio_ssqd_desc *ssqd; |
3761 | int retries = 3; | 3781 | int retries = 3; |
3762 | int mpno = 0; | 3782 | int mpno = 0; |
3763 | int rc; | 3783 | int rc; |
@@ -3766,7 +3786,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card) | |||
3766 | atomic_set(&card->force_alloc_skb, 0); | 3786 | atomic_set(&card->force_alloc_skb, 0); |
3767 | retry: | 3787 | retry: |
3768 | if (retries < 3) { | 3788 | if (retries < 3) { |
3769 | PRINT_WARN("Retrying to do IDX activates.\n"); | 3789 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", |
3790 | dev_name(&card->gdev->dev)); | ||
3770 | ccw_device_set_offline(CARD_DDEV(card)); | 3791 | ccw_device_set_offline(CARD_DDEV(card)); |
3771 | ccw_device_set_offline(CARD_WDEV(card)); | 3792 | ccw_device_set_offline(CARD_WDEV(card)); |
3772 | ccw_device_set_offline(CARD_RDEV(card)); | 3793 | ccw_device_set_offline(CARD_RDEV(card)); |
@@ -3792,9 +3813,16 @@ retry: | |||
3792 | return rc; | 3813 | return rc; |
3793 | } | 3814 | } |
3794 | 3815 | ||
3795 | qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card)); | 3816 | ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL); |
3796 | if (qdio_ssqd) | 3817 | if (!ssqd) { |
3797 | mpno = qdio_ssqd->pcnt; | 3818 | rc = -ENOMEM; |
3819 | goto out; | ||
3820 | } | ||
3821 | rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd); | ||
3822 | if (rc == 0) | ||
3823 | mpno = ssqd->pcnt; | ||
3824 | kfree(ssqd); | ||
3825 | |||
3798 | if (mpno) | 3826 | if (mpno) |
3799 | mpno = min(mpno - 1, QETH_MAX_PORTNO); | 3827 | mpno = min(mpno - 1, QETH_MAX_PORTNO); |
3800 | if (card->info.portno > mpno) { | 3828 | if (card->info.portno > mpno) { |
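This hunk also tracks a qdio API change: qdio_get_ssqd_desc() no longer returns a pointer into qdio-internal data but fills a caller-supplied struct qdio_ssqd_desc and reports success through its return code. A hedged sketch of the new calling convention (the helper name is invented; error handling is trimmed to the essentials):

/* Sketch of the reworked qdio_get_ssqd_desc() usage: the caller owns the
 * buffer and must check the return code before trusting its contents.
 */
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/qdio.h>

static int get_port_count(struct ccw_device *cdev, int *pcnt)
{
	struct qdio_ssqd_desc *ssqd;
	int rc;

	ssqd = kmalloc(sizeof(*ssqd), GFP_KERNEL);
	if (!ssqd)
		return -ENOMEM;

	rc = qdio_get_ssqd_desc(cdev, ssqd);
	if (rc == 0)
		*pcnt = ssqd->pcnt;

	kfree(ssqd);
	return rc;
}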
@@ -3834,7 +3862,10 @@ retry: | |||
3834 | } | 3862 | } |
3835 | return 0; | 3863 | return 0; |
3836 | out: | 3864 | out: |
3837 | PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc); | 3865 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " |
3866 | "an error on the device\n"); | ||
3867 | QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n", | ||
3868 | dev_name(&card->gdev->dev), rc); | ||
3838 | return rc; | 3869 | return rc; |
3839 | } | 3870 | } |
3840 | EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); | 3871 | EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); |
@@ -4054,8 +4085,8 @@ int qeth_core_load_discipline(struct qeth_card *card, | |||
4054 | break; | 4085 | break; |
4055 | } | 4086 | } |
4056 | if (!card->discipline.ccwgdriver) { | 4087 | if (!card->discipline.ccwgdriver) { |
4057 | PRINT_ERR("Support for discipline %d not present\n", | 4088 | dev_err(&card->gdev->dev, "There is no kernel module to " |
4058 | discipline); | 4089 | "support discipline %d\n", discipline); |
4059 | rc = -EINVAL; | 4090 | rc = -EINVAL; |
4060 | } | 4091 | } |
4061 | return rc; | 4092 | return rc; |
@@ -4448,7 +4479,7 @@ static int __init qeth_core_init(void) | |||
4448 | { | 4479 | { |
4449 | int rc; | 4480 | int rc; |
4450 | 4481 | ||
4451 | PRINT_INFO("loading core functions\n"); | 4482 | pr_info("loading core functions\n"); |
4452 | INIT_LIST_HEAD(&qeth_core_card_list.list); | 4483 | INIT_LIST_HEAD(&qeth_core_card_list.list); |
4453 | rwlock_init(&qeth_core_card_list.rwlock); | 4484 | rwlock_init(&qeth_core_card_list.rwlock); |
4454 | 4485 | ||
@@ -4488,9 +4519,10 @@ driver_err: | |||
4488 | ccwgroup_err: | 4519 | ccwgroup_err: |
4489 | ccw_driver_unregister(&qeth_ccw_driver); | 4520 | ccw_driver_unregister(&qeth_ccw_driver); |
4490 | ccw_err: | 4521 | ccw_err: |
4522 | QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc); | ||
4491 | qeth_unregister_dbf_views(); | 4523 | qeth_unregister_dbf_views(); |
4492 | out_err: | 4524 | out_err: |
4493 | PRINT_ERR("Initialization failed with code %d\n", rc); | 4525 | pr_err("Initializing the qeth device driver failed\n"); |
4494 | return rc; | 4526 | return rc; |
4495 | } | 4527 | } |
4496 | 4528 | ||
@@ -4503,7 +4535,7 @@ static void __exit qeth_core_exit(void) | |||
4503 | ccw_driver_unregister(&qeth_ccw_driver); | 4535 | ccw_driver_unregister(&qeth_ccw_driver); |
4504 | kmem_cache_destroy(qeth_core_header_cache); | 4536 | kmem_cache_destroy(qeth_core_header_cache); |
4505 | qeth_unregister_dbf_views(); | 4537 | qeth_unregister_dbf_views(); |
4506 | PRINT_INFO("core functions removed\n"); | 4538 | pr_info("core functions removed\n"); |
4507 | } | 4539 | } |
4508 | 4540 | ||
4509 | module_init(qeth_core_init); | 4541 | module_init(qeth_core_init); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 1b1e80336d2c..af15bc648ba1 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -503,12 +506,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
503 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | 506 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; |
504 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, | 507 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, |
505 | OSA_ADDR_LEN); | 508 | OSA_ADDR_LEN); |
506 | PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " | 509 | dev_info(&card->gdev->dev, |
507 | "successfully registered on device %s\n", | 510 | "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " |
508 | card->dev->dev_addr[0], card->dev->dev_addr[1], | 511 | "successfully registered on device %s\n", |
509 | card->dev->dev_addr[2], card->dev->dev_addr[3], | 512 | card->dev->dev_addr[0], card->dev->dev_addr[1], |
510 | card->dev->dev_addr[4], card->dev->dev_addr[5], | 513 | card->dev->dev_addr[2], card->dev->dev_addr[3], |
511 | card->dev->name); | 514 | card->dev->dev_addr[4], card->dev->dev_addr[5], |
515 | card->dev->name); | ||
512 | } | 516 | } |
513 | return 0; | 517 | return 0; |
514 | } | 518 | } |
@@ -1015,9 +1019,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1015 | if (rc) { | 1019 | if (rc) { |
1016 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1020 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1017 | if (rc == 0xe080) { | 1021 | if (rc == 0xe080) { |
1018 | PRINT_WARN("LAN on card %s if offline! " | 1022 | dev_warn(&card->gdev->dev, |
1019 | "Waiting for STARTLAN from card.\n", | 1023 | "The LAN is offline\n"); |
1020 | CARD_BUS_ID(card)); | ||
1021 | card->lan_online = 0; | 1024 | card->lan_online = 0; |
1022 | } | 1025 | } |
1023 | return rc; | 1026 | return rc; |
@@ -1117,8 +1120,8 @@ static int qeth_l2_recover(void *ptr) | |||
1117 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 1120 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
1118 | return 0; | 1121 | return 0; |
1119 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 1122 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
1120 | PRINT_WARN("Recovery of device %s started ...\n", | 1123 | dev_warn(&card->gdev->dev, |
1121 | CARD_BUS_ID(card)); | 1124 | "A recovery process has been started for the device\n"); |
1122 | card->use_hard_stop = 1; | 1125 | card->use_hard_stop = 1; |
1123 | __qeth_l2_set_offline(card->gdev, 1); | 1126 | __qeth_l2_set_offline(card->gdev, 1); |
1124 | rc = __qeth_l2_set_online(card->gdev, 1); | 1127 | rc = __qeth_l2_set_online(card->gdev, 1); |
@@ -1126,27 +1129,27 @@ static int qeth_l2_recover(void *ptr) | |||
1126 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1129 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1127 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1130 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1128 | if (!rc) | 1131 | if (!rc) |
1129 | PRINT_INFO("Device %s successfully recovered!\n", | 1132 | dev_info(&card->gdev->dev, |
1130 | CARD_BUS_ID(card)); | 1133 | "Device successfully recovered!\n"); |
1131 | else { | 1134 | else { |
1132 | rtnl_lock(); | 1135 | rtnl_lock(); |
1133 | dev_close(card->dev); | 1136 | dev_close(card->dev); |
1134 | rtnl_unlock(); | 1137 | rtnl_unlock(); |
1135 | PRINT_INFO("Device %s could not be recovered!\n", | 1138 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1136 | CARD_BUS_ID(card)); | 1139 | "failed to recover an error on the device\n"); |
1137 | } | 1140 | } |
1138 | return 0; | 1141 | return 0; |
1139 | } | 1142 | } |
1140 | 1143 | ||
1141 | static int __init qeth_l2_init(void) | 1144 | static int __init qeth_l2_init(void) |
1142 | { | 1145 | { |
1143 | PRINT_INFO("register layer 2 discipline\n"); | 1146 | pr_info("register layer 2 discipline\n"); |
1144 | return 0; | 1147 | return 0; |
1145 | } | 1148 | } |
1146 | 1149 | ||
1147 | static void __exit qeth_l2_exit(void) | 1150 | static void __exit qeth_l2_exit(void) |
1148 | { | 1151 | { |
1149 | PRINT_INFO("unregister layer 2 discipline\n"); | 1152 | pr_info("unregister layer 2 discipline\n"); |
1150 | } | 1153 | } |
1151 | 1154 | ||
1152 | static void qeth_l2_shutdown(struct ccwgroup_device *gdev) | 1155 | static void qeth_l2_shutdown(struct ccwgroup_device *gdev) |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ed59fedd5922..c0b30b25a5f1 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -917,8 +920,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
917 | if (rc) { | 920 | if (rc) { |
918 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); | 921 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); |
919 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | 922 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); |
920 | PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n", | 923 | dev_warn(&card->gdev->dev, |
921 | buf, rc, rc); | 924 | "Registering IP address %s failed\n", buf); |
922 | } | 925 | } |
923 | return rc; | 926 | return rc; |
924 | } | 927 | } |
@@ -1029,24 +1032,22 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) | |||
1029 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); | 1032 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); |
1030 | 1033 | ||
1031 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { | 1034 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
1032 | PRINT_WARN("set adapter parameters not supported " | 1035 | dev_info(&card->gdev->dev, |
1033 | "on device %s.\n", | 1036 | "set adapter parameters not supported.\n"); |
1034 | CARD_BUS_ID(card)); | ||
1035 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); | 1037 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); |
1036 | return 0; | 1038 | return 0; |
1037 | } | 1039 | } |
1038 | rc = qeth_query_setadapterparms(card); | 1040 | rc = qeth_query_setadapterparms(card); |
1039 | if (rc) { | 1041 | if (rc) { |
1040 | PRINT_WARN("couldn't set adapter parameters on device %s: " | 1042 | QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " |
1041 | "x%x\n", CARD_BUS_ID(card), rc); | 1043 | "0x%x\n", card->gdev->dev.bus_id, rc); |
1042 | return rc; | 1044 | return rc; |
1043 | } | 1045 | } |
1044 | if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { | 1046 | if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { |
1045 | rc = qeth_setadpparms_change_macaddr(card); | 1047 | rc = qeth_setadpparms_change_macaddr(card); |
1046 | if (rc) | 1048 | if (rc) |
1047 | PRINT_WARN("couldn't get MAC address on " | 1049 | dev_warn(&card->gdev->dev, "Reading the adapter MAC" |
1048 | "device %s: x%x\n", | 1050 | " address failed\n", rc); |
1049 | CARD_BUS_ID(card), rc); | ||
1050 | } | 1051 | } |
1051 | 1052 | ||
1052 | if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 1053 | if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
@@ -1160,16 +1161,17 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) | |||
1160 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); | 1161 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); |
1161 | 1162 | ||
1162 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 1163 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
1163 | PRINT_WARN("ARP processing not supported " | 1164 | dev_info(&card->gdev->dev, |
1164 | "on %s!\n", QETH_CARD_IFNAME(card)); | 1165 | "ARP processing not supported on %s!\n", |
1166 | QETH_CARD_IFNAME(card)); | ||
1165 | return 0; | 1167 | return 0; |
1166 | } | 1168 | } |
1167 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 1169 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
1168 | IPA_CMD_ASS_START, 0); | 1170 | IPA_CMD_ASS_START, 0); |
1169 | if (rc) { | 1171 | if (rc) { |
1170 | PRINT_WARN("Could not start ARP processing " | 1172 | dev_warn(&card->gdev->dev, |
1171 | "assist on %s: 0x%x\n", | 1173 | "Starting ARP processing support for %s failed\n", |
1172 | QETH_CARD_IFNAME(card), rc); | 1174 | QETH_CARD_IFNAME(card)); |
1173 | } | 1175 | } |
1174 | return rc; | 1176 | return rc; |
1175 | } | 1177 | } |
@@ -1181,19 +1183,21 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) | |||
1181 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); | 1183 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); |
1182 | 1184 | ||
1183 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { | 1185 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { |
1184 | PRINT_INFO("Hardware IP fragmentation not supported on %s\n", | 1186 | dev_info(&card->gdev->dev, |
1185 | QETH_CARD_IFNAME(card)); | 1187 | "Hardware IP fragmentation not supported on %s\n", |
1188 | QETH_CARD_IFNAME(card)); | ||
1186 | return -EOPNOTSUPP; | 1189 | return -EOPNOTSUPP; |
1187 | } | 1190 | } |
1188 | 1191 | ||
1189 | rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, | 1192 | rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, |
1190 | IPA_CMD_ASS_START, 0); | 1193 | IPA_CMD_ASS_START, 0); |
1191 | if (rc) { | 1194 | if (rc) { |
1192 | PRINT_WARN("Could not start Hardware IP fragmentation " | 1195 | dev_warn(&card->gdev->dev, |
1193 | "assist on %s: 0x%x\n", | 1196 | "Starting IP fragmentation support for %s failed\n", |
1194 | QETH_CARD_IFNAME(card), rc); | 1197 | QETH_CARD_IFNAME(card)); |
1195 | } else | 1198 | } else |
1196 | PRINT_INFO("Hardware IP fragmentation enabled \n"); | 1199 | dev_info(&card->gdev->dev, |
1200 | "Hardware IP fragmentation enabled \n"); | ||
1197 | return rc; | 1201 | return rc; |
1198 | } | 1202 | } |
1199 | 1203 | ||
@@ -1207,17 +1211,18 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) | |||
1207 | return -EOPNOTSUPP; | 1211 | return -EOPNOTSUPP; |
1208 | 1212 | ||
1209 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { | 1213 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { |
1210 | PRINT_INFO("Inbound source address not " | 1214 | dev_info(&card->gdev->dev, |
1211 | "supported on %s\n", QETH_CARD_IFNAME(card)); | 1215 | "Inbound source address not supported on %s\n", |
1216 | QETH_CARD_IFNAME(card)); | ||
1212 | return -EOPNOTSUPP; | 1217 | return -EOPNOTSUPP; |
1213 | } | 1218 | } |
1214 | 1219 | ||
1215 | rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, | 1220 | rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, |
1216 | IPA_CMD_ASS_START, 0); | 1221 | IPA_CMD_ASS_START, 0); |
1217 | if (rc) | 1222 | if (rc) |
1218 | PRINT_WARN("Could not start inbound source " | 1223 | dev_warn(&card->gdev->dev, |
1219 | "assist on %s: 0x%x\n", | 1224 | "Starting proxy ARP support for %s failed\n", |
1220 | QETH_CARD_IFNAME(card), rc); | 1225 | QETH_CARD_IFNAME(card)); |
1221 | return rc; | 1226 | return rc; |
1222 | } | 1227 | } |
1223 | 1228 | ||
@@ -1228,19 +1233,19 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) | |||
1228 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); | 1233 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); |
1229 | 1234 | ||
1230 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { | 1235 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { |
1231 | PRINT_WARN("VLAN not supported on %s\n", | 1236 | dev_info(&card->gdev->dev, |
1232 | QETH_CARD_IFNAME(card)); | 1237 | "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); |
1233 | return -EOPNOTSUPP; | 1238 | return -EOPNOTSUPP; |
1234 | } | 1239 | } |
1235 | 1240 | ||
1236 | rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, | 1241 | rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, |
1237 | IPA_CMD_ASS_START, 0); | 1242 | IPA_CMD_ASS_START, 0); |
1238 | if (rc) { | 1243 | if (rc) { |
1239 | PRINT_WARN("Could not start vlan " | 1244 | dev_warn(&card->gdev->dev, |
1240 | "assist on %s: 0x%x\n", | 1245 | "Starting VLAN support for %s failed\n", |
1241 | QETH_CARD_IFNAME(card), rc); | 1246 | QETH_CARD_IFNAME(card)); |
1242 | } else { | 1247 | } else { |
1243 | PRINT_INFO("VLAN enabled \n"); | 1248 | dev_info(&card->gdev->dev, "VLAN enabled\n"); |
1244 | } | 1249 | } |
1245 | return rc; | 1250 | return rc; |
1246 | } | 1251 | } |
@@ -1252,19 +1257,20 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) | |||
1252 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); | 1257 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); |
1253 | 1258 | ||
1254 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { | 1259 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { |
1255 | PRINT_WARN("Multicast not supported on %s\n", | 1260 | dev_info(&card->gdev->dev, |
1256 | QETH_CARD_IFNAME(card)); | 1261 | "Multicast not supported on %s\n", |
1262 | QETH_CARD_IFNAME(card)); | ||
1257 | return -EOPNOTSUPP; | 1263 | return -EOPNOTSUPP; |
1258 | } | 1264 | } |
1259 | 1265 | ||
1260 | rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, | 1266 | rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, |
1261 | IPA_CMD_ASS_START, 0); | 1267 | IPA_CMD_ASS_START, 0); |
1262 | if (rc) { | 1268 | if (rc) { |
1263 | PRINT_WARN("Could not start multicast " | 1269 | dev_warn(&card->gdev->dev, |
1264 | "assist on %s: rc=%i\n", | 1270 | "Starting multicast support for %s failed\n", |
1265 | QETH_CARD_IFNAME(card), rc); | 1271 | QETH_CARD_IFNAME(card)); |
1266 | } else { | 1272 | } else { |
1267 | PRINT_INFO("Multicast enabled\n"); | 1273 | dev_info(&card->gdev->dev, "Multicast enabled\n"); |
1268 | card->dev->flags |= IFF_MULTICAST; | 1274 | card->dev->flags |= IFF_MULTICAST; |
1269 | } | 1275 | } |
1270 | return rc; | 1276 | return rc; |
@@ -1315,36 +1321,37 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) | |||
1315 | 1321 | ||
1316 | rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); | 1322 | rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); |
1317 | if (rc) { | 1323 | if (rc) { |
1318 | PRINT_ERR("IPv6 query ipassist failed on %s\n", | 1324 | dev_err(&card->gdev->dev, |
1319 | QETH_CARD_IFNAME(card)); | 1325 | "Activating IPv6 support for %s failed\n", |
1326 | QETH_CARD_IFNAME(card)); | ||
1320 | return rc; | 1327 | return rc; |
1321 | } | 1328 | } |
1322 | rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, | 1329 | rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, |
1323 | IPA_CMD_ASS_START, 3); | 1330 | IPA_CMD_ASS_START, 3); |
1324 | if (rc) { | 1331 | if (rc) { |
1325 | PRINT_WARN("IPv6 start assist (version 4) failed " | 1332 | dev_err(&card->gdev->dev, |
1326 | "on %s: 0x%x\n", | 1333 | "Activating IPv6 support for %s failed\n", |
1327 | QETH_CARD_IFNAME(card), rc); | 1334 | QETH_CARD_IFNAME(card)); |
1328 | return rc; | 1335 | return rc; |
1329 | } | 1336 | } |
1330 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, | 1337 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, |
1331 | IPA_CMD_ASS_START); | 1338 | IPA_CMD_ASS_START); |
1332 | if (rc) { | 1339 | if (rc) { |
1333 | PRINT_WARN("IPV6 start assist (version 6) failed " | 1340 | dev_err(&card->gdev->dev, |
1334 | "on %s: 0x%x\n", | 1341 | "Activating IPv6 support for %s failed\n", |
1335 | QETH_CARD_IFNAME(card), rc); | 1342 | QETH_CARD_IFNAME(card)); |
1336 | return rc; | 1343 | return rc; |
1337 | } | 1344 | } |
1338 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, | 1345 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, |
1339 | IPA_CMD_ASS_START); | 1346 | IPA_CMD_ASS_START); |
1340 | if (rc) { | 1347 | if (rc) { |
1341 | PRINT_WARN("Could not enable passthrough " | 1348 | dev_warn(&card->gdev->dev, |
1342 | "on %s: 0x%x\n", | 1349 | "Enabling the passthrough mode for %s failed\n", |
1343 | QETH_CARD_IFNAME(card), rc); | 1350 | QETH_CARD_IFNAME(card)); |
1344 | return rc; | 1351 | return rc; |
1345 | } | 1352 | } |
1346 | out: | 1353 | out: |
1347 | PRINT_INFO("IPV6 enabled \n"); | 1354 | dev_info(&card->gdev->dev, "IPV6 enabled\n"); |
1348 | return 0; | 1355 | return 0; |
1349 | } | 1356 | } |
1350 | #endif | 1357 | #endif |
@@ -1356,8 +1363,8 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) | |||
1356 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); | 1363 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); |
1357 | 1364 | ||
1358 | if (!qeth_is_supported(card, IPA_IPV6)) { | 1365 | if (!qeth_is_supported(card, IPA_IPV6)) { |
1359 | PRINT_WARN("IPv6 not supported on %s\n", | 1366 | dev_info(&card->gdev->dev, |
1360 | QETH_CARD_IFNAME(card)); | 1367 | "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); |
1361 | return 0; | 1368 | return 0; |
1362 | } | 1369 | } |
1363 | #ifdef CONFIG_QETH_IPV6 | 1370 | #ifdef CONFIG_QETH_IPV6 |
@@ -1373,34 +1380,35 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) | |||
1373 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); | 1380 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); |
1374 | card->info.broadcast_capable = 0; | 1381 | card->info.broadcast_capable = 0; |
1375 | if (!qeth_is_supported(card, IPA_FILTERING)) { | 1382 | if (!qeth_is_supported(card, IPA_FILTERING)) { |
1376 | PRINT_WARN("Broadcast not supported on %s\n", | 1383 | dev_info(&card->gdev->dev, |
1377 | QETH_CARD_IFNAME(card)); | 1384 | "Broadcast not supported on %s\n", |
1385 | QETH_CARD_IFNAME(card)); | ||
1378 | rc = -EOPNOTSUPP; | 1386 | rc = -EOPNOTSUPP; |
1379 | goto out; | 1387 | goto out; |
1380 | } | 1388 | } |
1381 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1389 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1382 | IPA_CMD_ASS_START, 0); | 1390 | IPA_CMD_ASS_START, 0); |
1383 | if (rc) { | 1391 | if (rc) { |
1384 | PRINT_WARN("Could not enable broadcasting filtering " | 1392 | dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " |
1385 | "on %s: 0x%x\n", | 1393 | "%s failed\n", QETH_CARD_IFNAME(card)); |
1386 | QETH_CARD_IFNAME(card), rc); | ||
1387 | goto out; | 1394 | goto out; |
1388 | } | 1395 | } |
1389 | 1396 | ||
1390 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1397 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1391 | IPA_CMD_ASS_CONFIGURE, 1); | 1398 | IPA_CMD_ASS_CONFIGURE, 1); |
1392 | if (rc) { | 1399 | if (rc) { |
1393 | PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n", | 1400 | dev_warn(&card->gdev->dev, |
1394 | QETH_CARD_IFNAME(card), rc); | 1401 | "Setting up broadcast filtering for %s failed\n", |
1402 | QETH_CARD_IFNAME(card)); | ||
1395 | goto out; | 1403 | goto out; |
1396 | } | 1404 | } |
1397 | card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; | 1405 | card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; |
1398 | PRINT_INFO("Broadcast enabled \n"); | 1406 | dev_info(&card->gdev->dev, "Broadcast enabled\n"); |
1399 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1407 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1400 | IPA_CMD_ASS_ENABLE, 1); | 1408 | IPA_CMD_ASS_ENABLE, 1); |
1401 | if (rc) { | 1409 | if (rc) { |
1402 | PRINT_WARN("Could not set up broadcast echo filtering on " | 1410 | dev_warn(&card->gdev->dev, "Setting up broadcast echo " |
1403 | "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc); | 1411 | "filtering for %s failed\n", QETH_CARD_IFNAME(card)); |
1404 | goto out; | 1412 | goto out; |
1405 | } | 1413 | } |
1406 | card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; | 1414 | card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; |
@@ -1419,18 +1427,18 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card) | |||
1419 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, | 1427 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, |
1420 | IPA_CMD_ASS_START, 0); | 1428 | IPA_CMD_ASS_START, 0); |
1421 | if (rc) { | 1429 | if (rc) { |
1422 | PRINT_WARN("Starting Inbound HW Checksumming failed on %s: " | 1430 | dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " |
1423 | "0x%x,\ncontinuing using Inbound SW Checksumming\n", | 1431 | "failed, using SW checksumming\n", |
1424 | QETH_CARD_IFNAME(card), rc); | 1432 | QETH_CARD_IFNAME(card)); |
1425 | return rc; | 1433 | return rc; |
1426 | } | 1434 | } |
1427 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, | 1435 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, |
1428 | IPA_CMD_ASS_ENABLE, | 1436 | IPA_CMD_ASS_ENABLE, |
1429 | card->info.csum_mask); | 1437 | card->info.csum_mask); |
1430 | if (rc) { | 1438 | if (rc) { |
1431 | PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: " | 1439 | dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " |
1432 | "0x%x,\ncontinuing using Inbound SW Checksumming\n", | 1440 | "failed, using SW checksumming\n", |
1433 | QETH_CARD_IFNAME(card), rc); | 1441 | QETH_CARD_IFNAME(card)); |
1434 | return rc; | 1442 | return rc; |
1435 | } | 1443 | } |
1436 | return 0; | 1444 | return 0; |
@@ -1443,26 +1451,30 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) | |||
1443 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); | 1451 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); |
1444 | 1452 | ||
1445 | if (card->options.checksum_type == NO_CHECKSUMMING) { | 1453 | if (card->options.checksum_type == NO_CHECKSUMMING) { |
1446 | PRINT_WARN("Using no checksumming on %s.\n", | 1454 | dev_info(&card->gdev->dev, |
1447 | QETH_CARD_IFNAME(card)); | 1455 | "Using no checksumming on %s.\n", |
1456 | QETH_CARD_IFNAME(card)); | ||
1448 | return 0; | 1457 | return 0; |
1449 | } | 1458 | } |
1450 | if (card->options.checksum_type == SW_CHECKSUMMING) { | 1459 | if (card->options.checksum_type == SW_CHECKSUMMING) { |
1451 | PRINT_WARN("Using SW checksumming on %s.\n", | 1460 | dev_info(&card->gdev->dev, |
1452 | QETH_CARD_IFNAME(card)); | 1461 | "Using SW checksumming on %s.\n", |
1462 | QETH_CARD_IFNAME(card)); | ||
1453 | return 0; | 1463 | return 0; |
1454 | } | 1464 | } |
1455 | if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { | 1465 | if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { |
1456 | PRINT_WARN("Inbound HW Checksumming not " | 1466 | dev_info(&card->gdev->dev, |
1457 | "supported on %s,\ncontinuing " | 1467 | "Inbound HW Checksumming not " |
1458 | "using Inbound SW Checksumming\n", | 1468 | "supported on %s,\ncontinuing " |
1459 | QETH_CARD_IFNAME(card)); | 1469 | "using Inbound SW Checksumming\n", |
1470 | QETH_CARD_IFNAME(card)); | ||
1460 | card->options.checksum_type = SW_CHECKSUMMING; | 1471 | card->options.checksum_type = SW_CHECKSUMMING; |
1461 | return 0; | 1472 | return 0; |
1462 | } | 1473 | } |
1463 | rc = qeth_l3_send_checksum_command(card); | 1474 | rc = qeth_l3_send_checksum_command(card); |
1464 | if (!rc) | 1475 | if (!rc) |
1465 | PRINT_INFO("HW Checksumming (inbound) enabled \n"); | 1476 | dev_info(&card->gdev->dev, |
1477 | "HW Checksumming (inbound) enabled\n"); | ||
1466 | 1478 | ||
1467 | return rc; | 1479 | return rc; |
1468 | } | 1480 | } |
@@ -1474,18 +1486,20 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1474 | QETH_DBF_TEXT(TRACE, 3, "sttso"); | 1486 | QETH_DBF_TEXT(TRACE, 3, "sttso"); |
1475 | 1487 | ||
1476 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { | 1488 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { |
1477 | PRINT_WARN("Outbound TSO not supported on %s\n", | 1489 | dev_info(&card->gdev->dev, |
1478 | QETH_CARD_IFNAME(card)); | 1490 | "Outbound TSO not supported on %s\n", |
1491 | QETH_CARD_IFNAME(card)); | ||
1479 | rc = -EOPNOTSUPP; | 1492 | rc = -EOPNOTSUPP; |
1480 | } else { | 1493 | } else { |
1481 | rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, | 1494 | rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, |
1482 | IPA_CMD_ASS_START, 0); | 1495 | IPA_CMD_ASS_START, 0); |
1483 | if (rc) | 1496 | if (rc) |
1484 | PRINT_WARN("Could not start outbound TSO " | 1497 | dev_warn(&card->gdev->dev, "Starting outbound TCP " |
1485 | "assist on %s: rc=%i\n", | 1498 | "segmentation offload for %s failed\n", |
1486 | QETH_CARD_IFNAME(card), rc); | 1499 | QETH_CARD_IFNAME(card)); |
1487 | else | 1500 | else |
1488 | PRINT_INFO("Outbound TSO enabled\n"); | 1501 | dev_info(&card->gdev->dev, |
1502 | "Outbound TSO enabled\n"); | ||
1489 | } | 1503 | } |
1490 | if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { | 1504 | if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { |
1491 | card->options.large_send = QETH_LARGE_SEND_NO; | 1505 | card->options.large_send = QETH_LARGE_SEND_NO; |
@@ -1578,12 +1592,8 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card, | |||
1578 | else { | 1592 | else { |
1579 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | | 1593 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | |
1580 | UNIQUE_ID_NOT_BY_CARD; | 1594 | UNIQUE_ID_NOT_BY_CARD; |
1581 | PRINT_WARN("couldn't get a unique id from the card on device " | 1595 | dev_warn(&card->gdev->dev, "The network adapter failed to " |
1582 | "%s (result=x%x), using default id. ipv6 " | 1596 | "generate a unique ID\n"); |
1583 | "autoconfig on other lpars may lead to duplicate " | ||
1584 | "ip addresses. please use manually " | ||
1585 | "configured ones.\n", | ||
1586 | CARD_BUS_ID(card), cmd->hdr.return_code); | ||
1587 | } | 1597 | } |
1588 | return 0; | 1598 | return 0; |
1589 | } | 1599 | } |
@@ -3086,9 +3096,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3086 | if (rc) { | 3096 | if (rc) { |
3087 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 3097 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3088 | if (rc == 0xe080) { | 3098 | if (rc == 0xe080) { |
3089 | PRINT_WARN("LAN on card %s if offline! " | 3099 | dev_warn(&card->gdev->dev, |
3090 | "Waiting for STARTLAN from card.\n", | 3100 | "The LAN is offline\n"); |
3091 | CARD_BUS_ID(card)); | ||
3092 | card->lan_online = 0; | 3101 | card->lan_online = 0; |
3093 | } | 3102 | } |
3094 | return rc; | 3103 | return rc; |
@@ -3194,8 +3203,8 @@ static int qeth_l3_recover(void *ptr) | |||
3194 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 3203 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
3195 | return 0; | 3204 | return 0; |
3196 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 3205 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
3197 | PRINT_WARN("Recovery of device %s started ...\n", | 3206 | dev_warn(&card->gdev->dev, |
3198 | CARD_BUS_ID(card)); | 3207 | "A recovery process has been started for the device\n"); |
3199 | card->use_hard_stop = 1; | 3208 | card->use_hard_stop = 1; |
3200 | __qeth_l3_set_offline(card->gdev, 1); | 3209 | __qeth_l3_set_offline(card->gdev, 1); |
3201 | rc = __qeth_l3_set_online(card->gdev, 1); | 3210 | rc = __qeth_l3_set_online(card->gdev, 1); |
@@ -3203,14 +3212,14 @@ static int qeth_l3_recover(void *ptr) | |||
3203 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3212 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3204 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3213 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3205 | if (!rc) | 3214 | if (!rc) |
3206 | PRINT_INFO("Device %s successfully recovered!\n", | 3215 | dev_info(&card->gdev->dev, |
3207 | CARD_BUS_ID(card)); | 3216 | "Device successfully recovered!\n"); |
3208 | else { | 3217 | else { |
3209 | rtnl_lock(); | 3218 | rtnl_lock(); |
3210 | dev_close(card->dev); | 3219 | dev_close(card->dev); |
3211 | rtnl_unlock(); | 3220 | rtnl_unlock(); |
3212 | PRINT_INFO("Device %s could not be recovered!\n", | 3221 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3213 | CARD_BUS_ID(card)); | 3222 | "failed to recover an error on the device\n"); |
3214 | } | 3223 | } |
3215 | return 0; | 3224 | return 0; |
3216 | } | 3225 | } |
@@ -3344,7 +3353,7 @@ static int qeth_l3_register_notifiers(void) | |||
3344 | return rc; | 3353 | return rc; |
3345 | } | 3354 | } |
3346 | #else | 3355 | #else |
3347 | PRINT_WARN("layer 3 discipline no IPv6 support\n"); | 3356 | pr_warning("There is no IPv6 support for the layer 3 discipline\n"); |
3348 | #endif | 3357 | #endif |
3349 | return 0; | 3358 | return 0; |
3350 | } | 3359 | } |
@@ -3363,7 +3372,7 @@ static int __init qeth_l3_init(void) | |||
3363 | { | 3372 | { |
3364 | int rc = 0; | 3373 | int rc = 0; |
3365 | 3374 | ||
3366 | PRINT_INFO("register layer 3 discipline\n"); | 3375 | pr_info("register layer 3 discipline\n"); |
3367 | rc = qeth_l3_register_notifiers(); | 3376 | rc = qeth_l3_register_notifiers(); |
3368 | return rc; | 3377 | return rc; |
3369 | } | 3378 | } |
@@ -3371,7 +3380,7 @@ static int __init qeth_l3_init(void) | |||
3371 | static void __exit qeth_l3_exit(void) | 3380 | static void __exit qeth_l3_exit(void) |
3372 | { | 3381 | { |
3373 | qeth_l3_unregister_notifiers(); | 3382 | qeth_l3_unregister_notifiers(); |
3374 | PRINT_INFO("unregister layer 3 discipline\n"); | 3383 | pr_info("unregister layer 3 discipline\n"); |
3375 | } | 3384 | } |
3376 | 3385 | ||
3377 | module_init(qeth_l3_init); | 3386 | module_init(qeth_l3_init); |
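The qeth_l3 hunks above replace the driver-private PRINT_WARN/PRINT_INFO macros with the generic dev_warn()/dev_info() and pr_*() helpers, so that each message is prefixed with the device that issued it rather than carrying the return code in the text. A minimal sketch of the pattern; the message text is only loosely based on the checksumming messages above and the helper name is illustrative:

#include <linux/device.h>

/* Sketch only: dev_*() identifies the device on its own, so the format
 * string no longer needs to embed a bus ID or return code.
 */
static void example_report_csum(struct device *dev, int rc)
{
	if (rc)
		dev_warn(dev, "Starting HW checksumming failed, "
			 "using SW checksumming\n");
	else
		dev_info(dev, "HW Checksumming (inbound) enabled\n");
}
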
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 3d4e3e3f3fc0..e529b55b3ce9 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -25,9 +25,15 @@ | |||
25 | * Sven Schuetz | 25 | * Sven Schuetz |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "zfcp" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/miscdevice.h> | 31 | #include <linux/miscdevice.h> |
32 | #include <linux/seq_file.h> | ||
29 | #include "zfcp_ext.h" | 33 | #include "zfcp_ext.h" |
30 | 34 | ||
35 | #define ZFCP_BUS_ID_SIZE 20 | ||
36 | |||
31 | static char *device; | 37 | static char *device; |
32 | 38 | ||
33 | MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); | 39 | MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); |
@@ -83,9 +89,9 @@ static int __init zfcp_device_setup(char *devstr) | |||
83 | strcpy(str, devstr); | 89 | strcpy(str, devstr); |
84 | 90 | ||
85 | token = strsep(&str, ","); | 91 | token = strsep(&str, ","); |
86 | if (!token || strlen(token) >= BUS_ID_SIZE) | 92 | if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) |
87 | goto err_out; | 93 | goto err_out; |
88 | strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); | 94 | strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE); |
89 | 95 | ||
90 | token = strsep(&str, ","); | 96 | token = strsep(&str, ","); |
91 | if (!token || strict_strtoull(token, 0, | 97 | if (!token || strict_strtoull(token, 0, |
@@ -102,7 +108,7 @@ static int __init zfcp_device_setup(char *devstr) | |||
102 | 108 | ||
103 | err_out: | 109 | err_out: |
104 | kfree(str); | 110 | kfree(str); |
105 | pr_err("zfcp: %s is not a valid SCSI device\n", devstr); | 111 | pr_err("%s is not a valid SCSI device\n", devstr); |
106 | return 0; | 112 | return 0; |
107 | } | 113 | } |
108 | 114 | ||
@@ -186,13 +192,13 @@ static int __init zfcp_module_init(void) | |||
186 | 192 | ||
187 | retval = misc_register(&zfcp_cfdc_misc); | 193 | retval = misc_register(&zfcp_cfdc_misc); |
188 | if (retval) { | 194 | if (retval) { |
189 | pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n"); | 195 | pr_err("Registering the misc device zfcp_cfdc failed\n"); |
190 | goto out_misc; | 196 | goto out_misc; |
191 | } | 197 | } |
192 | 198 | ||
193 | retval = zfcp_ccw_register(); | 199 | retval = zfcp_ccw_register(); |
194 | if (retval) { | 200 | if (retval) { |
195 | pr_err("zfcp: The zfcp device driver could not register with " | 201 | pr_err("The zfcp device driver could not register with " |
196 | "the common I/O layer\n"); | 202 | "the common I/O layer\n"); |
197 | goto out_ccw_register; | 203 | goto out_ccw_register; |
198 | } | 204 | } |
@@ -436,6 +442,16 @@ static void _zfcp_status_read_scheduler(struct work_struct *work) | |||
436 | stat_work)); | 442 | stat_work)); |
437 | } | 443 | } |
438 | 444 | ||
445 | static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) | ||
446 | { | ||
447 | struct zfcp_adapter *adapter = | ||
448 | container_of(sl, struct zfcp_adapter, service_level); | ||
449 | |||
450 | seq_printf(m, "zfcp: %s microcode level %x\n", | ||
451 | dev_name(&adapter->ccw_device->dev), | ||
452 | adapter->fsf_lic_version); | ||
453 | } | ||
454 | |||
439 | /** | 455 | /** |
440 | * zfcp_adapter_enqueue - enqueue a new adapter to the list | 456 | * zfcp_adapter_enqueue - enqueue a new adapter to the list |
441 | * @ccw_device: pointer to the struct cc_device | 457 | * @ccw_device: pointer to the struct cc_device |
@@ -500,6 +516,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
500 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); | 516 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); |
501 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); | 517 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); |
502 | 518 | ||
519 | adapter->service_level.seq_print = zfcp_print_sl; | ||
520 | |||
503 | /* mark adapter unusable as long as sysfs registration is not complete */ | 521 | /* mark adapter unusable as long as sysfs registration is not complete */ |
504 | atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); | 522 | atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); |
505 | 523 | ||
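The zfcp files in this patch all gain a KMSG_COMPONENT/pr_fmt() pair, and the pr_err() calls in zfcp_device_setup() and zfcp_module_init() drop their hard-coded "zfcp: " prefix because pr_fmt() now supplies it at compile time. A small sketch of the mechanism; the function below is illustrative, not taken from the patch:

/* pr_fmt() must be defined before the first include that pulls in printk. */
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>

static void example_reject_device(const char *devstr)
{
	/* emitted with a "zfcp: " prefix supplied by pr_fmt() */
	pr_err("%s is not a valid SCSI device\n", devstr);
}
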
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 951a8d409d1d..728147131e1d 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | /** | 14 | /** |
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index ec2abceca6dc..f1a7518e67ed 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Copyright IBM Corporation 2008 | 7 | * Copyright IBM Corporation 2008 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "zfcp" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/types.h> | 13 | #include <linux/types.h> |
11 | #include <linux/miscdevice.h> | 14 | #include <linux/miscdevice.h> |
12 | #include <asm/ccwdev.h> | 15 | #include <asm/ccwdev.h> |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 31012d58cfb7..735d675623f8 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
10 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
11 | #include "zfcp_ext.h" | 14 | #include "zfcp_ext.h" |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9ce4c75bd190..e19e46ae4a68 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/qdio.h> | 33 | #include <asm/qdio.h> |
34 | #include <asm/debug.h> | 34 | #include <asm/debug.h> |
35 | #include <asm/ebcdic.h> | 35 | #include <asm/ebcdic.h> |
36 | #include <asm/sysinfo.h> | ||
36 | #include "zfcp_dbf.h" | 37 | #include "zfcp_dbf.h" |
37 | #include "zfcp_fsf.h" | 38 | #include "zfcp_fsf.h" |
38 | 39 | ||
@@ -515,6 +516,7 @@ struct zfcp_adapter { | |||
515 | struct fsf_qtcb_bottom_port *stats_reset_data; | 516 | struct fsf_qtcb_bottom_port *stats_reset_data; |
516 | unsigned long stats_reset; | 517 | unsigned long stats_reset; |
517 | struct work_struct scan_work; | 518 | struct work_struct scan_work; |
519 | struct service_level service_level; | ||
518 | atomic_t qdio_outb_full; /* queue full incidents */ | 520 | atomic_t qdio_outb_full; /* queue full incidents */ |
519 | }; | 521 | }; |
520 | 522 | ||
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c557ba34e1aa..4ed4950d994b 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | #define ZFCP_MAX_ERPS 3 | 14 | #define ZFCP_MAX_ERPS 3 |
@@ -1281,10 +1284,13 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) | |||
1281 | break; | 1284 | break; |
1282 | 1285 | ||
1283 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 1286 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
1284 | if (result != ZFCP_ERP_SUCCEEDED) | 1287 | if (result != ZFCP_ERP_SUCCEEDED) { |
1288 | unregister_service_level(&adapter->service_level); | ||
1285 | zfcp_erp_rports_del(adapter); | 1289 | zfcp_erp_rports_del(adapter); |
1286 | else | 1290 | } else { |
1291 | register_service_level(&adapter->service_level); | ||
1287 | schedule_work(&adapter->scan_work); | 1292 | schedule_work(&adapter->scan_work); |
1293 | } | ||
1288 | zfcp_adapter_put(adapter); | 1294 | zfcp_adapter_put(adapter); |
1289 | break; | 1295 | break; |
1290 | } | 1296 | } |
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 8aab3091a7b1..f009f2a7ec3e 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2008 | 6 | * Copyright IBM Corporation 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | struct ct_iu_gpn_ft_req { | 14 | struct ct_iu_gpn_ft_req { |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index dc0367690405..9c72e083559d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/blktrace_api.h> | 12 | #include <linux/blktrace_api.h> |
10 | #include "zfcp_ext.h" | 13 | #include "zfcp_ext.h" |
11 | 14 | ||
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 664752f90b20..d3b55fb66f13 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ | 14 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 468c880f8b6d..9dc42a68fbdd 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
11 | 14 | ||
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index ca9293ba1766..899af2b45b1e 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2008 | 6 | * Copyright IBM Corporation 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ | 14 | #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ |
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index c3e4ab07b9cc..0eea90781385 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -1,17 +1,21 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/sysinfo.c | 2 | * drivers/s390/sysinfo.c |
3 | * | 3 | * |
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright IBM Corp. 2001, 2008 |
5 | * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) | 5 | * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
6 | */ | 7 | */ |
7 | 8 | ||
8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
9 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
10 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> | ||
11 | #include <linux/init.h> | 13 | #include <linux/init.h> |
12 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/module.h> | ||
13 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
14 | #include <asm/sysinfo.h> | 17 | #include <asm/sysinfo.h> |
18 | #include <asm/cpcmd.h> | ||
15 | 19 | ||
16 | /* Sigh, math-emu. Don't ask. */ | 20 | /* Sigh, math-emu. Don't ask. */ |
17 | #include <asm/sfp-util.h> | 21 | #include <asm/sfp-util.h> |
@@ -271,6 +275,125 @@ static __init int create_proc_sysinfo(void) | |||
271 | 275 | ||
272 | __initcall(create_proc_sysinfo); | 276 | __initcall(create_proc_sysinfo); |
273 | 277 | ||
278 | /* | ||
279 | * Service levels interface. | ||
280 | */ | ||
281 | |||
282 | static DECLARE_RWSEM(service_level_sem); | ||
283 | static LIST_HEAD(service_level_list); | ||
284 | |||
285 | int register_service_level(struct service_level *slr) | ||
286 | { | ||
287 | struct service_level *ptr; | ||
288 | |||
289 | down_write(&service_level_sem); | ||
290 | list_for_each_entry(ptr, &service_level_list, list) | ||
291 | if (ptr == slr) { | ||
292 | up_write(&service_level_sem); | ||
293 | return -EEXIST; | ||
294 | } | ||
295 | list_add_tail(&slr->list, &service_level_list); | ||
296 | up_write(&service_level_sem); | ||
297 | return 0; | ||
298 | } | ||
299 | EXPORT_SYMBOL(register_service_level); | ||
300 | |||
301 | int unregister_service_level(struct service_level *slr) | ||
302 | { | ||
303 | struct service_level *ptr, *next; | ||
304 | int rc = -ENOENT; | ||
305 | |||
306 | down_write(&service_level_sem); | ||
307 | list_for_each_entry_safe(ptr, next, &service_level_list, list) { | ||
308 | if (ptr != slr) | ||
309 | continue; | ||
310 | list_del(&ptr->list); | ||
311 | rc = 0; | ||
312 | break; | ||
313 | } | ||
314 | up_write(&service_level_sem); | ||
315 | return rc; | ||
316 | } | ||
317 | EXPORT_SYMBOL(unregister_service_level); | ||
318 | |||
319 | static void *service_level_start(struct seq_file *m, loff_t *pos) | ||
320 | { | ||
321 | down_read(&service_level_sem); | ||
322 | return seq_list_start(&service_level_list, *pos); | ||
323 | } | ||
324 | |||
325 | static void *service_level_next(struct seq_file *m, void *p, loff_t *pos) | ||
326 | { | ||
327 | return seq_list_next(p, &service_level_list, pos); | ||
328 | } | ||
329 | |||
330 | static void service_level_stop(struct seq_file *m, void *p) | ||
331 | { | ||
332 | up_read(&service_level_sem); | ||
333 | } | ||
334 | |||
335 | static int service_level_show(struct seq_file *m, void *p) | ||
336 | { | ||
337 | struct service_level *slr; | ||
338 | |||
339 | slr = list_entry(p, struct service_level, list); | ||
340 | slr->seq_print(m, slr); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static const struct seq_operations service_level_seq_ops = { | ||
345 | .start = service_level_start, | ||
346 | .next = service_level_next, | ||
347 | .stop = service_level_stop, | ||
348 | .show = service_level_show | ||
349 | }; | ||
350 | |||
351 | static int service_level_open(struct inode *inode, struct file *file) | ||
352 | { | ||
353 | return seq_open(file, &service_level_seq_ops); | ||
354 | } | ||
355 | |||
356 | static const struct file_operations service_level_ops = { | ||
357 | .open = service_level_open, | ||
358 | .read = seq_read, | ||
359 | .llseek = seq_lseek, | ||
360 | .release = seq_release | ||
361 | }; | ||
362 | |||
363 | static void service_level_vm_print(struct seq_file *m, | ||
364 | struct service_level *slr) | ||
365 | { | ||
366 | char *query_buffer, *str; | ||
367 | |||
368 | query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); | ||
369 | if (!query_buffer) | ||
370 | return; | ||
371 | cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL); | ||
372 | str = strchr(query_buffer, '\n'); | ||
373 | if (str) | ||
374 | *str = 0; | ||
375 | seq_printf(m, "VM: %s\n", query_buffer); | ||
376 | kfree(query_buffer); | ||
377 | } | ||
378 | |||
379 | static struct service_level service_level_vm = { | ||
380 | .seq_print = service_level_vm_print | ||
381 | }; | ||
382 | |||
383 | static __init int create_proc_service_level(void) | ||
384 | { | ||
385 | proc_create("service_levels", 0, NULL, &service_level_ops); | ||
386 | if (MACHINE_IS_VM) | ||
387 | register_service_level(&service_level_vm); | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | subsys_initcall(create_proc_service_level); | ||
392 | |||
393 | /* | ||
394 | * Bogomips calculation based on cpu capability. | ||
395 | */ | ||
396 | |||
274 | int get_cpu_capability(unsigned int *capability) | 397 | int get_cpu_capability(unsigned int *capability) |
275 | { | 398 | { |
276 | struct sysinfo_1_2_2 *info; | 399 | struct sysinfo_1_2_2 *info; |
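The service level interface added to sysinfo.c above lets an s390 driver publish one line in /proc/service_levels by embedding a struct service_level and supplying a seq_print callback, which is how the zfcp adapter microcode level is reported earlier in this patch. A hedged sketch of a consumer; the example_* names are hypothetical, and the declarations are assumed to live in <asm/sysinfo.h>, which zfcp_def.h above starts to include:

#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

/* Hypothetical consumer: contributes one line to /proc/service_levels. */
static void example_print_level(struct seq_file *m, struct service_level *slr)
{
	seq_printf(m, "example: firmware level 42\n");
}

static struct service_level example_service_level = {
	.seq_print = example_print_level,
};

static int __init example_register(void)
{
	/* returns -EEXIST if this entry is already on the list */
	return register_service_level(&example_service_level);
}

static void __exit example_unregister(void)
{
	unregister_service_level(&example_service_level);
}
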
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f458c1217c5e..c41fa2af7677 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -949,7 +949,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
949 | set_binfmt(&elf_format); | 949 | set_binfmt(&elf_format); |
950 | 950 | ||
951 | #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES | 951 | #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES |
952 | retval = arch_setup_additional_pages(bprm, executable_stack); | 952 | retval = arch_setup_additional_pages(bprm, !!elf_interpreter); |
953 | if (retval < 0) { | 953 | if (retval < 0) { |
954 | send_sig(SIGKILL, current, 0); | 954 | send_sig(SIGKILL, current, 0); |
955 | goto out; | 955 | goto out; |
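The binfmt_elf.c hunk above changes the second argument of arch_setup_additional_pages() from the executable-stack flag to a flag saying whether the binary uses an ELF interpreter. A hypothetical sketch of how an architecture could act on it; the helper name is made up and this is not the implementation of any particular architecture:

#include <linux/binfmts.h>
#include <linux/sched.h>

/* Hypothetical stand-in for an architecture's real vDSO mapping code. */
static int example_map_vdso(struct mm_struct *mm)
{
	return 0;
}

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!uses_interp)
		return 0;	/* one possible policy: skip the vDSO for static binaries */
	return example_map_vdso(current->mm);
}
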
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 18546d8eb78e..36fa286adad5 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h | |||
@@ -49,7 +49,7 @@ | |||
49 | 49 | ||
50 | /* memmap is virtually contigious. */ | 50 | /* memmap is virtually contigious. */ |
51 | #define __pfn_to_page(pfn) (vmemmap + (pfn)) | 51 | #define __pfn_to_page(pfn) (vmemmap + (pfn)) |
52 | #define __page_to_pfn(page) ((page) - vmemmap) | 52 | #define __page_to_pfn(page) (unsigned long)((page) - vmemmap) |
53 | 53 | ||
54 | #elif defined(CONFIG_SPARSEMEM) | 54 | #elif defined(CONFIG_SPARSEMEM) |
55 | /* | 55 | /* |
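The memory_model.h hunk adds a cast to __page_to_pfn() for the CONFIG_SPARSEMEM_VMEMMAP case. The pfn is simply the page's index relative to the vmemmap base, but pointer subtraction yields a signed ptrdiff_t, so the cast makes the macro return the unsigned long that pfn consumers expect. The same calculation written out as a function, as a sketch only:

#include <linux/mm_types.h>

static inline unsigned long example_page_to_pfn(const struct page *page,
						const struct page *vmemmap_base)
{
	/* index of the page in the virtually contiguous vmemmap array */
	return (unsigned long)(page - vmemmap_base);
}
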
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h index fd70adbb3566..5e310c8d8e2f 100644 --- a/include/net/iucv/iucv.h +++ b/include/net/iucv/iucv.h | |||
@@ -337,12 +337,35 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | |||
337 | * established paths. This function will deal with RMDATA messages | 337 | * established paths. This function will deal with RMDATA messages |
338 | * embedded in struct iucv_message as well. | 338 | * embedded in struct iucv_message as well. |
339 | * | 339 | * |
340 | * Locking: local_bh_enable/local_bh_disable | ||
341 | * | ||
340 | * Returns the result from the CP IUCV call. | 342 | * Returns the result from the CP IUCV call. |
341 | */ | 343 | */ |
342 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 344 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, |
343 | u8 flags, void *buffer, size_t size, size_t *residual); | 345 | u8 flags, void *buffer, size_t size, size_t *residual); |
344 | 346 | ||
345 | /** | 347 | /** |
348 | * __iucv_message_receive | ||
349 | * @path: address of iucv path structure | ||
350 | * @msg: address of iucv msg structure | ||
351 | * @flags: flags that affect how the message is received (IUCV_IPBUFLST) | ||
352 | * @buffer: address of data buffer or address of struct iucv_array | ||
353 | * @size: length of data buffer | ||
354 | * @residual: | ||
355 | * | ||
356 | * This function receives messages that are being sent to you over | ||
357 | * established paths. This function will deal with RMDATA messages | ||
358 | * embedded in struct iucv_message as well. | ||
359 | * | ||
360 | * Locking: no locking. | ||
361 | * | ||
362 | * Returns the result from the CP IUCV call. | ||
363 | */ | ||
364 | int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | ||
365 | u8 flags, void *buffer, size_t size, | ||
366 | size_t *residual); | ||
367 | |||
368 | /** | ||
346 | * iucv_message_reject | 369 | * iucv_message_reject |
347 | * @path: address of iucv path structure | 370 | * @path: address of iucv path structure |
348 | * @msg: address of iucv msg structure | 371 | * @msg: address of iucv msg structure |
@@ -386,12 +409,34 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | |||
386 | * transmitted is in a buffer and this is a one-way message and the | 409 | * transmitted is in a buffer and this is a one-way message and the |
387 | * receiver will not reply to the message. | 410 | * receiver will not reply to the message. |
388 | * | 411 | * |
412 | * Locking: local_bh_enable/local_bh_disable | ||
413 | * | ||
389 | * Returns the result from the CP IUCV call. | 414 | * Returns the result from the CP IUCV call. |
390 | */ | 415 | */ |
391 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 416 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, |
392 | u8 flags, u32 srccls, void *buffer, size_t size); | 417 | u8 flags, u32 srccls, void *buffer, size_t size); |
393 | 418 | ||
394 | /** | 419 | /** |
420 | * __iucv_message_send | ||
421 | * @path: address of iucv path structure | ||
422 | * @msg: address of iucv msg structure | ||
423 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | ||
424 | * @srccls: source class of message | ||
425 | * @buffer: address of data buffer or address of struct iucv_array | ||
426 | * @size: length of send buffer | ||
427 | * | ||
428 | * This function transmits data to another application. Data to be | ||
429 | * transmitted is in a buffer and this is a one-way message and the | ||
430 | * receiver will not reply to the message. | ||
431 | * | ||
432 | * Locking: no locking. | ||
433 | * | ||
434 | * Returns the result from the CP IUCV call. | ||
435 | */ | ||
436 | int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | ||
437 | u8 flags, u32 srccls, void *buffer, size_t size); | ||
438 | |||
439 | /** | ||
395 | * iucv_message_send2way | 440 | * iucv_message_send2way |
396 | * @path: address of iucv path structure | 441 | * @path: address of iucv path structure |
397 | * @msg: address of iucv msg structure | 442 | * @msg: address of iucv msg structure |
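The iucv.h additions above document a locking contract: iucv_message_receive() and iucv_message_send() disable softirqs around the CP call themselves, while the new __iucv_message_receive() and __iucv_message_send() variants perform no locking and are intended for callers that already run with softirqs disabled. A sketch of choosing between the two send variants; path, msg and buf are placeholders and the flags are left at zero:

#include <linux/types.h>
#include <net/iucv/iucv.h>

static int example_send(struct iucv_path *path, struct iucv_message *msg,
			void *buf, size_t len, int bh_already_disabled)
{
	if (bh_already_disabled)
		/* e.g. softirq context: avoid a redundant local_bh_disable() */
		return __iucv_message_send(path, msg, 0, 0, buf, len);
	/* process context: the wrapper brackets the call with local_bh_disable/enable */
	return iucv_message_send(path, msg, 0, 0, buf, len);
}
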
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 29f7baa25110..af3192d2a5a3 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> | 8 | * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "af_iucv" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/types.h> | 15 | #include <linux/types.h> |
13 | #include <linux/list.h> | 16 | #include <linux/list.h> |
@@ -616,6 +619,8 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
616 | struct iucv_sock *iucv = iucv_sk(sk); | 619 | struct iucv_sock *iucv = iucv_sk(sk); |
617 | struct sk_buff *skb; | 620 | struct sk_buff *skb; |
618 | struct iucv_message txmsg; | 621 | struct iucv_message txmsg; |
622 | char user_id[9]; | ||
623 | char appl_id[9]; | ||
619 | int err; | 624 | int err; |
620 | 625 | ||
621 | err = sock_error(sk); | 626 | err = sock_error(sk); |
@@ -651,8 +656,15 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
651 | err = iucv_message_send(iucv->path, &txmsg, 0, 0, | 656 | err = iucv_message_send(iucv->path, &txmsg, 0, 0, |
652 | (void *) skb->data, skb->len); | 657 | (void *) skb->data, skb->len); |
653 | if (err) { | 658 | if (err) { |
654 | if (err == 3) | 659 | if (err == 3) { |
655 | printk(KERN_ERR "AF_IUCV msg limit exceeded\n"); | 660 | user_id[8] = 0; |
661 | memcpy(user_id, iucv->dst_user_id, 8); | ||
662 | appl_id[8] = 0; | ||
663 | memcpy(appl_id, iucv->dst_name, 8); | ||
664 | pr_err("Application %s on z/VM guest %s" | ||
665 | " exceeds message limit\n", | ||
666 | user_id, appl_id); | ||
667 | } | ||
656 | skb_unlink(skb, &iucv->send_skb_q); | 668 | skb_unlink(skb, &iucv->send_skb_q); |
657 | err = -EPIPE; | 669 | err = -EPIPE; |
658 | goto fail; | 670 | goto fail; |
@@ -1190,7 +1202,8 @@ static int __init afiucv_init(void) | |||
1190 | int err; | 1202 | int err; |
1191 | 1203 | ||
1192 | if (!MACHINE_IS_VM) { | 1204 | if (!MACHINE_IS_VM) { |
1193 | printk(KERN_ERR "AF_IUCV connection needs VM as base\n"); | 1205 | pr_err("The af_iucv module cannot be loaded" |
1206 | " without z/VM\n"); | ||
1194 | err = -EPROTONOSUPPORT; | 1207 | err = -EPROTONOSUPPORT; |
1195 | goto out; | 1208 | goto out; |
1196 | } | 1209 | } |
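The af_iucv hunk above reports which communication partner exceeded the message limit. The z/VM user ID and application name are fixed 8-byte fields without a trailing NUL, so they are copied into 9-byte buffers and terminated before being handed to a %s conversion. A sketch of that pattern; the message text is illustrative:

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>

static void example_report_ids(const u8 *dst_user_id, const u8 *dst_name)
{
	char user_id[9];
	char appl_id[9];

	memcpy(user_id, dst_user_id, 8);
	user_id[8] = 0;		/* terminate the fixed-width field */
	memcpy(appl_id, dst_name, 8);
	appl_id[8] = 0;
	pr_err("Message limit exceeded (user ID %s, application %s)\n",
	       user_id, appl_id);
}
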
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index d7b54b5bfa69..8f57d4f4328a 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -30,6 +30,9 @@ | |||
30 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 30 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #define KMSG_COMPONENT "iucv" | ||
34 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
35 | |||
33 | #include <linux/module.h> | 36 | #include <linux/module.h> |
34 | #include <linux/moduleparam.h> | 37 | #include <linux/moduleparam.h> |
35 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
@@ -424,8 +427,8 @@ static void iucv_declare_cpu(void *data) | |||
424 | err = "Paging or storage error"; | 427 | err = "Paging or storage error"; |
425 | break; | 428 | break; |
426 | } | 429 | } |
427 | printk(KERN_WARNING "iucv_register: iucv_declare_buffer " | 430 | pr_warning("Defining an interrupt buffer on CPU %i" |
428 | "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err); | 431 | " failed with 0x%02x (%s)\n", cpu, rc, err); |
429 | return; | 432 | return; |
430 | } | 433 | } |
431 | 434 | ||
@@ -957,7 +960,52 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | |||
957 | EXPORT_SYMBOL(iucv_message_purge); | 960 | EXPORT_SYMBOL(iucv_message_purge); |
958 | 961 | ||
959 | /** | 962 | /** |
960 | * iucv_message_receive | 963 | * iucv_message_receive_iprmdata |
964 | * @path: address of iucv path structure | ||
965 | * @msg: address of iucv msg structure | ||
966 | * @flags: how the message is received (IUCV_IPBUFLST) | ||
967 | * @buffer: address of data buffer or address of struct iucv_array | ||
968 | * @size: length of data buffer | ||
969 | * @residual: | ||
970 | * | ||
971 | * Internal function used by iucv_message_receive and __iucv_message_receive | ||
972 | * to receive RMDATA data stored in struct iucv_message. | ||
973 | */ | ||
974 | static int iucv_message_receive_iprmdata(struct iucv_path *path, | ||
975 | struct iucv_message *msg, | ||
976 | u8 flags, void *buffer, | ||
977 | size_t size, size_t *residual) | ||
978 | { | ||
979 | struct iucv_array *array; | ||
980 | u8 *rmmsg; | ||
981 | size_t copy; | ||
982 | |||
983 | /* | ||
984 | * Message is 8 bytes long and has been stored to the | ||
985 | * message descriptor itself. | ||
986 | */ | ||
987 | if (residual) | ||
988 | *residual = abs(size - 8); | ||
989 | rmmsg = msg->rmmsg; | ||
990 | if (flags & IUCV_IPBUFLST) { | ||
991 | /* Copy to struct iucv_array. */ | ||
992 | size = (size < 8) ? size : 8; | ||
993 | for (array = buffer; size > 0; array++) { | ||
994 | copy = min_t(size_t, size, array->length); | ||
995 | memcpy((u8 *)(addr_t) array->address, | ||
996 | rmmsg, copy); | ||
997 | rmmsg += copy; | ||
998 | size -= copy; | ||
999 | } | ||
1000 | } else { | ||
1001 | /* Copy to direct buffer. */ | ||
1002 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); | ||
1003 | } | ||
1004 | return 0; | ||
1005 | } | ||
1006 | |||
1007 | /** | ||
1008 | * __iucv_message_receive | ||
961 | * @path: address of iucv path structure | 1009 | * @path: address of iucv path structure |
962 | * @msg: address of iucv msg structure | 1010 | * @msg: address of iucv msg structure |
963 | * @flags: how the message is received (IUCV_IPBUFLST) | 1011 | * @flags: how the message is received (IUCV_IPBUFLST) |
@@ -969,44 +1017,19 @@ EXPORT_SYMBOL(iucv_message_purge); | |||
969 | * established paths. This function will deal with RMDATA messages | 1017 | * established paths. This function will deal with RMDATA messages |
970 | * embedded in struct iucv_message as well. | 1018 | * embedded in struct iucv_message as well. |
971 | * | 1019 | * |
1020 | * Locking: no locking | ||
1021 | * | ||
972 | * Returns the result from the CP IUCV call. | 1022 | * Returns the result from the CP IUCV call. |
973 | */ | 1023 | */ |
974 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | 1024 | int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, |
975 | u8 flags, void *buffer, size_t size, size_t *residual) | 1025 | u8 flags, void *buffer, size_t size, size_t *residual) |
976 | { | 1026 | { |
977 | union iucv_param *parm; | 1027 | union iucv_param *parm; |
978 | struct iucv_array *array; | ||
979 | u8 *rmmsg; | ||
980 | size_t copy; | ||
981 | int rc; | 1028 | int rc; |
982 | 1029 | ||
983 | if (msg->flags & IUCV_IPRMDATA) { | 1030 | if (msg->flags & IUCV_IPRMDATA) |
984 | /* | 1031 | return iucv_message_receive_iprmdata(path, msg, flags, |
985 | * Message is 8 bytes long and has been stored to the | 1032 | buffer, size, residual); |
986 | * message descriptor itself. | ||
987 | */ | ||
988 | rc = (size < 8) ? 5 : 0; | ||
989 | if (residual) | ||
990 | *residual = abs(size - 8); | ||
991 | rmmsg = msg->rmmsg; | ||
992 | if (flags & IUCV_IPBUFLST) { | ||
993 | /* Copy to struct iucv_array. */ | ||
994 | size = (size < 8) ? size : 8; | ||
995 | for (array = buffer; size > 0; array++) { | ||
996 | copy = min_t(size_t, size, array->length); | ||
997 | memcpy((u8 *)(addr_t) array->address, | ||
998 | rmmsg, copy); | ||
999 | rmmsg += copy; | ||
1000 | size -= copy; | ||
1001 | } | ||
1002 | } else { | ||
1003 | /* Copy to direct buffer. */ | ||
1004 | memcpy(buffer, rmmsg, min_t(size_t, size, 8)); | ||
1005 | } | ||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | local_bh_disable(); | ||
1010 | parm = iucv_param[smp_processor_id()]; | 1033 | parm = iucv_param[smp_processor_id()]; |
1011 | memset(parm, 0, sizeof(union iucv_param)); | 1034 | memset(parm, 0, sizeof(union iucv_param)); |
1012 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 1035 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; |
@@ -1022,6 +1045,37 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | |||
1022 | if (residual) | 1045 | if (residual) |
1023 | *residual = parm->db.ipbfln1f; | 1046 | *residual = parm->db.ipbfln1f; |
1024 | } | 1047 | } |
1048 | return rc; | ||
1049 | } | ||
1050 | EXPORT_SYMBOL(__iucv_message_receive); | ||
1051 | |||
1052 | /** | ||
1053 | * iucv_message_receive | ||
1054 | * @path: address of iucv path structure | ||
1055 | * @msg: address of iucv msg structure | ||
1056 | * @flags: how the message is received (IUCV_IPBUFLST) | ||
1057 | * @buffer: address of data buffer or address of struct iucv_array | ||
1058 | * @size: length of data buffer | ||
1059 | * @residual: | ||
1060 | * | ||
1061 | * This function receives messages that are being sent to you over | ||
1062 | * established paths. This function will deal with RMDATA messages | ||
1063 | * embedded in struct iucv_message as well. | ||
1064 | * | ||
1065 | * Locking: local_bh_enable/local_bh_disable | ||
1066 | * | ||
1067 | * Returns the result from the CP IUCV call. | ||
1068 | */ | ||
1069 | int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | ||
1070 | u8 flags, void *buffer, size_t size, size_t *residual) | ||
1071 | { | ||
1072 | int rc; | ||
1073 | |||
1074 | if (msg->flags & IUCV_IPRMDATA) | ||
1075 | return iucv_message_receive_iprmdata(path, msg, flags, | ||
1076 | buffer, size, residual); | ||
1077 | local_bh_disable(); | ||
1078 | rc = __iucv_message_receive(path, msg, flags, buffer, size, residual); | ||
1025 | local_bh_enable(); | 1079 | local_bh_enable(); |
1026 | return rc; | 1080 | return rc; |
1027 | } | 1081 | } |
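The receive path above is split into iucv_message_receive_iprmdata() plus locked and unlocked wrappers, and the IPRMDATA copy loop shows the struct iucv_array layout used for IUCV_IPBUFLST buffer lists: each entry holds a buffer address and length. A hedged sketch of a caller receiving into two fragments through such a list; the flag handling and the total-size argument are assumptions based only on the code above:

#include <linux/types.h>
#include <net/iucv/iucv.h>

static int example_scatter_receive(struct iucv_path *path,
				   struct iucv_message *msg,
				   void *frag1, u32 len1,
				   void *frag2, u32 len2)
{
	struct iucv_array list[2];
	size_t residual = 0;

	list[0].address = (u32)(unsigned long) frag1;
	list[0].length = len1;
	list[1].address = (u32)(unsigned long) frag2;
	list[1].length = len2;

	/* buffer points at the iucv_array list, size is the combined length */
	return iucv_message_receive(path, msg, IUCV_IPBUFLST, list,
				    len1 + len2, &residual);
}
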
@@ -1101,7 +1155,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | |||
1101 | EXPORT_SYMBOL(iucv_message_reply); | 1155 | EXPORT_SYMBOL(iucv_message_reply); |
1102 | 1156 | ||
1103 | /** | 1157 | /** |
1104 | * iucv_message_send | 1158 | * __iucv_message_send |
1105 | * @path: address of iucv path structure | 1159 | * @path: address of iucv path structure |
1106 | * @msg: address of iucv msg structure | 1160 | * @msg: address of iucv msg structure |
1107 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | 1161 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) |
@@ -1113,15 +1167,16 @@ EXPORT_SYMBOL(iucv_message_reply); | |||
1113 | * transmitted is in a buffer and this is a one-way message and the | 1167 | * transmitted is in a buffer and this is a one-way message and the |
1114 | * receiver will not reply to the message. | 1168 | * receiver will not reply to the message. |
1115 | * | 1169 | * |
1170 | * Locking: no locking | ||
1171 | * | ||
1116 | * Returns the result from the CP IUCV call. | 1172 | * Returns the result from the CP IUCV call. |
1117 | */ | 1173 | */ |
1118 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | 1174 | int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, |
1119 | u8 flags, u32 srccls, void *buffer, size_t size) | 1175 | u8 flags, u32 srccls, void *buffer, size_t size) |
1120 | { | 1176 | { |
1121 | union iucv_param *parm; | 1177 | union iucv_param *parm; |
1122 | int rc; | 1178 | int rc; |
1123 | 1179 | ||
1124 | local_bh_disable(); | ||
1125 | parm = iucv_param[smp_processor_id()]; | 1180 | parm = iucv_param[smp_processor_id()]; |
1126 | memset(parm, 0, sizeof(union iucv_param)); | 1181 | memset(parm, 0, sizeof(union iucv_param)); |
1127 | if (flags & IUCV_IPRMDATA) { | 1182 | if (flags & IUCV_IPRMDATA) { |
@@ -1144,6 +1199,34 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | |||
1144 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 1199 | rc = iucv_call_b2f0(IUCV_SEND, parm); |
1145 | if (!rc) | 1200 | if (!rc) |
1146 | msg->id = parm->db.ipmsgid; | 1201 | msg->id = parm->db.ipmsgid; |
1202 | return rc; | ||
1203 | } | ||
1204 | EXPORT_SYMBOL(__iucv_message_send); | ||
1205 | |||
1206 | /** | ||
1207 | * iucv_message_send | ||
1208 | * @path: address of iucv path structure | ||
1209 | * @msg: address of iucv msg structure | ||
1210 | * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST) | ||
1211 | * @srccls: source class of message | ||
1212 | * @buffer: address of send buffer or address of struct iucv_array | ||
1213 | * @size: length of send buffer | ||
1214 | * | ||
1215 | * This function transmits data to another application. Data to be | ||
1216 | * transmitted is in a buffer and this is a one-way message and the | ||
1217 | * receiver will not reply to the message. | ||
1218 | * | ||
1219 | * Locking: local_bh_enable/local_bh_disable | ||
1220 | * | ||
1221 | * Returns the result from the CP IUCV call. | ||
1222 | */ | ||
1223 | int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | ||
1224 | u8 flags, u32 srccls, void *buffer, size_t size) | ||
1225 | { | ||
1226 | int rc; | ||
1227 | |||
1228 | local_bh_disable(); | ||
1229 | rc = __iucv_message_send(path, msg, flags, srccls, buffer, size); | ||
1147 | local_bh_enable(); | 1230 | local_bh_enable(); |
1148 | return rc; | 1231 | return rc; |
1149 | } | 1232 | } |
@@ -1572,7 +1655,7 @@ static void iucv_external_interrupt(u16 code) | |||
1572 | BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); | 1655 | BUG_ON(p->iptype < 0x01 || p->iptype > 0x09); |
1573 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); | 1656 | work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC); |
1574 | if (!work) { | 1657 | if (!work) { |
1575 | printk(KERN_WARNING "iucv_external_interrupt: out of memory\n"); | 1658 | pr_warning("iucv_external_interrupt: out of memory\n"); |
1576 | return; | 1659 | return; |
1577 | } | 1660 | } |
1578 | memcpy(&work->data, p, sizeof(work->data)); | 1661 | memcpy(&work->data, p, sizeof(work->data)); |