author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-06 17:45:32 -0500
---|---|---
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-02-06 17:45:32 -0500
commit | 02aedd69e2ef31b0fca1e8960cb1e7fd0c343110 (patch) |
tree | c096ab87e0832e8ddda45241b422c0064cfe0cbb |
parent | 9ad0830f307bcd8dc285cfae58998d43b21727f4 (diff) |
parent | 4d284cac76d0bfebc42d76b428c4e44d921200a9 (diff) |
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (37 commits)
[S390] Avoid excessive inlining.
[S390] Mark kernel text section read-only.
[S390] Convert memory detection into C code.
[S390] Calibrate delay and bogomips.
[S390] Hypervisor filesystem (s390_hypfs) for z/VM
[S390] Add crypto support for 3592 tape devices
[S390] boot from NSS support
[S390] Support for s390 Pseudo Random Number Generator
[S390] ETR support.
[S390] noexec protection
[S390] move crypto options and some cleanup.
[S390] cio: Don't spam debug feature.
[S390] Cleanup of CHSC event handling.
[S390] cio: declare hardware structures packed.
[S390] Add set_fs(USER_DS) to start_thread().
[S390] cio: Catch operand exceptions on stsch.
[S390] Fix register usage description.
[S390] kretprobe_trampoline_holder() in wrong section.
[S390] Fix kprobes breakpoint handling.
[S390] Update maintainers file.
...
168 files changed, 5520 insertions, 2171 deletions
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt index 3f9ddbc23b27..0993969609cf 100644 --- a/Documentation/s390/Debugging390.txt +++ b/Documentation/s390/Debugging390.txt | |||
@@ -480,7 +480,7 @@ r2 argument 0 / return value 0 call-clobbered | |||
480 | r3 argument 1 / return value 1 (if long long) call-clobbered | 480 | r3 argument 1 / return value 1 (if long long) call-clobbered |
481 | r4 argument 2 call-clobbered | 481 | r4 argument 2 call-clobbered |
482 | r5 argument 3 call-clobbered | 482 | r5 argument 3 call-clobbered |
483 | r6 argument 5 saved | 483 | r6 argument 4 saved |
484 | r7 pointer-to arguments 5 to ... saved | 484 | r7 pointer-to arguments 5 to ... saved |
485 | r8 this & that saved | 485 | r8 this & that saved |
486 | r9 this & that saved | 486 | r9 this & that saved |
diff --git a/MAINTAINERS b/MAINTAINERS index 0ad8803a0c75..3d125e7a809e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2791,7 +2791,7 @@ M: schwidefsky@de.ibm.com | |||
2791 | P: Heiko Carstens | 2791 | P: Heiko Carstens |
2792 | M: heiko.carstens@de.ibm.com | 2792 | M: heiko.carstens@de.ibm.com |
2793 | M: linux390@de.ibm.com | 2793 | M: linux390@de.ibm.com |
2794 | L: linux-390@vm.marist.edu | 2794 | L: linux-s390@vger.kernel.org |
2795 | W: http://www.ibm.com/developerworks/linux/linux390/ | 2795 | W: http://www.ibm.com/developerworks/linux/linux390/ |
2796 | S: Supported | 2796 | S: Supported |
2797 | 2797 | ||
@@ -2799,7 +2799,7 @@ S390 NETWORK DRIVERS | |||
2799 | P: Frank Pavlic | 2799 | P: Frank Pavlic |
2800 | M: fpavlic@de.ibm.com | 2800 | M: fpavlic@de.ibm.com |
2801 | M: linux390@de.ibm.com | 2801 | M: linux390@de.ibm.com |
2802 | L: linux-390@vm.marist.edu | 2802 | L: linux-s390@vger.kernel.org |
2803 | W: http://www.ibm.com/developerworks/linux/linux390/ | 2803 | W: http://www.ibm.com/developerworks/linux/linux390/ |
2804 | S: Supported | 2804 | S: Supported |
2805 | 2805 | ||
@@ -2807,7 +2807,7 @@ S390 ZFCP DRIVER | |||
2807 | P: Swen Schillig | 2807 | P: Swen Schillig |
2808 | M: swen@vnet.ibm.com | 2808 | M: swen@vnet.ibm.com |
2809 | M: linux390@de.ibm.com | 2809 | M: linux390@de.ibm.com |
2810 | L: linux-390@vm.marist.edu | 2810 | L: linux-s390@vger.kernel.org |
2811 | W: http://www.ibm.com/developerworks/linux/linux390/ | 2811 | W: http://www.ibm.com/developerworks/linux/linux390/ |
2812 | S: Supported | 2812 | S: Supported |
2813 | 2813 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 12272361c018..eaed402ad346 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -34,10 +34,6 @@ config GENERIC_HWEIGHT | |||
34 | bool | 34 | bool |
35 | default y | 35 | default y |
36 | 36 | ||
37 | config GENERIC_CALIBRATE_DELAY | ||
38 | bool | ||
39 | default y | ||
40 | |||
41 | config GENERIC_TIME | 37 | config GENERIC_TIME |
42 | def_bool y | 38 | def_bool y |
43 | 39 | ||
@@ -134,6 +130,31 @@ config AUDIT_ARCH | |||
134 | bool | 130 | bool |
135 | default y | 131 | default y |
136 | 132 | ||
133 | config S390_SWITCH_AMODE | ||
134 | bool "Switch kernel/user addressing modes" | ||
135 | help | ||
136 | This option allows switching the addressing modes of kernel and user | ||
137 | space. The kernel parameter switch_amode=on will enable this feature; | ||
138 | the default is disabled. Enabling this (via kernel parameter) on machines | ||
139 | earlier than IBM System z9-109 EC/BC will reduce system performance. | ||
140 | |||
141 | Note that this option will also be selected by selecting the execute | ||
142 | protection option below. Enabling the execute protection via the | ||
143 | noexec kernel parameter will also switch the addressing modes, | ||
144 | independent of the switch_amode kernel parameter. | ||
145 | |||
146 | |||
147 | config S390_EXEC_PROTECT | ||
148 | bool "Data execute protection" | ||
149 | select S390_SWITCH_AMODE | ||
150 | help | ||
151 | This option allows enabling buffer overflow protection for user | ||
152 | space programs, and it also selects the addressing mode option above. | ||
153 | The kernel parameter noexec=on will enable this feature and also | ||
154 | switch the addressing modes; the default is disabled. Enabling this (via | ||
155 | kernel parameter) on machines earlier than IBM System z9-109 EC/BC | ||
156 | will reduce system performance. | ||
157 | |||
137 | comment "Code generation options" | 158 | comment "Code generation options" |
138 | 159 | ||
139 | choice | 160 | choice |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index b8c237290263..c9da7d16145e 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -81,7 +81,7 @@ static struct ctl_table appldata_dir_table[] = { | |||
81 | /* | 81 | /* |
82 | * Timer | 82 | * Timer |
83 | */ | 83 | */ |
84 | DEFINE_PER_CPU(struct vtimer_list, appldata_timer); | 84 | static DEFINE_PER_CPU(struct vtimer_list, appldata_timer); |
85 | static atomic_t appldata_expire_count = ATOMIC_INIT(0); | 85 | static atomic_t appldata_expire_count = ATOMIC_INIT(0); |
86 | 86 | ||
87 | static DEFINE_SPINLOCK(appldata_timer_lock); | 87 | static DEFINE_SPINLOCK(appldata_timer_lock); |
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c index 8aea3698a77b..4ca615788702 100644 --- a/arch/s390/appldata/appldata_mem.c +++ b/arch/s390/appldata/appldata_mem.c | |||
@@ -36,7 +36,7 @@ | |||
36 | * book: | 36 | * book: |
37 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | 37 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml |
38 | */ | 38 | */ |
39 | struct appldata_mem_data { | 39 | static struct appldata_mem_data { |
40 | u64 timestamp; | 40 | u64 timestamp; |
41 | u32 sync_count_1; /* after VM collected the record data, */ | 41 | u32 sync_count_1; /* after VM collected the record data, */ |
42 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | 42 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the |
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c index 075e619bf37d..f64b8c867ae2 100644 --- a/arch/s390/appldata/appldata_net_sum.c +++ b/arch/s390/appldata/appldata_net_sum.c | |||
@@ -34,7 +34,7 @@ | |||
34 | * book: | 34 | * book: |
35 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | 35 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml |
36 | */ | 36 | */ |
37 | struct appldata_net_sum_data { | 37 | static struct appldata_net_sum_data { |
38 | u64 timestamp; | 38 | u64 timestamp; |
39 | u32 sync_count_1; /* after VM collected the record data, */ | 39 | u32 sync_count_1; /* after VM collected the record data, */ |
40 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | 40 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the |
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig new file mode 100644 index 000000000000..99ff9f08e4d7 --- /dev/null +++ b/arch/s390/crypto/Kconfig | |||
@@ -0,0 +1,60 @@ | |||
1 | config CRYPTO_SHA1_S390 | ||
2 | tristate "SHA1 digest algorithm" | ||
3 | depends on S390 | ||
4 | select CRYPTO_ALGAPI | ||
5 | help | ||
6 | This is the s390 hardware accelerated implementation of the | ||
7 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). | ||
8 | |||
9 | config CRYPTO_SHA256_S390 | ||
10 | tristate "SHA256 digest algorithm" | ||
11 | depends on S390 | ||
12 | select CRYPTO_ALGAPI | ||
13 | help | ||
14 | This is the s390 hardware accelerated implementation of the | ||
15 | SHA256 secure hash standard (DFIPS 180-2). | ||
16 | |||
17 | This version of SHA implements a 256 bit hash with 128 bits of | ||
18 | security against collision attacks. | ||
19 | |||
20 | config CRYPTO_DES_S390 | ||
21 | tristate "DES and Triple DES cipher algorithms" | ||
22 | depends on S390 | ||
23 | select CRYPTO_ALGAPI | ||
24 | select CRYPTO_BLKCIPHER | ||
25 | help | ||
26 | This is the s390 hardware accelerated implementation of the | ||
27 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). | ||
28 | |||
29 | config CRYPTO_AES_S390 | ||
30 | tristate "AES cipher algorithms" | ||
31 | depends on S390 | ||
32 | select CRYPTO_ALGAPI | ||
33 | select CRYPTO_BLKCIPHER | ||
34 | help | ||
35 | This is the s390 hardware accelerated implementation of the | ||
36 | AES cipher algorithms (FIPS-197). AES uses the Rijndael | ||
37 | algorithm. | ||
38 | |||
39 | Rijndael appears to be consistently a very good performer in | ||
40 | both hardware and software across a wide range of computing | ||
41 | environments regardless of its use in feedback or non-feedback | ||
42 | modes. Its key setup time is excellent, and its key agility is | ||
43 | good. Rijndael's very low memory requirements make it very well | ||
44 | suited for restricted-space environments, in which it also | ||
45 | demonstrates excellent performance. Rijndael's operations are | ||
46 | among the easiest to defend against power and timing attacks. | ||
47 | |||
48 | On s390 the System z9-109 currently only supports the key size | ||
50 | of 128 bits. | ||
50 | |||
51 | config S390_PRNG | ||
52 | tristate "Pseudo random number generator device driver" | ||
53 | depends on S390 | ||
54 | default "m" | ||
55 | help | ||
56 | Select this option if you want to use the s390 pseudo random number | ||
57 | generator. The PRNG is part of the cryptographic processor functions | ||
58 | and uses triple-DES to generate secure random numbers like the | ||
59 | ANSI X9.17 standard. The PRNG is usable via the char device | ||
60 | /dev/prandom. | ||
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile index bfe2541dc5cf..14e552c5cc43 100644 --- a/arch/s390/crypto/Makefile +++ b/arch/s390/crypto/Makefile | |||
@@ -6,5 +6,4 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o | |||
6 | obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o | 6 | obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o |
7 | obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o | 7 | obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o |
8 | obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o | 8 | obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o |
9 | 9 | obj-$(CONFIG_S390_PRNG) += prng.o | |
10 | obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o | ||
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 15c9eec02928..91636353f6f0 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * s390 implementation of the AES Cipher Algorithm. | 4 | * s390 implementation of the AES Cipher Algorithm. |
5 | * | 5 | * |
6 | * s390 Version: | 6 | * s390 Version: |
7 | * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2005,2007 |
8 | * Author(s): Jan Glauber (jang@de.ibm.com) | 8 | * Author(s): Jan Glauber (jang@de.ibm.com) |
9 | * | 9 | * |
10 | * Derived from "crypto/aes.c" | 10 | * Derived from "crypto/aes.c" |
@@ -27,9 +27,11 @@ | |||
27 | /* data block size for all key lengths */ | 27 | /* data block size for all key lengths */ |
28 | #define AES_BLOCK_SIZE 16 | 28 | #define AES_BLOCK_SIZE 16 |
29 | 29 | ||
30 | int has_aes_128 = 0; | 30 | #define AES_KEYLEN_128 1 |
31 | int has_aes_192 = 0; | 31 | #define AES_KEYLEN_192 2 |
32 | int has_aes_256 = 0; | 32 | #define AES_KEYLEN_256 4 |
33 | |||
34 | static char keylen_flag = 0; | ||
33 | 35 | ||
34 | struct s390_aes_ctx { | 36 | struct s390_aes_ctx { |
35 | u8 iv[AES_BLOCK_SIZE]; | 37 | u8 iv[AES_BLOCK_SIZE]; |
@@ -47,20 +49,19 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
47 | 49 | ||
48 | switch (key_len) { | 50 | switch (key_len) { |
49 | case 16: | 51 | case 16: |
50 | if (!has_aes_128) | 52 | if (!(keylen_flag & AES_KEYLEN_128)) |
51 | goto fail; | 53 | goto fail; |
52 | break; | 54 | break; |
53 | case 24: | 55 | case 24: |
54 | if (!has_aes_192) | 56 | if (!(keylen_flag & AES_KEYLEN_192)) |
55 | goto fail; | 57 | goto fail; |
56 | 58 | ||
57 | break; | 59 | break; |
58 | case 32: | 60 | case 32: |
59 | if (!has_aes_256) | 61 | if (!(keylen_flag & AES_KEYLEN_256)) |
60 | goto fail; | 62 | goto fail; |
61 | break; | 63 | break; |
62 | default: | 64 | default: |
63 | /* invalid key length */ | ||
64 | goto fail; | 65 | goto fail; |
65 | break; | 66 | break; |
66 | } | 67 | } |
@@ -322,34 +323,32 @@ static int __init aes_init(void) | |||
322 | int ret; | 323 | int ret; |
323 | 324 | ||
324 | if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) | 325 | if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) |
325 | has_aes_128 = 1; | 326 | keylen_flag |= AES_KEYLEN_128; |
326 | if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) | 327 | if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) |
327 | has_aes_192 = 1; | 328 | keylen_flag |= AES_KEYLEN_192; |
328 | if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) | 329 | if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) |
329 | has_aes_256 = 1; | 330 | keylen_flag |= AES_KEYLEN_256; |
331 | |||
332 | if (!keylen_flag) | ||
333 | return -EOPNOTSUPP; | ||
330 | 334 | ||
331 | if (!has_aes_128 && !has_aes_192 && !has_aes_256) | 335 | /* z9 109 and z9 BC/EC only support 128 bit key length */ |
332 | return -ENOSYS; | 336 | if (keylen_flag == AES_KEYLEN_128) |
337 | printk(KERN_INFO | ||
338 | "aes_s390: hardware acceleration only available for" | ||
339 | "128 bit keys\n"); | ||
333 | 340 | ||
334 | ret = crypto_register_alg(&aes_alg); | 341 | ret = crypto_register_alg(&aes_alg); |
335 | if (ret != 0) { | 342 | if (ret) |
336 | printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n"); | ||
337 | goto aes_err; | 343 | goto aes_err; |
338 | } | ||
339 | 344 | ||
340 | ret = crypto_register_alg(&ecb_aes_alg); | 345 | ret = crypto_register_alg(&ecb_aes_alg); |
341 | if (ret != 0) { | 346 | if (ret) |
342 | printk(KERN_INFO | ||
343 | "crypt_s390: ecb-aes-s390 couldn't be loaded.\n"); | ||
344 | goto ecb_aes_err; | 347 | goto ecb_aes_err; |
345 | } | ||
346 | 348 | ||
347 | ret = crypto_register_alg(&cbc_aes_alg); | 349 | ret = crypto_register_alg(&cbc_aes_alg); |
348 | if (ret != 0) { | 350 | if (ret) |
349 | printk(KERN_INFO | ||
350 | "crypt_s390: cbc-aes-s390 couldn't be loaded.\n"); | ||
351 | goto cbc_aes_err; | 351 | goto cbc_aes_err; |
352 | } | ||
353 | 352 | ||
354 | out: | 353 | out: |
355 | return ret; | 354 | return ret; |
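The aes_s390 hunk above folds the three separate has_aes_* integers into one keylen_flag bitmask, so "no hardware support at all" (!keylen_flag) and "only 128-bit keys" (keylen_flag == AES_KEYLEN_128) each become a single comparison. Below is a minimal stand-alone sketch of that pattern; the probe functions are hard-coded stand-ins for crypt_s390_func_available(KM_AES_*_ENCRYPT) and are not part of any kernel API.

```c
#include <stdio.h>

#define AES_KEYLEN_128	1
#define AES_KEYLEN_192	2
#define AES_KEYLEN_256	4

/* hard-coded stand-ins for the hardware capability probes */
static int have_aes_128(void) { return 1; }
static int have_aes_192(void) { return 0; }
static int have_aes_256(void) { return 0; }

int main(void)
{
	char keylen_flag = 0;

	if (have_aes_128())
		keylen_flag |= AES_KEYLEN_128;
	if (have_aes_192())
		keylen_flag |= AES_KEYLEN_192;
	if (have_aes_256())
		keylen_flag |= AES_KEYLEN_256;

	if (!keylen_flag)		/* no AES support at all */
		printf("would return -EOPNOTSUPP\n");
	else if (keylen_flag == AES_KEYLEN_128)
		printf("hardware acceleration only available for 128 bit keys\n");
	else
		printf("keylen_flag = %#x\n", keylen_flag);
	return 0;
}
```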
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index 2b137089f625..2775d2618332 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h | |||
@@ -3,8 +3,9 @@ | |||
3 | * | 3 | * |
4 | * Support for s390 cryptographic instructions. | 4 | * Support for s390 cryptographic instructions. |
5 | * | 5 | * |
6 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2003,2007 |
7 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 7 | * Author(s): Thomas Spatzier |
8 | * Jan Glauber (jan.glauber@de.ibm.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -32,7 +33,8 @@ enum crypt_s390_operations { | |||
32 | CRYPT_S390_KMAC = 0x0500 | 33 | CRYPT_S390_KMAC = 0x0500 |
33 | }; | 34 | }; |
34 | 35 | ||
35 | /* function codes for KM (CIPHER MESSAGE) instruction | 36 | /* |
37 | * function codes for KM (CIPHER MESSAGE) instruction | ||
36 | * 0x80 is the decipher modifier bit | 38 | * 0x80 is the decipher modifier bit |
37 | */ | 39 | */ |
38 | enum crypt_s390_km_func { | 40 | enum crypt_s390_km_func { |
@@ -51,7 +53,8 @@ enum crypt_s390_km_func { | |||
51 | KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, | 53 | KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, |
52 | }; | 54 | }; |
53 | 55 | ||
54 | /* function codes for KMC (CIPHER MESSAGE WITH CHAINING) | 56 | /* |
57 | * function codes for KMC (CIPHER MESSAGE WITH CHAINING) | ||
55 | * instruction | 58 | * instruction |
56 | */ | 59 | */ |
57 | enum crypt_s390_kmc_func { | 60 | enum crypt_s390_kmc_func { |
@@ -68,9 +71,11 @@ enum crypt_s390_kmc_func { | |||
68 | KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, | 71 | KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, |
69 | KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, | 72 | KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, |
70 | KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, | 73 | KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, |
74 | KMC_PRNG = CRYPT_S390_KMC | 0x43, | ||
71 | }; | 75 | }; |
72 | 76 | ||
73 | /* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) | 77 | /* |
78 | * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) | ||
74 | * instruction | 79 | * instruction |
75 | */ | 80 | */ |
76 | enum crypt_s390_kimd_func { | 81 | enum crypt_s390_kimd_func { |
@@ -79,7 +84,8 @@ enum crypt_s390_kimd_func { | |||
79 | KIMD_SHA_256 = CRYPT_S390_KIMD | 2, | 84 | KIMD_SHA_256 = CRYPT_S390_KIMD | 2, |
80 | }; | 85 | }; |
81 | 86 | ||
82 | /* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) | 87 | /* |
88 | * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) | ||
83 | * instruction | 89 | * instruction |
84 | */ | 90 | */ |
85 | enum crypt_s390_klmd_func { | 91 | enum crypt_s390_klmd_func { |
@@ -88,7 +94,8 @@ enum crypt_s390_klmd_func { | |||
88 | KLMD_SHA_256 = CRYPT_S390_KLMD | 2, | 94 | KLMD_SHA_256 = CRYPT_S390_KLMD | 2, |
89 | }; | 95 | }; |
90 | 96 | ||
91 | /* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) | 97 | /* |
98 | * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) | ||
92 | * instruction | 99 | * instruction |
93 | */ | 100 | */ |
94 | enum crypt_s390_kmac_func { | 101 | enum crypt_s390_kmac_func { |
@@ -98,229 +105,219 @@ enum crypt_s390_kmac_func { | |||
98 | KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 | 105 | KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 |
99 | }; | 106 | }; |
100 | 107 | ||
101 | /* status word for s390 crypto instructions' QUERY functions */ | 108 | /** |
102 | struct crypt_s390_query_status { | 109 | * crypt_s390_km: |
103 | u64 high; | 110 | * @func: the function code passed to KM; see crypt_s390_km_func |
104 | u64 low; | 111 | * @param: address of parameter block; see POP for details on each func |
105 | }; | 112 | * @dest: address of destination memory area |
106 | 113 | * @src: address of source memory area | |
107 | /* | 114 | * @src_len: length of src operand in bytes |
115 | * | ||
108 | * Executes the KM (CIPHER MESSAGE) operation of the CPU. | 116 | * Executes the KM (CIPHER MESSAGE) operation of the CPU. |
109 | * @param func: the function code passed to KM; see crypt_s390_km_func | 117 | * |
110 | * @param param: address of parameter block; see POP for details on each func | 118 | * Returns -1 for failure, 0 for the query func, number of processed |
111 | * @param dest: address of destination memory area | 119 | * bytes for encryption/decryption funcs |
112 | * @param src: address of source memory area | ||
113 | * @param src_len: length of src operand in bytes | ||
114 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
115 | * for encryption/decryption funcs | ||
116 | */ | 120 | */ |
117 | static inline int | 121 | static inline int crypt_s390_km(long func, void *param, |
118 | crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) | 122 | u8 *dest, const u8 *src, long src_len) |
119 | { | 123 | { |
120 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 124 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
121 | register void* __param asm("1") = param; | 125 | register void *__param asm("1") = param; |
122 | register const u8* __src asm("2") = src; | 126 | register const u8 *__src asm("2") = src; |
123 | register long __src_len asm("3") = src_len; | 127 | register long __src_len asm("3") = src_len; |
124 | register u8* __dest asm("4") = dest; | 128 | register u8 *__dest asm("4") = dest; |
125 | int ret; | 129 | int ret; |
126 | 130 | ||
127 | asm volatile( | 131 | asm volatile( |
128 | "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ | 132 | "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ |
129 | "1: brc 1,0b \n" /* handle partial completion */ | 133 | "1: brc 1,0b \n" /* handle partial completion */ |
130 | " ahi %0,%h7\n" | 134 | " la %0,0\n" |
131 | "2: ahi %0,%h8\n" | 135 | "2:\n" |
132 | "3:\n" | 136 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
133 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
134 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) | 137 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) |
135 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 138 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
136 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
137 | if (ret < 0) | 139 | if (ret < 0) |
138 | return ret; | 140 | return ret; |
139 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 141 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
140 | } | 142 | } |
141 | 143 | ||
142 | /* | 144 | /** |
145 | * crypt_s390_kmc: | ||
146 | * @func: the function code passed to KM; see crypt_s390_kmc_func | ||
147 | * @param: address of parameter block; see POP for details on each func | ||
148 | * @dest: address of destination memory area | ||
149 | * @src: address of source memory area | ||
150 | * @src_len: length of src operand in bytes | ||
151 | * | ||
143 | * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. | 152 | * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. |
144 | * @param func: the function code passed to KM; see crypt_s390_kmc_func | 153 | * |
145 | * @param param: address of parameter block; see POP for details on each func | 154 | * Returns -1 for failure, 0 for the query func, number of processed |
146 | * @param dest: address of destination memory area | 155 | * bytes for encryption/decryption funcs |
147 | * @param src: address of source memory area | ||
148 | * @param src_len: length of src operand in bytes | ||
149 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
150 | * for encryption/decryption funcs | ||
151 | */ | 156 | */ |
152 | static inline int | 157 | static inline int crypt_s390_kmc(long func, void *param, |
153 | crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) | 158 | u8 *dest, const u8 *src, long src_len) |
154 | { | 159 | { |
155 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 160 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
156 | register void* __param asm("1") = param; | 161 | register void *__param asm("1") = param; |
157 | register const u8* __src asm("2") = src; | 162 | register const u8 *__src asm("2") = src; |
158 | register long __src_len asm("3") = src_len; | 163 | register long __src_len asm("3") = src_len; |
159 | register u8* __dest asm("4") = dest; | 164 | register u8 *__dest asm("4") = dest; |
160 | int ret; | 165 | int ret; |
161 | 166 | ||
162 | asm volatile( | 167 | asm volatile( |
163 | "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ | 168 | "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ |
164 | "1: brc 1,0b \n" /* handle partial completion */ | 169 | "1: brc 1,0b \n" /* handle partial completion */ |
165 | " ahi %0,%h7\n" | 170 | " la %0,0\n" |
166 | "2: ahi %0,%h8\n" | 171 | "2:\n" |
167 | "3:\n" | 172 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
168 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
169 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) | 173 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) |
170 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 174 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
171 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
172 | if (ret < 0) | 175 | if (ret < 0) |
173 | return ret; | 176 | return ret; |
174 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 177 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
175 | } | 178 | } |
176 | 179 | ||
177 | /* | 180 | /** |
181 | * crypt_s390_kimd: | ||
182 | * @func: the function code passed to KM; see crypt_s390_kimd_func | ||
183 | * @param: address of parameter block; see POP for details on each func | ||
184 | * @src: address of source memory area | ||
185 | * @src_len: length of src operand in bytes | ||
186 | * | ||
178 | * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation | 187 | * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation |
179 | * of the CPU. | 188 | * of the CPU. |
180 | * @param func: the function code passed to KM; see crypt_s390_kimd_func | 189 | * |
181 | * @param param: address of parameter block; see POP for details on each func | 190 | * Returns -1 for failure, 0 for the query func, number of processed |
182 | * @param src: address of source memory area | 191 | * bytes for digest funcs |
183 | * @param src_len: length of src operand in bytes | ||
184 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
185 | * for digest funcs | ||
186 | */ | 192 | */ |
187 | static inline int | 193 | static inline int crypt_s390_kimd(long func, void *param, |
188 | crypt_s390_kimd(long func, void* param, const u8* src, long src_len) | 194 | const u8 *src, long src_len) |
189 | { | 195 | { |
190 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 196 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
191 | register void* __param asm("1") = param; | 197 | register void *__param asm("1") = param; |
192 | register const u8* __src asm("2") = src; | 198 | register const u8 *__src asm("2") = src; |
193 | register long __src_len asm("3") = src_len; | 199 | register long __src_len asm("3") = src_len; |
194 | int ret; | 200 | int ret; |
195 | 201 | ||
196 | asm volatile( | 202 | asm volatile( |
197 | "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ | 203 | "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ |
198 | "1: brc 1,0b \n" /* handle partial completion */ | 204 | "1: brc 1,0b \n" /* handle partial completion */ |
199 | " ahi %0,%h6\n" | 205 | " la %0,0\n" |
200 | "2: ahi %0,%h7\n" | 206 | "2:\n" |
201 | "3:\n" | 207 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
202 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
203 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 208 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
204 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 209 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
205 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
206 | if (ret < 0) | 210 | if (ret < 0) |
207 | return ret; | 211 | return ret; |
208 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 212 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
209 | } | 213 | } |
210 | 214 | ||
211 | /* | 215 | /** |
216 | * crypt_s390_klmd: | ||
217 | * @func: the function code passed to KM; see crypt_s390_klmd_func | ||
218 | * @param: address of parameter block; see POP for details on each func | ||
219 | * @src: address of source memory area | ||
220 | * @src_len: length of src operand in bytes | ||
221 | * | ||
212 | * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. | 222 | * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. |
213 | * @param func: the function code passed to KM; see crypt_s390_klmd_func | 223 | * |
214 | * @param param: address of parameter block; see POP for details on each func | 224 | * Returns -1 for failure, 0 for the query func, number of processed |
215 | * @param src: address of source memory area | 225 | * bytes for digest funcs |
216 | * @param src_len: length of src operand in bytes | ||
217 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
218 | * for digest funcs | ||
219 | */ | 226 | */ |
220 | static inline int | 227 | static inline int crypt_s390_klmd(long func, void *param, |
221 | crypt_s390_klmd(long func, void* param, const u8* src, long src_len) | 228 | const u8 *src, long src_len) |
222 | { | 229 | { |
223 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 230 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
224 | register void* __param asm("1") = param; | 231 | register void *__param asm("1") = param; |
225 | register const u8* __src asm("2") = src; | 232 | register const u8 *__src asm("2") = src; |
226 | register long __src_len asm("3") = src_len; | 233 | register long __src_len asm("3") = src_len; |
227 | int ret; | 234 | int ret; |
228 | 235 | ||
229 | asm volatile( | 236 | asm volatile( |
230 | "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ | 237 | "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ |
231 | "1: brc 1,0b \n" /* handle partial completion */ | 238 | "1: brc 1,0b \n" /* handle partial completion */ |
232 | " ahi %0,%h6\n" | 239 | " la %0,0\n" |
233 | "2: ahi %0,%h7\n" | 240 | "2:\n" |
234 | "3:\n" | 241 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
235 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
236 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 242 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
237 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 243 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
238 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
239 | if (ret < 0) | 244 | if (ret < 0) |
240 | return ret; | 245 | return ret; |
241 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 246 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
242 | } | 247 | } |
243 | 248 | ||
244 | /* | 249 | /** |
250 | * crypt_s390_kmac: | ||
251 | * @func: the function code passed to KM; see crypt_s390_kmac_func | ||
252 | * @param: address of parameter block; see POP for details on each func | ||
253 | * @src: address of source memory area | ||
254 | * @src_len: length of src operand in bytes | ||
255 | * | ||
245 | * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation | 256 | * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation |
246 | * of the CPU. | 257 | * of the CPU. |
247 | * @param func: the function code passed to KM; see crypt_s390_klmd_func | 258 | * |
248 | * @param param: address of parameter block; see POP for details on each func | 259 | * Returns -1 for failure, 0 for the query func, number of processed |
249 | * @param src: address of source memory area | 260 | * bytes for digest funcs |
250 | * @param src_len: length of src operand in bytes | ||
251 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
252 | * for digest funcs | ||
253 | */ | 261 | */ |
254 | static inline int | 262 | static inline int crypt_s390_kmac(long func, void *param, |
255 | crypt_s390_kmac(long func, void* param, const u8* src, long src_len) | 263 | const u8 *src, long src_len) |
256 | { | 264 | { |
257 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 265 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
258 | register void* __param asm("1") = param; | 266 | register void *__param asm("1") = param; |
259 | register const u8* __src asm("2") = src; | 267 | register const u8 *__src asm("2") = src; |
260 | register long __src_len asm("3") = src_len; | 268 | register long __src_len asm("3") = src_len; |
261 | int ret; | 269 | int ret; |
262 | 270 | ||
263 | asm volatile( | 271 | asm volatile( |
264 | "0: .insn rre,0xb91e0000,%1,%1 \n" /* KMAC opcode */ | 272 | "0: .insn rre,0xb91e0000,%1,%1 \n" /* KMAC opcode */
265 | "1: brc 1,0b \n" /* handle partial completion */ | 273 | "1: brc 1,0b \n" /* handle partial completion */ |
266 | " ahi %0,%h6\n" | 274 | " la %0,0\n" |
267 | "2: ahi %0,%h7\n" | 275 | "2:\n" |
268 | "3:\n" | 276 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
269 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
270 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 277 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
271 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 278 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
272 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
273 | if (ret < 0) | 279 | if (ret < 0) |
274 | return ret; | 280 | return ret; |
275 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 281 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
276 | } | 282 | } |
277 | 283 | ||
278 | /** | 284 | /** |
285 | * crypt_s390_func_available: | ||
286 | * @func: the function code of the specific function; 0 if op in general | ||
287 | * | ||
279 | * Tests if a specific crypto function is implemented on the machine. | 288 | * Tests if a specific crypto function is implemented on the machine. |
280 | * @param func: the function code of the specific function; 0 if op in general | 289 | * |
281 | * @return 1 if func available; 0 if func or op in general not available | 290 | * Returns 1 if func available; 0 if func or op in general not available |
282 | */ | 291 | */ |
283 | static inline int | 292 | static inline int crypt_s390_func_available(int func) |
284 | crypt_s390_func_available(int func) | ||
285 | { | 293 | { |
294 | unsigned char status[16]; | ||
286 | int ret; | 295 | int ret; |
287 | 296 | ||
288 | struct crypt_s390_query_status status = { | 297 | switch (func & CRYPT_S390_OP_MASK) { |
289 | .high = 0, | 298 | case CRYPT_S390_KM: |
290 | .low = 0 | 299 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); |
291 | }; | 300 | break; |
292 | switch (func & CRYPT_S390_OP_MASK){ | 301 | case CRYPT_S390_KMC: |
293 | case CRYPT_S390_KM: | 302 | ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); |
294 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); | 303 | break; |
295 | break; | 304 | case CRYPT_S390_KIMD: |
296 | case CRYPT_S390_KMC: | 305 | ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); |
297 | ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); | 306 | break; |
298 | break; | 307 | case CRYPT_S390_KLMD: |
299 | case CRYPT_S390_KIMD: | 308 | ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); |
300 | ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); | 309 | break; |
301 | break; | 310 | case CRYPT_S390_KMAC: |
302 | case CRYPT_S390_KLMD: | 311 | ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); |
303 | ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); | 312 | break; |
304 | break; | 313 | default: |
305 | case CRYPT_S390_KMAC: | 314 | return 0; |
306 | ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); | ||
307 | break; | ||
308 | default: | ||
309 | ret = 0; | ||
310 | return ret; | ||
311 | } | ||
312 | if (ret >= 0){ | ||
313 | func &= CRYPT_S390_FUNC_MASK; | ||
314 | func &= 0x7f; //mask modifier bit | ||
315 | if (func < 64){ | ||
316 | ret = (status.high >> (64 - func - 1)) & 0x1; | ||
317 | } else { | ||
318 | ret = (status.low >> (128 - func - 1)) & 0x1; | ||
319 | } | ||
320 | } else { | ||
321 | ret = 0; | ||
322 | } | 315 | } |
323 | return ret; | 316 | if (ret < 0) |
317 | return 0; | ||
318 | func &= CRYPT_S390_FUNC_MASK; | ||
319 | func &= 0x7f; /* mask modifier bit */ | ||
320 | return (status[func >> 3] & (0x80 >> (func & 7))) != 0; | ||
324 | } | 321 | } |
325 | 322 | ||
326 | #endif // _CRYPTO_ARCH_S390_CRYPT_S390_H | 323 | #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */ |
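The rewritten crypt_s390_func_available() above drops the old crypt_s390_query_status high/low words and instead tests one bit in a 16-byte status block: function codes are numbered from the most significant bit of byte 0, so code func sits in status[func >> 3] under the mask 0x80 >> (func & 7). A small user-space sketch of just that lookup follows; the status contents are made up for illustration, and 0x12/0x14 are the KM AES-128/AES-256 function codes from the enum above.

```c
#include <stdio.h>

/*
 * Mirrors the bit test in crypt_s390_func_available(): the QUERY status
 * block numbers function codes from the most significant bit of byte 0,
 * so code 'func' lives in status[func >> 3] under mask 0x80 >> (func & 7).
 * The kernel also strips the operation bits (CRYPT_S390_FUNC_MASK) first;
 * that step is omitted here because bare function codes are passed in.
 */
static int func_available(const unsigned char status[16], int func)
{
	func &= 0x7f;	/* mask the decipher modifier bit */
	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}

int main(void)
{
	unsigned char status[16] = { 0 };

	/* pretend QUERY reported codes 0x00 (QUERY), 0x12 (AES-128), 0x14 (AES-256) */
	status[0x00 >> 3] |= 0x80 >> (0x00 & 7);
	status[0x12 >> 3] |= 0x80 >> (0x12 & 7);
	status[0x14 >> 3] |= 0x80 >> (0x14 & 7);

	printf("0x12        -> %d\n", func_available(status, 0x12));		/* 1 */
	printf("0x12 | 0x80 -> %d\n", func_available(status, 0x12 | 0x80));	/* 1 */
	printf("0x13        -> %d\n", func_available(status, 0x13));		/* 0 */
	return 0;
}
```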
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c deleted file mode 100644 index 54fb11d7fadd..000000000000 --- a/arch/s390/crypto/crypt_s390_query.c +++ /dev/null | |||
@@ -1,129 +0,0 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for s390 cryptographic instructions. | ||
5 | * Testing module for querying processor crypto capabilities. | ||
6 | * | ||
7 | * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
8 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include "crypt_s390.h" | ||
21 | |||
22 | static void query_available_functions(void) | ||
23 | { | ||
24 | printk(KERN_INFO "#####################\n"); | ||
25 | |||
26 | /* query available KM functions */ | ||
27 | printk(KERN_INFO "KM_QUERY: %d\n", | ||
28 | crypt_s390_func_available(KM_QUERY)); | ||
29 | printk(KERN_INFO "KM_DEA: %d\n", | ||
30 | crypt_s390_func_available(KM_DEA_ENCRYPT)); | ||
31 | printk(KERN_INFO "KM_TDEA_128: %d\n", | ||
32 | crypt_s390_func_available(KM_TDEA_128_ENCRYPT)); | ||
33 | printk(KERN_INFO "KM_TDEA_192: %d\n", | ||
34 | crypt_s390_func_available(KM_TDEA_192_ENCRYPT)); | ||
35 | printk(KERN_INFO "KM_AES_128: %d\n", | ||
36 | crypt_s390_func_available(KM_AES_128_ENCRYPT)); | ||
37 | printk(KERN_INFO "KM_AES_192: %d\n", | ||
38 | crypt_s390_func_available(KM_AES_192_ENCRYPT)); | ||
39 | printk(KERN_INFO "KM_AES_256: %d\n", | ||
40 | crypt_s390_func_available(KM_AES_256_ENCRYPT)); | ||
41 | |||
42 | /* query available KMC functions */ | ||
43 | printk(KERN_INFO "KMC_QUERY: %d\n", | ||
44 | crypt_s390_func_available(KMC_QUERY)); | ||
45 | printk(KERN_INFO "KMC_DEA: %d\n", | ||
46 | crypt_s390_func_available(KMC_DEA_ENCRYPT)); | ||
47 | printk(KERN_INFO "KMC_TDEA_128: %d\n", | ||
48 | crypt_s390_func_available(KMC_TDEA_128_ENCRYPT)); | ||
49 | printk(KERN_INFO "KMC_TDEA_192: %d\n", | ||
50 | crypt_s390_func_available(KMC_TDEA_192_ENCRYPT)); | ||
51 | printk(KERN_INFO "KMC_AES_128: %d\n", | ||
52 | crypt_s390_func_available(KMC_AES_128_ENCRYPT)); | ||
53 | printk(KERN_INFO "KMC_AES_192: %d\n", | ||
54 | crypt_s390_func_available(KMC_AES_192_ENCRYPT)); | ||
55 | printk(KERN_INFO "KMC_AES_256: %d\n", | ||
56 | crypt_s390_func_available(KMC_AES_256_ENCRYPT)); | ||
57 | |||
58 | /* query available KIMD functions */ | ||
59 | printk(KERN_INFO "KIMD_QUERY: %d\n", | ||
60 | crypt_s390_func_available(KIMD_QUERY)); | ||
61 | printk(KERN_INFO "KIMD_SHA_1: %d\n", | ||
62 | crypt_s390_func_available(KIMD_SHA_1)); | ||
63 | printk(KERN_INFO "KIMD_SHA_256: %d\n", | ||
64 | crypt_s390_func_available(KIMD_SHA_256)); | ||
65 | |||
66 | /* query available KLMD functions */ | ||
67 | printk(KERN_INFO "KLMD_QUERY: %d\n", | ||
68 | crypt_s390_func_available(KLMD_QUERY)); | ||
69 | printk(KERN_INFO "KLMD_SHA_1: %d\n", | ||
70 | crypt_s390_func_available(KLMD_SHA_1)); | ||
71 | printk(KERN_INFO "KLMD_SHA_256: %d\n", | ||
72 | crypt_s390_func_available(KLMD_SHA_256)); | ||
73 | |||
74 | /* query available KMAC functions */ | ||
75 | printk(KERN_INFO "KMAC_QUERY: %d\n", | ||
76 | crypt_s390_func_available(KMAC_QUERY)); | ||
77 | printk(KERN_INFO "KMAC_DEA: %d\n", | ||
78 | crypt_s390_func_available(KMAC_DEA)); | ||
79 | printk(KERN_INFO "KMAC_TDEA_128: %d\n", | ||
80 | crypt_s390_func_available(KMAC_TDEA_128)); | ||
81 | printk(KERN_INFO "KMAC_TDEA_192: %d\n", | ||
82 | crypt_s390_func_available(KMAC_TDEA_192)); | ||
83 | } | ||
84 | |||
85 | static int init(void) | ||
86 | { | ||
87 | struct crypt_s390_query_status status = { | ||
88 | .high = 0, | ||
89 | .low = 0 | ||
90 | }; | ||
91 | |||
92 | printk(KERN_INFO "crypt_s390: querying available crypto functions\n"); | ||
93 | crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); | ||
94 | printk(KERN_INFO "KM:\t%016llx %016llx\n", | ||
95 | (unsigned long long) status.high, | ||
96 | (unsigned long long) status.low); | ||
97 | status.high = status.low = 0; | ||
98 | crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); | ||
99 | printk(KERN_INFO "KMC:\t%016llx %016llx\n", | ||
100 | (unsigned long long) status.high, | ||
101 | (unsigned long long) status.low); | ||
102 | status.high = status.low = 0; | ||
103 | crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); | ||
104 | printk(KERN_INFO "KIMD:\t%016llx %016llx\n", | ||
105 | (unsigned long long) status.high, | ||
106 | (unsigned long long) status.low); | ||
107 | status.high = status.low = 0; | ||
108 | crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); | ||
109 | printk(KERN_INFO "KLMD:\t%016llx %016llx\n", | ||
110 | (unsigned long long) status.high, | ||
111 | (unsigned long long) status.low); | ||
112 | status.high = status.low = 0; | ||
113 | crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); | ||
114 | printk(KERN_INFO "KMAC:\t%016llx %016llx\n", | ||
115 | (unsigned long long) status.high, | ||
116 | (unsigned long long) status.low); | ||
117 | |||
118 | query_available_functions(); | ||
119 | return -ECANCELED; | ||
120 | } | ||
121 | |||
122 | static void __exit cleanup(void) | ||
123 | { | ||
124 | } | ||
125 | |||
126 | module_init(init); | ||
127 | module_exit(cleanup); | ||
128 | |||
129 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c index e3f5c5f238fe..5706af266442 100644 --- a/arch/s390/crypto/des_check_key.c +++ b/arch/s390/crypto/des_check_key.c | |||
@@ -10,8 +10,9 @@ | |||
10 | * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. | 10 | * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. |
11 | * | 11 | * |
12 | * s390 Version: | 12 | * s390 Version: |
13 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | 13 | * Copyright IBM Corp. 2003 |
14 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 14 | * Author(s): Thomas Spatzier |
15 | * Jan Glauber (jan.glauber@de.ibm.com) | ||
15 | * | 16 | * |
16 | * Derived from "crypto/des.c" | 17 | * Derived from "crypto/des.c" |
17 | * Copyright (c) 1992 Dana L. How. | 18 | * Copyright (c) 1992 Dana L. How. |
@@ -30,6 +31,7 @@ | |||
30 | #include <linux/module.h> | 31 | #include <linux/module.h> |
31 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
32 | #include <linux/crypto.h> | 33 | #include <linux/crypto.h> |
34 | #include "crypto_des.h" | ||
33 | 35 | ||
34 | #define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) | 36 | #define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) |
35 | 37 | ||
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c index 2aba04852fe3..ea22707f435f 100644 --- a/arch/s390/crypto/des_s390.c +++ b/arch/s390/crypto/des_s390.c | |||
@@ -3,9 +3,9 @@ | |||
3 | * | 3 | * |
4 | * s390 implementation of the DES Cipher Algorithm. | 4 | * s390 implementation of the DES Cipher Algorithm. |
5 | * | 5 | * |
6 | * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2003,2007 |
7 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 7 | * Author(s): Thomas Spatzier |
8 | * | 8 | * Jan Glauber (jan.glauber@de.ibm.com) |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
@@ -557,7 +557,7 @@ static int init(void) | |||
557 | if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || | 557 | if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || |
558 | !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || | 558 | !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || |
559 | !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) | 559 | !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) |
560 | return -ENOSYS; | 560 | return -EOPNOTSUPP; |
561 | 561 | ||
562 | ret = crypto_register_alg(&des_alg); | 562 | ret = crypto_register_alg(&des_alg); |
563 | if (ret) | 563 | if (ret) |
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c new file mode 100644 index 000000000000..8eb3a1aedc22 --- /dev/null +++ b/arch/s390/crypto/prng.c | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2006,2007 | ||
3 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> | ||
4 | * Driver for the s390 pseudo random number generator | ||
5 | */ | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/miscdevice.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/moduleparam.h> | ||
12 | #include <linux/random.h> | ||
13 | #include <asm/debug.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | |||
16 | #include "crypt_s390.h" | ||
17 | |||
18 | MODULE_LICENSE("GPL"); | ||
19 | MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>"); | ||
20 | MODULE_DESCRIPTION("s390 PRNG interface"); | ||
21 | |||
22 | static int prng_chunk_size = 256; | ||
23 | module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH); | ||
24 | MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes"); | ||
25 | |||
26 | static int prng_entropy_limit = 4096; | ||
27 | module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR); | ||
28 | MODULE_PARM_DESC(prng_entropy_limit, | ||
29 | "PRNG adds entropy after that many bytes were produced"); | ||
30 | |||
31 | /* | ||
32 | * Any one who considers arithmetical methods of producing random digits is, | ||
33 | * of course, in a state of sin. -- John von Neumann | ||
34 | */ | ||
35 | |||
36 | struct s390_prng_data { | ||
37 | unsigned long count; /* how many bytes were produced */ | ||
38 | char *buf; | ||
39 | }; | ||
40 | |||
41 | static struct s390_prng_data *p; | ||
42 | |||
43 | /* copied from libica, use a non-zero initial parameter block */ | ||
44 | static unsigned char parm_block[32] = { | ||
45 | 0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4, | ||
46 | 0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0, | ||
47 | }; | ||
48 | |||
49 | static int prng_open(struct inode *inode, struct file *file) | ||
50 | { | ||
51 | return nonseekable_open(inode, file); | ||
52 | } | ||
53 | |||
54 | static void prng_add_entropy(void) | ||
55 | { | ||
56 | __u64 entropy[4]; | ||
57 | unsigned int i; | ||
58 | int ret; | ||
59 | |||
60 | for (i = 0; i < 16; i++) { | ||
61 | ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy, | ||
62 | (char *)entropy, sizeof(entropy)); | ||
63 | BUG_ON(ret < 0 || ret != sizeof(entropy)); | ||
64 | memcpy(parm_block, entropy, sizeof(entropy)); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static void prng_seed(int nbytes) | ||
69 | { | ||
70 | char buf[16]; | ||
71 | int i = 0; | ||
72 | |||
73 | BUG_ON(nbytes > 16); | ||
74 | get_random_bytes(buf, nbytes); | ||
75 | |||
76 | /* Add the entropy */ | ||
77 | while (nbytes >= 8) { | ||
78 | *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8); | ||
79 | prng_add_entropy(); | ||
80 | i += 8; | ||
81 | nbytes -= 8; | ||
82 | } | ||
83 | prng_add_entropy(); | ||
84 | } | ||
85 | |||
86 | static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, | ||
87 | loff_t *ppos) | ||
88 | { | ||
89 | int chunk, n; | ||
90 | int ret = 0; | ||
91 | int tmp; | ||
92 | |||
93 | /* nbytes can be arbitrarily long, we split it into chunks */ | ||
94 | while (nbytes) { | ||
95 | /* same as in extract_entropy_user in random.c */ | ||
96 | if (need_resched()) { | ||
97 | if (signal_pending(current)) { | ||
98 | if (ret == 0) | ||
99 | ret = -ERESTARTSYS; | ||
100 | break; | ||
101 | } | ||
102 | schedule(); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * we lose some random bytes if an attacker issues | ||
107 | * reads < 8 bytes, but we don't care | ||
108 | */ | ||
109 | chunk = min_t(int, nbytes, prng_chunk_size); | ||
110 | |||
111 | /* PRNG only likes multiples of 8 bytes */ | ||
112 | n = (chunk + 7) & -8; | ||
113 | |||
114 | if (p->count > prng_entropy_limit) | ||
115 | prng_seed(8); | ||
116 | |||
117 | /* if the CPU supports PRNG, stckf is present too */ | ||
118 | asm volatile(".insn s,0xb27c0000,%0" | ||
119 | : "=m" (*((unsigned long long *)p->buf)) : : "cc"); | ||
120 | |||
121 | /* | ||
122 | * Besides the STCKF, the input for the TDES-EDE is the output | ||
123 | * of the last operation. We differ here from X9.17 since we | ||
124 | * only store one timestamp into the buffer. Padding the whole | ||
125 | * buffer with timestamps does not improve security, since | ||
126 | * successive stckf have nearly constant offsets. | ||
127 | * If an attacker knows the first timestamp it would be | ||
128 | * trivial to guess the additional values. One timestamp | ||
129 | * is therefore enough and still guarantees unique input values. | ||
130 | * | ||
131 | * Note: you can still get strict X9.17 conformity by setting | ||
132 | * prng_chunk_size to 8 bytes. | ||
133 | */ | ||
134 | tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n); | ||
135 | BUG_ON((tmp < 0) || (tmp != n)); | ||
136 | |||
137 | p->count += n; | ||
138 | |||
139 | if (copy_to_user(ubuf, p->buf, chunk)) | ||
140 | return -EFAULT; | ||
141 | |||
142 | nbytes -= chunk; | ||
143 | ret += chunk; | ||
144 | ubuf += chunk; | ||
145 | } | ||
146 | return ret; | ||
147 | } | ||
148 | |||
149 | static struct file_operations prng_fops = { | ||
150 | .owner = THIS_MODULE, | ||
151 | .open = &prng_open, | ||
152 | .release = NULL, | ||
153 | .read = &prng_read, | ||
154 | }; | ||
155 | |||
156 | static struct miscdevice prng_dev = { | ||
157 | .name = "prandom", | ||
158 | .minor = MISC_DYNAMIC_MINOR, | ||
159 | .fops = &prng_fops, | ||
160 | }; | ||
161 | |||
162 | static int __init prng_init(void) | ||
163 | { | ||
164 | int ret; | ||
165 | |||
166 | /* check if the CPU has a PRNG */ | ||
167 | if (!crypt_s390_func_available(KMC_PRNG)) | ||
168 | return -EOPNOTSUPP; | ||
169 | |||
170 | if (prng_chunk_size < 8) | ||
171 | return -EINVAL; | ||
172 | |||
173 | p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL); | ||
174 | if (!p) | ||
175 | return -ENOMEM; | ||
176 | p->count = 0; | ||
177 | |||
178 | p->buf = kmalloc(prng_chunk_size, GFP_KERNEL); | ||
179 | if (!p->buf) { | ||
180 | ret = -ENOMEM; | ||
181 | goto out_free; | ||
182 | } | ||
183 | |||
184 | /* initialize the PRNG, add 128 bits of entropy */ | ||
185 | prng_seed(16); | ||
186 | |||
187 | ret = misc_register(&prng_dev); | ||
188 | if (ret) { | ||
189 | printk(KERN_WARNING | ||
190 | "Could not register misc device for PRNG.\n"); | ||
191 | goto out_buf; | ||
192 | } | ||
193 | return 0; | ||
194 | |||
195 | out_buf: | ||
196 | kfree(p->buf); | ||
197 | out_free: | ||
198 | kfree(p); | ||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | static void __exit prng_exit(void) | ||
203 | { | ||
204 | /* wipe me */ | ||
205 | memset(p->buf, 0, prng_chunk_size); | ||
206 | kfree(p->buf); | ||
207 | kfree(p); | ||
208 | |||
209 | misc_deregister(&prng_dev); | ||
210 | } | ||
211 | |||
212 | module_init(prng_init); | ||
213 | module_exit(prng_exit); | ||
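Once prng.ko is loaded and the misc device node exists (the driver registers it as "prandom" with a dynamic minor, i.e. /dev/prandom as described in the Kconfig help), random bytes can be pulled with a plain read(2). A deliberately small, hypothetical user-space consumer, assuming the node has been created by udev or mknod:

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n, i;
	int fd;

	fd = open("/dev/prandom", O_RDONLY);	/* node backed by the prng misc device */
	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));		/* the driver splits large requests into chunks */
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}
```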
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c index 49ca8690ee39..969639f31977 100644 --- a/arch/s390/crypto/sha1_s390.c +++ b/arch/s390/crypto/sha1_s390.c | |||
@@ -8,8 +8,9 @@ | |||
8 | * implementation written by Steve Reid. | 8 | * implementation written by Steve Reid. |
9 | * | 9 | * |
10 | * s390 Version: | 10 | * s390 Version: |
11 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | 11 | * Copyright IBM Corp. 2003,2007 |
12 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 12 | * Author(s): Thomas Spatzier |
13 | * Jan Glauber (jan.glauber@de.ibm.com) | ||
13 | * | 14 | * |
14 | * Derived from "crypto/sha1.c" | 15 | * Derived from "crypto/sha1.c" |
15 | * Copyright (c) Alan Smithee. | 16 | * Copyright (c) Alan Smithee. |
@@ -43,16 +44,14 @@ struct crypt_s390_sha1_ctx { | |||
43 | static void sha1_init(struct crypto_tfm *tfm) | 44 | static void sha1_init(struct crypto_tfm *tfm) |
44 | { | 45 | { |
45 | struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); |
46 | static const u32 initstate[5] = { | 47 | |
47 | 0x67452301, | 48 | ctx->state[0] = 0x67452301; |
48 | 0xEFCDAB89, | 49 | ctx->state[1] = 0xEFCDAB89; |
49 | 0x98BADCFE, | 50 | ctx->state[2] = 0x98BADCFE; |
50 | 0x10325476, | 51 | ctx->state[3] = 0x10325476; |
51 | 0xC3D2E1F0 | 52 | ctx->state[4] = 0xC3D2E1F0; |
52 | }; | ||
53 | 53 | ||
54 | ctx->count = 0; | 54 | ctx->count = 0; |
55 | memcpy(ctx->state, &initstate, sizeof(initstate)); | ||
56 | ctx->buf_len = 0; | 55 | ctx->buf_len = 0; |
57 | } | 56 | } |
58 | 57 | ||
@@ -63,13 +62,13 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data, | |||
63 | long imd_len; | 62 | long imd_len; |
64 | 63 | ||
65 | sctx = crypto_tfm_ctx(tfm); | 64 | sctx = crypto_tfm_ctx(tfm); |
66 | sctx->count += len * 8; //message bit length | 65 | sctx->count += len * 8; /* message bit length */ |
67 | 66 | ||
68 | //anything in buffer yet? -> must be completed | 67 | /* anything in buffer yet? -> must be completed */ |
69 | if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { | 68 | if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { |
70 | //complete full block and hash | 69 | /* complete full block and hash */ |
71 | memcpy(sctx->buffer + sctx->buf_len, data, | 70 | memcpy(sctx->buffer + sctx->buf_len, data, |
72 | SHA1_BLOCK_SIZE - sctx->buf_len); | 71 | SHA1_BLOCK_SIZE - sctx->buf_len); |
73 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, | 72 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, |
74 | SHA1_BLOCK_SIZE); | 73 | SHA1_BLOCK_SIZE); |
75 | data += SHA1_BLOCK_SIZE - sctx->buf_len; | 74 | data += SHA1_BLOCK_SIZE - sctx->buf_len; |
@@ -77,37 +76,36 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data, | |||
77 | sctx->buf_len = 0; | 76 | sctx->buf_len = 0; |
78 | } | 77 | } |
79 | 78 | ||
80 | //rest of data contains full blocks? | 79 | /* rest of data contains full blocks? */ |
81 | imd_len = len & ~0x3ful; | 80 | imd_len = len & ~0x3ful; |
82 | if (imd_len){ | 81 | if (imd_len) { |
83 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); | 82 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); |
84 | data += imd_len; | 83 | data += imd_len; |
85 | len -= imd_len; | 84 | len -= imd_len; |
86 | } | 85 | } |
87 | //anything left? store in buffer | 86 | /* anything left? store in buffer */ |
88 | if (len){ | 87 | if (len) { |
89 | memcpy(sctx->buffer + sctx->buf_len , data, len); | 88 | memcpy(sctx->buffer + sctx->buf_len , data, len); |
90 | sctx->buf_len += len; | 89 | sctx->buf_len += len; |
91 | } | 90 | } |
92 | } | 91 | } |
93 | 92 | ||
94 | 93 | ||
95 | static void | 94 | static void pad_message(struct crypt_s390_sha1_ctx* sctx) |
96 | pad_message(struct crypt_s390_sha1_ctx* sctx) | ||
97 | { | 95 | { |
98 | int index; | 96 | int index; |
99 | 97 | ||
100 | index = sctx->buf_len; | 98 | index = sctx->buf_len; |
101 | sctx->buf_len = (sctx->buf_len < 56)? | 99 | sctx->buf_len = (sctx->buf_len < 56) ? |
102 | SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; | 100 | SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; |
103 | //start pad with 1 | 101 | /* start pad with 1 */ |
104 | sctx->buffer[index] = 0x80; | 102 | sctx->buffer[index] = 0x80; |
105 | //pad with zeros | 103 | /* pad with zeros */ |
106 | index++; | 104 | index++; |
107 | memset(sctx->buffer + index, 0x00, sctx->buf_len - index); | 105 | memset(sctx->buffer + index, 0x00, sctx->buf_len - index); |
108 | //append length | 106 | /* append length */ |
109 | memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, | 107 | memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, |
110 | sizeof sctx->count); | 108 | sizeof sctx->count); |
111 | } | 109 | } |
112 | 110 | ||
113 | /* Add padding and return the message digest. */ | 111 | /* Add padding and return the message digest. */ |
@@ -115,47 +113,40 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out) | |||
115 | { | 113 | { |
116 | struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); | 114 | struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); |
117 | 115 | ||
118 | //must perform manual padding | 116 | /* must perform manual padding */ |
119 | pad_message(sctx); | 117 | pad_message(sctx); |
120 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); | 118 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); |
121 | //copy digest to out | 119 | /* copy digest to out */ |
122 | memcpy(out, sctx->state, SHA1_DIGEST_SIZE); | 120 | memcpy(out, sctx->state, SHA1_DIGEST_SIZE); |
123 | /* Wipe context */ | 121 | /* wipe context */ |
124 | memset(sctx, 0, sizeof *sctx); | 122 | memset(sctx, 0, sizeof *sctx); |
125 | } | 123 | } |
126 | 124 | ||
127 | static struct crypto_alg alg = { | 125 | static struct crypto_alg alg = { |
128 | .cra_name = "sha1", | 126 | .cra_name = "sha1", |
129 | .cra_driver_name = "sha1-s390", | 127 | .cra_driver_name= "sha1-s390", |
130 | .cra_priority = CRYPT_S390_PRIORITY, | 128 | .cra_priority = CRYPT_S390_PRIORITY, |
131 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST, | 129 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST, |
132 | .cra_blocksize = SHA1_BLOCK_SIZE, | 130 | .cra_blocksize = SHA1_BLOCK_SIZE, |
133 | .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), | 131 | .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), |
134 | .cra_module = THIS_MODULE, | 132 | .cra_module = THIS_MODULE, |
135 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 133 | .cra_list = LIST_HEAD_INIT(alg.cra_list), |
136 | .cra_u = { .digest = { | 134 | .cra_u = { .digest = { |
137 | .dia_digestsize = SHA1_DIGEST_SIZE, | 135 | .dia_digestsize = SHA1_DIGEST_SIZE, |
138 | .dia_init = sha1_init, | 136 | .dia_init = sha1_init, |
139 | .dia_update = sha1_update, | 137 | .dia_update = sha1_update, |
140 | .dia_final = sha1_final } } | 138 | .dia_final = sha1_final } } |
141 | }; | 139 | }; |
142 | 140 | ||
143 | static int | 141 | static int __init init(void) |
144 | init(void) | ||
145 | { | 142 | { |
146 | int ret = -ENOSYS; | 143 | if (!crypt_s390_func_available(KIMD_SHA_1)) |
144 | return -EOPNOTSUPP; | ||
147 | 145 | ||
148 | if (crypt_s390_func_available(KIMD_SHA_1)){ | 146 | return crypto_register_alg(&alg); |
149 | ret = crypto_register_alg(&alg); | ||
150 | if (ret == 0){ | ||
151 | printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n"); | ||
152 | } | ||
153 | } | ||
154 | return ret; | ||
155 | } | 147 | } |
156 | 148 | ||
157 | static void __exit | 149 | static void __exit fini(void) |
158 | fini(void) | ||
159 | { | 150 | { |
160 | crypto_unregister_alg(&alg); | 151 | crypto_unregister_alg(&alg); |
161 | } | 152 | } |
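
The update path above keeps at most one partial block in the context: a leftover from an earlier call is first topped up to a full SHA1_BLOCK_SIZE and hashed, any whole blocks in the new data are handed to the KIMD operation in one go, and the tail is stashed for the next call or for the final padding. A small user-space sketch of the same buffering scheme is given below; the hypothetical process_blocks() stands in for crypt_s390_kimd(), so this illustrates the control flow only, not the driver's code.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64                           /* SHA-1 block size in bytes */

struct sketch_ctx {
        unsigned char buffer[2 * BLOCK_SIZE];
        size_t buf_len;                         /* bytes currently buffered */
        unsigned long long count;               /* message length in bits */
        unsigned int blocks_hashed;             /* stand-in for the real state */
};

/* hypothetical stand-in for crypt_s390_kimd(KIMD_SHA_1, state, data, len) */
static void process_blocks(struct sketch_ctx *ctx,
                           const unsigned char *data, size_t len)
{
        (void) data;
        ctx->blocks_hashed += len / BLOCK_SIZE; /* len is a block multiple */
}

static void sketch_update(struct sketch_ctx *ctx,
                          const unsigned char *data, size_t len)
{
        size_t full;

        ctx->count += (unsigned long long) len * 8;

        /* top up and hash a previously buffered partial block first */
        if (ctx->buf_len && ctx->buf_len + len >= BLOCK_SIZE) {
                size_t fill = BLOCK_SIZE - ctx->buf_len;

                memcpy(ctx->buffer + ctx->buf_len, data, fill);
                process_blocks(ctx, ctx->buffer, BLOCK_SIZE);
                data += fill;
                len -= fill;
                ctx->buf_len = 0;
        }

        /* hash all remaining whole blocks straight from the caller's data */
        full = len & ~(size_t) (BLOCK_SIZE - 1);
        if (full) {
                process_blocks(ctx, data, full);
                data += full;
                len -= full;
        }

        /* keep the tail for the next update or for the final padding */
        if (len) {
                memcpy(ctx->buffer + ctx->buf_len, data, len);
                ctx->buf_len += len;
        }
}

int main(void)
{
        static const unsigned char msg[100];
        struct sketch_ctx ctx = { 0 };

        sketch_update(&ctx, msg, 10);           /* too little, buffered only */
        sketch_update(&ctx, msg, 90);           /* 10 + 90 = one block + 36 left */
        printf("%u blocks hashed, %zu bytes buffered\n",
               ctx.blocks_hashed, ctx.buf_len);
        return 0;
}
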
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c index 8e4e67503fe7..78436c696d37 100644 --- a/arch/s390/crypto/sha256_s390.c +++ b/arch/s390/crypto/sha256_s390.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * s390 implementation of the SHA256 Secure Hash Algorithm. | 4 | * s390 implementation of the SHA256 Secure Hash Algorithm. |
5 | * | 5 | * |
6 | * s390 Version: | 6 | * s390 Version: |
7 | * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2005,2007 |
8 | * Author(s): Jan Glauber (jang@de.ibm.com) | 8 | * Author(s): Jan Glauber (jang@de.ibm.com) |
9 | * | 9 | * |
10 | * Derived from "crypto/sha256.c" | 10 | * Derived from "crypto/sha256.c" |
@@ -143,15 +143,10 @@ static struct crypto_alg alg = { | |||
143 | 143 | ||
144 | static int init(void) | 144 | static int init(void) |
145 | { | 145 | { |
146 | int ret; | ||
147 | |||
148 | if (!crypt_s390_func_available(KIMD_SHA_256)) | 146 | if (!crypt_s390_func_available(KIMD_SHA_256)) |
149 | return -ENOSYS; | 147 | return -EOPNOTSUPP; |
150 | 148 | ||
151 | ret = crypto_register_alg(&alg); | 149 | return crypto_register_alg(&alg); |
152 | if (ret != 0) | ||
153 | printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded."); | ||
154 | return ret; | ||
155 | } | 150 | } |
156 | 151 | ||
157 | static void __exit fini(void) | 152 | static void __exit fini(void) |
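
With this hunk sha256_s390 (and sha1_s390 above) share one load-time shape: probe the CPACF function once, fail the module load with -EOPNOTSUPP when the machine does not provide it, and otherwise let crypto_register_alg() be the only thing that can fail; the load-time printks are gone. The module glue that wires init() and fini() up is not part of the hunk; by the usual kernel convention it presumably reads roughly as follows.

#include <linux/module.h>       /* already included by the driver */

module_init(init);
module_exit(fini);

MODULE_ALIAS("sha256");         /* allow autoloading on a "sha256" request */
MODULE_LICENSE("GPL");
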
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 5368cf4a350e..7c621b8ef683 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000 | |||
108 | CONFIG_COMPAT=y | 108 | CONFIG_COMPAT=y |
109 | CONFIG_SYSVIPC_COMPAT=y | 109 | CONFIG_SYSVIPC_COMPAT=y |
110 | CONFIG_AUDIT_ARCH=y | 110 | CONFIG_AUDIT_ARCH=y |
111 | CONFIG_S390_SWITCH_AMODE=y | ||
112 | CONFIG_S390_EXEC_PROTECT=y | ||
111 | 113 | ||
112 | # | 114 | # |
113 | # Code generation options | 115 | # Code generation options |
@@ -431,7 +433,6 @@ CONFIG_TN3270_CONSOLE=y | |||
431 | CONFIG_TN3215=y | 433 | CONFIG_TN3215=y |
432 | CONFIG_TN3215_CONSOLE=y | 434 | CONFIG_TN3215_CONSOLE=y |
433 | CONFIG_CCW_CONSOLE=y | 435 | CONFIG_CCW_CONSOLE=y |
434 | CONFIG_SCLP=y | ||
435 | CONFIG_SCLP_TTY=y | 436 | CONFIG_SCLP_TTY=y |
436 | CONFIG_SCLP_CONSOLE=y | 437 | CONFIG_SCLP_CONSOLE=y |
437 | CONFIG_SCLP_VT220_TTY=y | 438 | CONFIG_SCLP_VT220_TTY=y |
@@ -724,9 +725,7 @@ CONFIG_CRYPTO_MANAGER=y | |||
724 | # CONFIG_CRYPTO_MD4 is not set | 725 | # CONFIG_CRYPTO_MD4 is not set |
725 | # CONFIG_CRYPTO_MD5 is not set | 726 | # CONFIG_CRYPTO_MD5 is not set |
726 | # CONFIG_CRYPTO_SHA1 is not set | 727 | # CONFIG_CRYPTO_SHA1 is not set |
727 | # CONFIG_CRYPTO_SHA1_S390 is not set | ||
728 | # CONFIG_CRYPTO_SHA256 is not set | 728 | # CONFIG_CRYPTO_SHA256 is not set |
729 | # CONFIG_CRYPTO_SHA256_S390 is not set | ||
730 | # CONFIG_CRYPTO_SHA512 is not set | 729 | # CONFIG_CRYPTO_SHA512 is not set |
731 | # CONFIG_CRYPTO_WP512 is not set | 730 | # CONFIG_CRYPTO_WP512 is not set |
732 | # CONFIG_CRYPTO_TGR192 is not set | 731 | # CONFIG_CRYPTO_TGR192 is not set |
@@ -735,12 +734,10 @@ CONFIG_CRYPTO_ECB=m | |||
735 | CONFIG_CRYPTO_CBC=y | 734 | CONFIG_CRYPTO_CBC=y |
736 | # CONFIG_CRYPTO_LRW is not set | 735 | # CONFIG_CRYPTO_LRW is not set |
737 | # CONFIG_CRYPTO_DES is not set | 736 | # CONFIG_CRYPTO_DES is not set |
738 | # CONFIG_CRYPTO_DES_S390 is not set | ||
739 | # CONFIG_CRYPTO_BLOWFISH is not set | 737 | # CONFIG_CRYPTO_BLOWFISH is not set |
740 | # CONFIG_CRYPTO_TWOFISH is not set | 738 | # CONFIG_CRYPTO_TWOFISH is not set |
741 | # CONFIG_CRYPTO_SERPENT is not set | 739 | # CONFIG_CRYPTO_SERPENT is not set |
742 | # CONFIG_CRYPTO_AES is not set | 740 | # CONFIG_CRYPTO_AES is not set |
743 | # CONFIG_CRYPTO_AES_S390 is not set | ||
744 | # CONFIG_CRYPTO_CAST5 is not set | 741 | # CONFIG_CRYPTO_CAST5 is not set |
745 | # CONFIG_CRYPTO_CAST6 is not set | 742 | # CONFIG_CRYPTO_CAST6 is not set |
746 | # CONFIG_CRYPTO_TEA is not set | 743 | # CONFIG_CRYPTO_TEA is not set |
@@ -755,6 +752,11 @@ CONFIG_CRYPTO_CBC=y | |||
755 | # | 752 | # |
756 | # Hardware crypto devices | 753 | # Hardware crypto devices |
757 | # | 754 | # |
755 | # CONFIG_CRYPTO_SHA1_S390 is not set | ||
756 | # CONFIG_CRYPTO_SHA256_S390 is not set | ||
757 | # CONFIG_CRYPTO_DES_S390 is not set | ||
758 | # CONFIG_CRYPTO_AES_S390 is not set | ||
759 | CONFIG_S390_PRNG=m | ||
758 | 760 | ||
759 | # | 761 | # |
760 | # Library routines | 762 | # Library routines |
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile index f4b00cd81f7c..b08d2abf6178 100644 --- a/arch/s390/hypfs/Makefile +++ b/arch/s390/hypfs/Makefile | |||
@@ -4,4 +4,4 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o | 5 | obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o |
6 | 6 | ||
7 | s390_hypfs-objs := inode.o hypfs_diag.o | 7 | s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o |
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index f3dbd91965c6..aea572009d60 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h | |||
@@ -27,4 +27,13 @@ extern struct dentry *hypfs_create_str(struct super_block *sb, | |||
27 | struct dentry *dir, const char *name, | 27 | struct dentry *dir, const char *name, |
28 | char *string); | 28 | char *string); |
29 | 29 | ||
30 | /* LPAR Hypervisor */ | ||
31 | extern int hypfs_diag_init(void); | ||
32 | extern void hypfs_diag_exit(void); | ||
33 | extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root); | ||
34 | |||
35 | /* VM Hypervisor */ | ||
36 | extern int hypfs_vm_init(void); | ||
37 | extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root); | ||
38 | |||
30 | #endif /* _HYPFS_H_ */ | 39 | #endif /* _HYPFS_H_ */ |
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h deleted file mode 100644 index 256b384aebe1..000000000000 --- a/arch/s390/hypfs/hypfs_diag.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | /* | ||
2 | * arch/s390/hypfs_diag.h | ||
3 | * Hypervisor filesystem for Linux on s390. | ||
4 | * | ||
5 | * Copyright (C) IBM Corp. 2006 | ||
6 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef _HYPFS_DIAG_H_ | ||
10 | #define _HYPFS_DIAG_H_ | ||
11 | |||
12 | extern int hypfs_diag_init(void); | ||
13 | extern void hypfs_diag_exit(void); | ||
14 | extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root); | ||
15 | |||
16 | #endif /* _HYPFS_DIAG_H_ */ | ||
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c new file mode 100644 index 000000000000..d01fc8f799f0 --- /dev/null +++ b/arch/s390/hypfs/hypfs_vm.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * Hypervisor filesystem for Linux on s390. z/VM implementation. | ||
3 | * | ||
4 | * Copyright (C) IBM Corp. 2006 | ||
5 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/vmalloc.h> | ||
12 | #include <asm/ebcdic.h> | ||
13 | #include "hypfs.h" | ||
14 | |||
15 | #define NAME_LEN 8 | ||
16 | |||
17 | static char local_guest[] = " "; | ||
18 | static char all_guests[] = "* "; | ||
19 | static char *guest_query; | ||
20 | |||
21 | struct diag2fc_data { | ||
22 | __u32 version; | ||
23 | __u32 flags; | ||
24 | __u64 used_cpu; | ||
25 | __u64 el_time; | ||
26 | __u64 mem_min_kb; | ||
27 | __u64 mem_max_kb; | ||
28 | __u64 mem_share_kb; | ||
29 | __u64 mem_used_kb; | ||
30 | __u32 pcpus; | ||
31 | __u32 lcpus; | ||
32 | __u32 vcpus; | ||
33 | __u32 cpu_min; | ||
34 | __u32 cpu_max; | ||
35 | __u32 cpu_shares; | ||
36 | __u32 cpu_use_samp; | ||
37 | __u32 cpu_delay_samp; | ||
38 | __u32 page_wait_samp; | ||
39 | __u32 idle_samp; | ||
40 | __u32 other_samp; | ||
41 | __u32 total_samp; | ||
42 | char guest_name[NAME_LEN]; | ||
43 | }; | ||
44 | |||
45 | struct diag2fc_parm_list { | ||
46 | char userid[NAME_LEN]; | ||
47 | char aci_grp[NAME_LEN]; | ||
48 | __u64 addr; | ||
49 | __u32 size; | ||
50 | __u32 fmt; | ||
51 | }; | ||
52 | |||
53 | static int diag2fc(int size, char* query, void *addr) | ||
54 | { | ||
55 | unsigned long residual_cnt; | ||
56 | unsigned long rc; | ||
57 | struct diag2fc_parm_list parm_list; | ||
58 | |||
59 | memcpy(parm_list.userid, query, NAME_LEN); | ||
60 | ASCEBC(parm_list.userid, NAME_LEN); | ||
61 | parm_list.addr = (unsigned long) addr ; | ||
62 | parm_list.size = size; | ||
63 | parm_list.fmt = 0x02; | ||
64 | memset(parm_list.aci_grp, 0x40, NAME_LEN); | ||
65 | rc = -1; | ||
66 | |||
67 | asm volatile( | ||
68 | " diag %0,%1,0x2fc\n" | ||
69 | "0:\n" | ||
70 | EX_TABLE(0b,0b) | ||
71 | : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory"); | ||
72 | |||
73 | if ((rc != 0 ) && (rc != -2)) | ||
74 | return rc; | ||
75 | else | ||
76 | return -residual_cnt; | ||
77 | } | ||
78 | |||
79 | static struct diag2fc_data *diag2fc_store(char *query, int *count) | ||
80 | { | ||
81 | int size; | ||
82 | struct diag2fc_data *data; | ||
83 | |||
84 | do { | ||
85 | size = diag2fc(0, query, NULL); | ||
86 | if (size < 0) | ||
87 | return ERR_PTR(-EACCES); | ||
88 | data = vmalloc(size); | ||
89 | if (!data) | ||
90 | return ERR_PTR(-ENOMEM); | ||
91 | if (diag2fc(size, query, data) == 0) | ||
92 | break; | ||
93 | vfree(data); | ||
94 | } while (1); | ||
95 | *count = (size / sizeof(*data)); | ||
96 | |||
97 | return data; | ||
98 | } | ||
99 | |||
100 | static void diag2fc_free(void *data) | ||
101 | { | ||
102 | vfree(data); | ||
103 | } | ||
104 | |||
105 | #define ATTRIBUTE(sb, dir, name, member) \ | ||
106 | do { \ | ||
107 | void *rc; \ | ||
108 | rc = hypfs_create_u64(sb, dir, name, member); \ | ||
109 | if (IS_ERR(rc)) \ | ||
110 | return PTR_ERR(rc); \ | ||
111 | } while(0) | ||
112 | |||
113 | static int hpyfs_vm_create_guest(struct super_block *sb, | ||
114 | struct dentry *systems_dir, | ||
115 | struct diag2fc_data *data) | ||
116 | { | ||
117 | char guest_name[NAME_LEN + 1] = {}; | ||
118 | struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir; | ||
119 | int dedicated_flag, capped_value; | ||
120 | |||
121 | capped_value = (data->flags & 0x00000006) >> 1; | ||
122 | dedicated_flag = (data->flags & 0x00000008) >> 3; | ||
123 | |||
124 | /* guest dir */ | ||
125 | memcpy(guest_name, data->guest_name, NAME_LEN); | ||
126 | EBCASC(guest_name, NAME_LEN); | ||
127 | strstrip(guest_name); | ||
128 | guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); | ||
129 | if (IS_ERR(guest_dir)) | ||
130 | return PTR_ERR(guest_dir); | ||
131 | ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time); | ||
132 | |||
133 | /* logical cpu information */ | ||
134 | cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus"); | ||
135 | if (IS_ERR(cpus_dir)) | ||
136 | return PTR_ERR(cpus_dir); | ||
137 | ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu); | ||
138 | ATTRIBUTE(sb, cpus_dir, "capped", capped_value); | ||
139 | ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag); | ||
140 | ATTRIBUTE(sb, cpus_dir, "count", data->vcpus); | ||
141 | ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min); | ||
142 | ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max); | ||
143 | ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares); | ||
144 | |||
145 | /* memory information */ | ||
146 | mem_dir = hypfs_mkdir(sb, guest_dir, "mem"); | ||
147 | if (IS_ERR(mem_dir)) | ||
148 | return PTR_ERR(mem_dir); | ||
149 | ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb); | ||
150 | ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb); | ||
151 | ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb); | ||
152 | ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb); | ||
153 | |||
154 | /* samples */ | ||
155 | samples_dir = hypfs_mkdir(sb, guest_dir, "samples"); | ||
156 | if (IS_ERR(samples_dir)) | ||
157 | return PTR_ERR(samples_dir); | ||
158 | ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp); | ||
159 | ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp); | ||
160 | ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp); | ||
161 | ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp); | ||
162 | ATTRIBUTE(sb, samples_dir, "other", data->other_samp); | ||
163 | ATTRIBUTE(sb, samples_dir, "total", data->total_samp); | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | int hypfs_vm_create_files(struct super_block *sb, struct dentry *root) | ||
168 | { | ||
169 | struct dentry *dir, *file; | ||
170 | struct diag2fc_data *data; | ||
171 | int rc, i, count = 0; | ||
172 | |||
173 | data = diag2fc_store(guest_query, &count); | ||
174 | if (IS_ERR(data)) | ||
175 | return PTR_ERR(data); | ||
176 | |||
177 | /* Hypervisor Info */ | ||
178 | dir = hypfs_mkdir(sb, root, "hyp"); | ||
179 | if (IS_ERR(dir)) { | ||
180 | rc = PTR_ERR(dir); | ||
181 | goto failed; | ||
182 | } | ||
183 | file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor"); | ||
184 | if (IS_ERR(file)) { | ||
185 | rc = PTR_ERR(file); | ||
186 | goto failed; | ||
187 | } | ||
188 | |||
189 | /* physical cpus */ | ||
190 | dir = hypfs_mkdir(sb, root, "cpus"); | ||
191 | if (IS_ERR(dir)) { | ||
192 | rc = PTR_ERR(dir); | ||
193 | goto failed; | ||
194 | } | ||
195 | file = hypfs_create_u64(sb, dir, "count", data->lcpus); | ||
196 | if (IS_ERR(file)) { | ||
197 | rc = PTR_ERR(file); | ||
198 | goto failed; | ||
199 | } | ||
200 | |||
201 | /* guests */ | ||
202 | dir = hypfs_mkdir(sb, root, "systems"); | ||
203 | if (IS_ERR(dir)) { | ||
204 | rc = PTR_ERR(dir); | ||
205 | goto failed; | ||
206 | } | ||
207 | |||
208 | for (i = 0; i < count; i++) { | ||
209 | rc = hpyfs_vm_create_guest(sb, dir, &(data[i])); | ||
210 | if (rc) | ||
211 | goto failed; | ||
212 | } | ||
213 | diag2fc_free(data); | ||
214 | return 0; | ||
215 | |||
216 | failed: | ||
217 | diag2fc_free(data); | ||
218 | return rc; | ||
219 | } | ||
220 | |||
221 | int hypfs_vm_init(void) | ||
222 | { | ||
223 | if (diag2fc(0, all_guests, NULL) > 0) | ||
224 | guest_query = all_guests; | ||
225 | else if (diag2fc(0, local_guest, NULL) > 0) | ||
226 | guest_query = local_guest; | ||
227 | else | ||
228 | return -EACCES; | ||
229 | |||
230 | return 0; | ||
231 | } | ||
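
diag2fc_store() above has to cope with the guest list changing size between calls: a first DIAG 0x2fc with no buffer reports how much space the answer needs, the buffer is vmalloc'ed, and if the second, filling call does not succeed (for instance because another guest logged on in between) the buffer is thrown away and the whole exchange is repeated. The user-space mock below demonstrates that retry loop with a hypothetical fake_query() in place of the DIAG; names and sizes are invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RECORD_SIZE 16                  /* invented per-guest record size */

static int nrecords = 3;                /* records the "hypervisor" knows about */

/* toy stand-in for diag2fc(): sizing call with buf == NULL, filling call else */
static long fake_query(void *buf, long size)
{
        long needed = (long) nrecords * RECORD_SIZE;

        if (!buf || size < needed)
                return needed;          /* tell the caller how much to bring */
        memset(buf, 0, needed);         /* "fill in" the guest records */
        return 0;
}

static void *query_store(long *count)
{
        long size;
        void *data;

        for (;;) {
                size = fake_query(NULL, 0);     /* sizing call */
                data = malloc(size);
                if (!data)
                        return NULL;
                if (nrecords == 3)              /* simulate a guest logging on */
                        nrecords = 4;           /* between the two calls */
                if (fake_query(data, size) == 0)
                        break;                  /* buffer was large enough */
                free(data);                     /* it was not: size it again */
        }
        *count = size / RECORD_SIZE;
        return data;
}

int main(void)
{
        long count = 0;
        void *data = query_store(&count);

        printf("got %ld records\n", count);     /* prints 4 after one retry */
        free(data);
        return 0;
}
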
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index b6716c4b9934..a4fda7b53640 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <asm/ebcdic.h> | 20 | #include <asm/ebcdic.h> |
21 | #include "hypfs.h" | 21 | #include "hypfs.h" |
22 | #include "hypfs_diag.h" | ||
23 | 22 | ||
24 | #define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ | 23 | #define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ |
25 | #define TMP_SIZE 64 /* size of temporary buffers */ | 24 | #define TMP_SIZE 64 /* size of temporary buffers */ |
@@ -192,7 +191,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
192 | goto out; | 191 | goto out; |
193 | } | 192 | } |
194 | hypfs_delete_tree(sb->s_root); | 193 | hypfs_delete_tree(sb->s_root); |
195 | rc = hypfs_diag_create_files(sb, sb->s_root); | 194 | if (MACHINE_IS_VM) |
195 | rc = hypfs_vm_create_files(sb, sb->s_root); | ||
196 | else | ||
197 | rc = hypfs_diag_create_files(sb, sb->s_root); | ||
196 | if (rc) { | 198 | if (rc) { |
197 | printk(KERN_ERR "hypfs: Update failed\n"); | 199 | printk(KERN_ERR "hypfs: Update failed\n"); |
198 | hypfs_delete_tree(sb->s_root); | 200 | hypfs_delete_tree(sb->s_root); |
@@ -289,7 +291,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) | |||
289 | rc = -ENOMEM; | 291 | rc = -ENOMEM; |
290 | goto err_alloc; | 292 | goto err_alloc; |
291 | } | 293 | } |
292 | rc = hypfs_diag_create_files(sb, root_dentry); | 294 | if (MACHINE_IS_VM) |
295 | rc = hypfs_vm_create_files(sb, root_dentry); | ||
296 | else | ||
297 | rc = hypfs_diag_create_files(sb, root_dentry); | ||
293 | if (rc) | 298 | if (rc) |
294 | goto err_tree; | 299 | goto err_tree; |
295 | sbi->update_file = hypfs_create_update_file(sb, root_dentry); | 300 | sbi->update_file = hypfs_create_update_file(sb, root_dentry); |
@@ -462,11 +467,15 @@ static int __init hypfs_init(void) | |||
462 | { | 467 | { |
463 | int rc; | 468 | int rc; |
464 | 469 | ||
465 | if (MACHINE_IS_VM) | 470 | if (MACHINE_IS_VM) { |
466 | return -ENODATA; | 471 | if (hypfs_vm_init()) |
467 | if (hypfs_diag_init()) { | 472 | /* no diag 2fc, just exit */ |
468 | rc = -ENODATA; | 473 | return -ENODATA; |
469 | goto fail_diag; | 474 | } else { |
475 | if (hypfs_diag_init()) { | ||
476 | rc = -ENODATA; | ||
477 | goto fail_diag; | ||
478 | } | ||
470 | } | 479 | } |
471 | kset_set_kset_s(&s390_subsys, hypervisor_subsys); | 480 | kset_set_kset_s(&s390_subsys, hypervisor_subsys); |
472 | rc = subsystem_register(&s390_subsys); | 481 | rc = subsystem_register(&s390_subsys); |
@@ -480,7 +489,8 @@ static int __init hypfs_init(void) | |||
480 | fail_filesystem: | 489 | fail_filesystem: |
481 | subsystem_unregister(&s390_subsys); | 490 | subsystem_unregister(&s390_subsys); |
482 | fail_sysfs: | 491 | fail_sysfs: |
483 | hypfs_diag_exit(); | 492 | if (!MACHINE_IS_VM) |
493 | hypfs_diag_exit(); | ||
484 | fail_diag: | 494 | fail_diag: |
485 | printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); | 495 | printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); |
486 | return rc; | 496 | return rc; |
@@ -488,7 +498,8 @@ fail_diag: | |||
488 | 498 | ||
489 | static void __exit hypfs_exit(void) | 499 | static void __exit hypfs_exit(void) |
490 | { | 500 | { |
491 | hypfs_diag_exit(); | 501 | if (!MACHINE_IS_VM) |
502 | hypfs_diag_exit(); | ||
492 | unregister_filesystem(&hypfs_type); | 503 | unregister_filesystem(&hypfs_type); |
493 | subsystem_unregister(&s390_subsys); | 504 | subsystem_unregister(&s390_subsys); |
494 | } | 505 | } |
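
The same MACHINE_IS_VM test now appears three times in inode.c: in the update path, in fill_super and in the init/exit error handling (only the LPAR back end, hypfs_diag, has an exit routine, so hypfs_diag_exit() is skipped under z/VM). Conceptually it is a single back-end selector; the helper below is not part of the patch, just a compact way to read the dispatch, using the declarations added to hypfs.h above.

/* hypothetical helper, not in the patch: pick the hypervisor back end once */
static int hypfs_create_files(struct super_block *sb, struct dentry *root)
{
        if (MACHINE_IS_VM)
                return hypfs_vm_create_files(sb, root);  /* z/VM: DIAG 0x2fc */
        return hypfs_diag_create_files(sb, root);        /* LPAR data */
}
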
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index a81881c9b297..5492d25d7d69 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -4,9 +4,9 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | obj-y := bitmap.o traps.o time.o process.o reset.o \ | 7 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ |
8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
9 | semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o | 9 | semaphore.o s390_ext.o debug.o irq.o ipl.o |
10 | 10 | ||
11 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 11 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
12 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 12 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S new file mode 100644 index 000000000000..dc7e5259770f --- /dev/null +++ b/arch/s390/kernel/base.S | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/base.S | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006,2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | * Michael Holzheu <holzheu@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/lowcore.h> | ||
11 | |||
12 | #ifdef CONFIG_64BIT | ||
13 | |||
14 | .globl s390_base_mcck_handler | ||
15 | s390_base_mcck_handler: | ||
16 | basr %r13,0 | ||
17 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
18 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
19 | larl %r1,s390_base_mcck_handler_fn | ||
20 | lg %r1,0(%r1) | ||
21 | ltgr %r1,%r1 | ||
22 | jz 1f | ||
23 | basr %r14,%r1 | ||
24 | 1: la %r1,4095 | ||
25 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) | ||
26 | lpswe __LC_MCK_OLD_PSW | ||
27 | |||
28 | .section .bss | ||
29 | .globl s390_base_mcck_handler_fn | ||
30 | s390_base_mcck_handler_fn: | ||
31 | .quad 0 | ||
32 | .previous | ||
33 | |||
34 | .globl s390_base_ext_handler | ||
35 | s390_base_ext_handler: | ||
36 | stmg %r0,%r15,__LC_SAVE_AREA | ||
37 | basr %r13,0 | ||
38 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | ||
39 | larl %r1,s390_base_ext_handler_fn | ||
40 | lg %r1,0(%r1) | ||
41 | ltgr %r1,%r1 | ||
42 | jz 1f | ||
43 | basr %r14,%r1 | ||
44 | 1: lmg %r0,%r15,__LC_SAVE_AREA | ||
45 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | ||
46 | lpswe __LC_EXT_OLD_PSW | ||
47 | |||
48 | .section .bss | ||
49 | .globl s390_base_ext_handler_fn | ||
50 | s390_base_ext_handler_fn: | ||
51 | .quad 0 | ||
52 | .previous | ||
53 | |||
54 | .globl s390_base_pgm_handler | ||
55 | s390_base_pgm_handler: | ||
56 | stmg %r0,%r15,__LC_SAVE_AREA | ||
57 | basr %r13,0 | ||
58 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | ||
59 | larl %r1,s390_base_pgm_handler_fn | ||
60 | lg %r1,0(%r1) | ||
61 | ltgr %r1,%r1 | ||
62 | jz 1f | ||
63 | basr %r14,%r1 | ||
64 | lmg %r0,%r15,__LC_SAVE_AREA | ||
65 | lpswe __LC_PGM_OLD_PSW | ||
66 | 1: lpswe disabled_wait_psw-0b(%r13) | ||
67 | |||
68 | .align 8 | ||
69 | disabled_wait_psw: | ||
70 | .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler | ||
71 | |||
72 | .section .bss | ||
73 | .globl s390_base_pgm_handler_fn | ||
74 | s390_base_pgm_handler_fn: | ||
75 | .quad 0 | ||
76 | .previous | ||
77 | |||
78 | #else /* CONFIG_64BIT */ | ||
79 | |||
80 | .globl s390_base_mcck_handler | ||
81 | s390_base_mcck_handler: | ||
82 | basr %r13,0 | ||
83 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
84 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
85 | l %r1,2f-0b(%r13) | ||
86 | l %r1,0(%r1) | ||
87 | ltr %r1,%r1 | ||
88 | jz 1f | ||
89 | basr %r14,%r1 | ||
90 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
91 | lpsw __LC_MCK_OLD_PSW | ||
92 | |||
93 | 2: .long s390_base_mcck_handler_fn | ||
94 | |||
95 | .section .bss | ||
96 | .globl s390_base_mcck_handler_fn | ||
97 | s390_base_mcck_handler_fn: | ||
98 | .long 0 | ||
99 | .previous | ||
100 | |||
101 | .globl s390_base_ext_handler | ||
102 | s390_base_ext_handler: | ||
103 | stm %r0,%r15,__LC_SAVE_AREA | ||
104 | basr %r13,0 | ||
105 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
106 | l %r1,2f-0b(%r13) | ||
107 | l %r1,0(%r1) | ||
108 | ltr %r1,%r1 | ||
109 | jz 1f | ||
110 | basr %r14,%r1 | ||
111 | 1: lm %r0,%r15,__LC_SAVE_AREA | ||
112 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | ||
113 | lpsw __LC_EXT_OLD_PSW | ||
114 | |||
115 | 2: .long s390_base_ext_handler_fn | ||
116 | |||
117 | .section .bss | ||
118 | .globl s390_base_ext_handler_fn | ||
119 | s390_base_ext_handler_fn: | ||
120 | .long 0 | ||
121 | .previous | ||
122 | |||
123 | .globl s390_base_pgm_handler | ||
124 | s390_base_pgm_handler: | ||
125 | stm %r0,%r15,__LC_SAVE_AREA | ||
126 | basr %r13,0 | ||
127 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
128 | l %r1,2f-0b(%r13) | ||
129 | l %r1,0(%r1) | ||
130 | ltr %r1,%r1 | ||
131 | jz 1f | ||
132 | basr %r14,%r1 | ||
133 | lm %r0,%r15,__LC_SAVE_AREA | ||
134 | lpsw __LC_PGM_OLD_PSW | ||
135 | |||
136 | 1: lpsw disabled_wait_psw-0b(%r13) | ||
137 | |||
138 | 2: .long s390_base_pgm_handler_fn | ||
139 | |||
140 | disabled_wait_psw: | ||
141 | .align 8 | ||
142 | .long 0x000a0000,0x00000000 + s390_base_pgm_handler | ||
143 | |||
144 | .section .bss | ||
145 | .globl s390_base_pgm_handler_fn | ||
146 | s390_base_pgm_handler_fn: | ||
147 | .long 0 | ||
148 | .previous | ||
149 | |||
150 | #endif /* CONFIG_64BIT */ | ||
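
All three entry points above follow one pattern: save the registers, load a function pointer from a .bss slot (s390_base_*_handler_fn), call it if it is non-NULL, then restore and resume through the old PSW; the 31-bit and 64-bit halves differ only in instruction forms. C code hooks in by storing a callback in the slot and pointing the corresponding new PSW at the stub, which is what setup_lowcore_early() in early.c further down does for the program-check path. A minimal sketch of that hookup follows; the extern declarations and header choices are assumptions here, the kernel keeps its own declarations in a header.

#include <asm/lowcore.h>        /* S390_lowcore, psw_t (assumed location) */
#include <asm/ptrace.h>         /* PSW_BASE_BITS, PSW_DEFAULT_KEY, PSW_ADDR_AMODE */

/* provided by arch/s390/kernel/base.S */
extern void s390_base_pgm_handler(void);
extern void (*s390_base_pgm_handler_fn)(void);

static void my_pgm_callback(void)
{
        /* runs from the base.S stub with all registers saved */
}

static void hook_base_pgm_handler(void)
{
        psw_t psw;

        s390_base_pgm_handler_fn = my_pgm_callback;
        psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
        psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
        S390_lowcore.program_new_psw = psw;
}
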
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c index 5c46054195cb..f1e40ca00d8d 100644 --- a/arch/s390/kernel/binfmt_elf32.c +++ b/arch/s390/kernel/binfmt_elf32.c | |||
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>"); | |||
192 | 192 | ||
193 | #undef cputime_to_timeval | 193 | #undef cputime_to_timeval |
194 | #define cputime_to_timeval cputime_to_compat_timeval | 194 | #define cputime_to_timeval cputime_to_compat_timeval |
195 | static __inline__ void | 195 | static inline void |
196 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | 196 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) |
197 | { | 197 | { |
198 | value->tv_usec = cputime % 1000000; | 198 | value->tv_usec = cputime % 1000000; |
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c index 71d27c493568..914d49444f92 100644 --- a/arch/s390/kernel/compat_exec_domain.c +++ b/arch/s390/kernel/compat_exec_domain.c | |||
@@ -12,10 +12,9 @@ | |||
12 | #include <linux/personality.h> | 12 | #include <linux/personality.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | 14 | ||
15 | struct exec_domain s390_exec_domain; | 15 | static struct exec_domain s390_exec_domain; |
16 | 16 | ||
17 | static int __init | 17 | static int __init s390_init (void) |
18 | s390_init (void) | ||
19 | { | 18 | { |
20 | s390_exec_domain.name = "Linux/s390"; | 19 | s390_exec_domain.name = "Linux/s390"; |
21 | s390_exec_domain.handler = NULL; | 20 | s390_exec_domain.handler = NULL; |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 5b33f823863a..666bb6daa148 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -69,6 +69,12 @@ | |||
69 | 69 | ||
70 | #include "compat_linux.h" | 70 | #include "compat_linux.h" |
71 | 71 | ||
72 | long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
73 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
74 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | ||
75 | long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | | ||
76 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
77 | PSW32_MASK_PSTATE); | ||
72 | 78 | ||
73 | /* For this source file, we want overflow handling. */ | 79 | /* For this source file, we want overflow handling. */ |
74 | 80 | ||
@@ -416,7 +422,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info) | |||
416 | mm_segment_t old_fs = get_fs (); | 422 | mm_segment_t old_fs = get_fs (); |
417 | 423 | ||
418 | set_fs (KERNEL_DS); | 424 | set_fs (KERNEL_DS); |
419 | ret = sys_sysinfo((struct sysinfo __user *) &s); | 425 | ret = sys_sysinfo((struct sysinfo __force __user *) &s); |
420 | set_fs (old_fs); | 426 | set_fs (old_fs); |
421 | err = put_user (s.uptime, &info->uptime); | 427 | err = put_user (s.uptime, &info->uptime); |
422 | err |= __put_user (s.loads[0], &info->loads[0]); | 428 | err |= __put_user (s.loads[0], &info->loads[0]); |
@@ -445,7 +451,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, | |||
445 | mm_segment_t old_fs = get_fs (); | 451 | mm_segment_t old_fs = get_fs (); |
446 | 452 | ||
447 | set_fs (KERNEL_DS); | 453 | set_fs (KERNEL_DS); |
448 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); | 454 | ret = sys_sched_rr_get_interval(pid, |
455 | (struct timespec __force __user *) &t); | ||
449 | set_fs (old_fs); | 456 | set_fs (old_fs); |
450 | if (put_compat_timespec(&t, interval)) | 457 | if (put_compat_timespec(&t, interval)) |
451 | return -EFAULT; | 458 | return -EFAULT; |
@@ -472,8 +479,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
472 | } | 479 | } |
473 | set_fs (KERNEL_DS); | 480 | set_fs (KERNEL_DS); |
474 | ret = sys_rt_sigprocmask(how, | 481 | ret = sys_rt_sigprocmask(how, |
475 | set ? (sigset_t __user *) &s : NULL, | 482 | set ? (sigset_t __force __user *) &s : NULL, |
476 | oset ? (sigset_t __user *) &s : NULL, | 483 | oset ? (sigset_t __force __user *) &s : NULL, |
477 | sigsetsize); | 484 | sigsetsize); |
478 | set_fs (old_fs); | 485 | set_fs (old_fs); |
479 | if (ret) return ret; | 486 | if (ret) return ret; |
@@ -499,7 +506,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, | |||
499 | mm_segment_t old_fs = get_fs(); | 506 | mm_segment_t old_fs = get_fs(); |
500 | 507 | ||
501 | set_fs (KERNEL_DS); | 508 | set_fs (KERNEL_DS); |
502 | ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); | 509 | ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); |
503 | set_fs (old_fs); | 510 | set_fs (old_fs); |
504 | if (!ret) { | 511 | if (!ret) { |
505 | switch (_NSIG_WORDS) { | 512 | switch (_NSIG_WORDS) { |
@@ -524,7 +531,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) | |||
524 | if (copy_siginfo_from_user32(&info, uinfo)) | 531 | if (copy_siginfo_from_user32(&info, uinfo)) |
525 | return -EFAULT; | 532 | return -EFAULT; |
526 | set_fs (KERNEL_DS); | 533 | set_fs (KERNEL_DS); |
527 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); | 534 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info); |
528 | set_fs (old_fs); | 535 | set_fs (old_fs); |
529 | return ret; | 536 | return ret; |
530 | } | 537 | } |
@@ -682,7 +689,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse | |||
682 | 689 | ||
683 | set_fs(KERNEL_DS); | 690 | set_fs(KERNEL_DS); |
684 | ret = sys_sendfile(out_fd, in_fd, | 691 | ret = sys_sendfile(out_fd, in_fd, |
685 | offset ? (off_t __user *) &of : NULL, count); | 692 | offset ? (off_t __force __user *) &of : NULL, count); |
686 | set_fs(old_fs); | 693 | set_fs(old_fs); |
687 | 694 | ||
688 | if (offset && put_user(of, offset)) | 695 | if (offset && put_user(of, offset)) |
@@ -703,7 +710,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd, | |||
703 | 710 | ||
704 | set_fs(KERNEL_DS); | 711 | set_fs(KERNEL_DS); |
705 | ret = sys_sendfile64(out_fd, in_fd, | 712 | ret = sys_sendfile64(out_fd, in_fd, |
706 | offset ? (loff_t __user *) &lof : NULL, count); | 713 | offset ? (loff_t __force __user *) &lof : NULL, |
714 | count); | ||
707 | set_fs(old_fs); | 715 | set_fs(old_fs); |
708 | 716 | ||
709 | if (offset && put_user(lof, offset)) | 717 | if (offset && put_user(lof, offset)) |
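
The recurring change in this file is annotation only: these compat wrappers call the native syscall on a kernel stack buffer while temporarily running under set_fs(KERNEL_DS), so casting a kernel pointer to a __user type is intentional, and __force tells sparse not to warn about it. Every wrapper touched here has roughly the shape sketched below; sys32_example(), sys_native() and the two structures are placeholders for the illustration, not real kernel interfaces.

/* placeholder 64-bit and 31-bit layouts, invented for the sketch */
struct native_thing { long field; };
struct compat_thing { int field; };

asmlinkage long sys32_example(struct compat_thing __user *u32p)
{
        struct native_thing k;                  /* kernel stack scratch copy */
        mm_segment_t old_fs = get_fs();
        long ret;

        set_fs(KERNEL_DS);                      /* let &k pass the user-copy checks */
        ret = sys_native((struct native_thing __force __user *) &k);
        set_fs(old_fs);
        if (ret)
                return ret;

        /* translate the result into the 31-bit layout by hand */
        if (put_user((int) k.field, &u32p->field))
                return -EFAULT;
        return 0;
}
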
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index 1a18e29668ef..e89f8c0c42a0 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h | |||
@@ -115,37 +115,6 @@ typedef struct | |||
115 | __u32 addr; | 115 | __u32 addr; |
116 | } _psw_t32 __attribute__ ((aligned(8))); | 116 | } _psw_t32 __attribute__ ((aligned(8))); |
117 | 117 | ||
118 | #define PSW32_MASK_PER 0x40000000UL | ||
119 | #define PSW32_MASK_DAT 0x04000000UL | ||
120 | #define PSW32_MASK_IO 0x02000000UL | ||
121 | #define PSW32_MASK_EXT 0x01000000UL | ||
122 | #define PSW32_MASK_KEY 0x00F00000UL | ||
123 | #define PSW32_MASK_MCHECK 0x00040000UL | ||
124 | #define PSW32_MASK_WAIT 0x00020000UL | ||
125 | #define PSW32_MASK_PSTATE 0x00010000UL | ||
126 | #define PSW32_MASK_ASC 0x0000C000UL | ||
127 | #define PSW32_MASK_CC 0x00003000UL | ||
128 | #define PSW32_MASK_PM 0x00000f00UL | ||
129 | |||
130 | #define PSW32_ADDR_AMODE31 0x80000000UL | ||
131 | #define PSW32_ADDR_INSN 0x7FFFFFFFUL | ||
132 | |||
133 | #define PSW32_BASE_BITS 0x00080000UL | ||
134 | |||
135 | #define PSW32_ASC_PRIMARY 0x00000000UL | ||
136 | #define PSW32_ASC_ACCREG 0x00004000UL | ||
137 | #define PSW32_ASC_SECONDARY 0x00008000UL | ||
138 | #define PSW32_ASC_HOME 0x0000C000UL | ||
139 | |||
140 | #define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \ | ||
141 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \ | ||
142 | PSW32_MASK_PSTATE) | ||
143 | |||
144 | #define PSW32_MASK_MERGE(CURRENT,NEW) \ | ||
145 | (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \ | ||
146 | ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM))) | ||
147 | |||
148 | |||
149 | typedef struct | 118 | typedef struct |
150 | { | 119 | { |
151 | _psw_t32 psw; | 120 | _psw_t32 psw; |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 861888ab8c13..887a9881d0d0 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, | |||
275 | } | 275 | } |
276 | 276 | ||
277 | set_fs (KERNEL_DS); | 277 | set_fs (KERNEL_DS); |
278 | ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), | 278 | ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL), |
279 | (stack_t __user *) (uoss ? &koss : NULL), | 279 | (stack_t __force __user *) (uoss ? &koss : NULL), |
280 | regs->gprs[15]); | 280 | regs->gprs[15]); |
281 | set_fs (old_fs); | 281 | set_fs (old_fs); |
282 | 282 | ||
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) | |||
298 | _s390_regs_common32 regs32; | 298 | _s390_regs_common32 regs32; |
299 | int err, i; | 299 | int err, i; |
300 | 300 | ||
301 | regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, | 301 | regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits, |
302 | (__u32)(regs->psw.mask >> 32)); | 302 | (__u32)(regs->psw.mask >> 32)); |
303 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; | 303 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; |
304 | for (i = 0; i < NUM_GPRS; i++) | 304 | for (i = 0; i < NUM_GPRS; i++) |
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) | |||
401 | goto badframe; | 401 | goto badframe; |
402 | 402 | ||
403 | set_fs (KERNEL_DS); | 403 | set_fs (KERNEL_DS); |
404 | do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); | 404 | do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]); |
405 | set_fs (old_fs); | 405 | set_fs (old_fs); |
406 | 406 | ||
407 | return regs->gprs[2]; | 407 | return regs->gprs[2]; |
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index a5972f1541fe..6c89f30c8e31 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
17 | #include <asm/cpcmd.h> | 17 | #include <asm/cpcmd.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | #include <asm/io.h> | ||
19 | 20 | ||
20 | static DEFINE_SPINLOCK(cpcmd_lock); | 21 | static DEFINE_SPINLOCK(cpcmd_lock); |
21 | static char cpcmd_buf[241]; | 22 | static char cpcmd_buf[241]; |
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
88 | int len; | 89 | int len; |
89 | unsigned long flags; | 90 | unsigned long flags; |
90 | 91 | ||
91 | if ((rlen == 0) || (response == NULL) | 92 | if ((virt_to_phys(response) != (unsigned long) response) || |
92 | || !((unsigned long)response >> 31)) { | 93 | (((unsigned long)response + rlen) >> 31)) { |
93 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
94 | len = __cpcmd(cmd, response, rlen, response_code); | ||
95 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
96 | } | ||
97 | else { | ||
98 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); | 94 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); |
99 | if (!lowbuf) { | 95 | if (!lowbuf) { |
100 | printk(KERN_WARNING | 96 | printk(KERN_WARNING |
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
106 | spin_unlock_irqrestore(&cpcmd_lock, flags); | 102 | spin_unlock_irqrestore(&cpcmd_lock, flags); |
107 | memcpy(response, lowbuf, rlen); | 103 | memcpy(response, lowbuf, rlen); |
108 | kfree(lowbuf); | 104 | kfree(lowbuf); |
105 | } else { | ||
106 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
107 | len = __cpcmd(cmd, response, rlen, response_code); | ||
108 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
109 | } | 109 | } |
110 | return len; | 110 | return len; |
111 | } | 111 | } |
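
The rewritten test in cpcmd() decides up front whether the caller's buffer can be handed to CP directly: __cpcmd() passes a real address to the diagnose, so the response buffer must be identity mapped and must not reach beyond 2 GB. Otherwise the reply is taken into a temporary GFP_DMA buffer and copied out. Pieced back together from the two hunks above, with the allocation-failure warning reduced to a plain -ENOMEM, the function now reads roughly as follows.

int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
        unsigned long flags;
        char *lowbuf;
        int len;

        /* buffer not identity mapped, or reaching beyond 2 GB? bounce it */
        if ((virt_to_phys(response) != (unsigned long) response) ||
            (((unsigned long) response + rlen) >> 31)) {
                lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
                if (!lowbuf)
                        return -ENOMEM;         /* the real code also warns */
                spin_lock_irqsave(&cpcmd_lock, flags);
                len = __cpcmd(cmd, lowbuf, rlen, response_code);
                spin_unlock_irqrestore(&cpcmd_lock, flags);
                memcpy(response, lowbuf, rlen);
                kfree(lowbuf);
        } else {
                spin_lock_irqsave(&cpcmd_lock, flags);
                len = __cpcmd(cmd, response, rlen, response_code);
                spin_unlock_irqrestore(&cpcmd_lock, flags);
        }
        return len;
}
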
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c index 926cceeae0fa..8cc7c9fa64f5 100644 --- a/arch/s390/kernel/crash.c +++ b/arch/s390/kernel/crash.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/threads.h> | 10 | #include <linux/threads.h> |
11 | #include <linux/kexec.h> | 11 | #include <linux/kexec.h> |
12 | #include <linux/reboot.h> | ||
12 | 13 | ||
13 | void machine_crash_shutdown(struct pt_regs *regs) | 14 | void machine_crash_shutdown(struct pt_regs *regs) |
14 | { | 15 | { |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index bb57bc0e3fc8..f4b62df02aa2 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = { | |||
120 | NULL | 120 | NULL |
121 | }; | 121 | }; |
122 | 122 | ||
123 | struct debug_view debug_level_view = { | 123 | static struct debug_view debug_level_view = { |
124 | "level", | 124 | "level", |
125 | &debug_prolog_level_fn, | 125 | &debug_prolog_level_fn, |
126 | NULL, | 126 | NULL, |
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = { | |||
129 | NULL | 129 | NULL |
130 | }; | 130 | }; |
131 | 131 | ||
132 | struct debug_view debug_pages_view = { | 132 | static struct debug_view debug_pages_view = { |
133 | "pages", | 133 | "pages", |
134 | &debug_prolog_pages_fn, | 134 | &debug_prolog_pages_fn, |
135 | NULL, | 135 | NULL, |
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = { | |||
138 | NULL | 138 | NULL |
139 | }; | 139 | }; |
140 | 140 | ||
141 | struct debug_view debug_flush_view = { | 141 | static struct debug_view debug_flush_view = { |
142 | "flush", | 142 | "flush", |
143 | NULL, | 143 | NULL, |
144 | NULL, | 144 | NULL, |
@@ -156,14 +156,14 @@ struct debug_view debug_sprintf_view = { | |||
156 | NULL | 156 | NULL |
157 | }; | 157 | }; |
158 | 158 | ||
159 | 159 | /* used by dump analysis tools to determine version of debug feature */ | |
160 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; | 160 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; |
161 | 161 | ||
162 | /* static globals */ | 162 | /* static globals */ |
163 | 163 | ||
164 | static debug_info_t *debug_area_first = NULL; | 164 | static debug_info_t *debug_area_first = NULL; |
165 | static debug_info_t *debug_area_last = NULL; | 165 | static debug_info_t *debug_area_last = NULL; |
166 | DECLARE_MUTEX(debug_lock); | 166 | static DECLARE_MUTEX(debug_lock); |
167 | 167 | ||
168 | static int initialized; | 168 | static int initialized; |
169 | 169 | ||
@@ -905,7 +905,7 @@ static struct ctl_table s390dbf_dir_table[] = { | |||
905 | { .ctl_name = 0 } | 905 | { .ctl_name = 0 } |
906 | }; | 906 | }; |
907 | 907 | ||
908 | struct ctl_table_header *s390dbf_sysctl_header; | 908 | static struct ctl_table_header *s390dbf_sysctl_header; |
909 | 909 | ||
910 | void | 910 | void |
911 | debug_stop_all(void) | 911 | debug_stop_all(void) |
@@ -1300,8 +1300,7 @@ out: | |||
1300 | * flushes debug areas | 1300 | * flushes debug areas |
1301 | */ | 1301 | */ |
1302 | 1302 | ||
1303 | void | 1303 | static void debug_flush(debug_info_t* id, int area) |
1304 | debug_flush(debug_info_t* id, int area) | ||
1305 | { | 1304 | { |
1306 | unsigned long flags; | 1305 | unsigned long flags; |
1307 | int i,j; | 1306 | int i,j; |
@@ -1511,8 +1510,7 @@ out: | |||
1511 | /* | 1510 | /* |
1512 | * clean up module | 1511 | * clean up module |
1513 | */ | 1512 | */ |
1514 | void | 1513 | static void __exit debug_exit(void) |
1515 | __exit debug_exit(void) | ||
1516 | { | 1514 | { |
1517 | debugfs_remove(debug_debugfs_root_entry); | 1515 | debugfs_remove(debug_debugfs_root_entry); |
1518 | unregister_sysctl_table(s390dbf_sysctl_header); | 1516 | unregister_sysctl_table(s390dbf_sysctl_header); |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c new file mode 100644 index 000000000000..e518dd53eff5 --- /dev/null +++ b/arch/s390/kernel/early.c | |||
@@ -0,0 +1,306 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/early.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Hongjie Yang <hongjie@us.ibm.com>, | ||
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/lockdep.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/pfn.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | #include <asm/lowcore.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/sections.h> | ||
20 | #include <asm/setup.h> | ||
21 | #include <asm/cpcmd.h> | ||
22 | #include <asm/sclp.h> | ||
23 | |||
24 | /* | ||
25 | * Create a Kernel NSS if the SAVESYS= parameter is defined | ||
26 | */ | ||
27 | #define DEFSYS_CMD_SIZE 96 | ||
28 | #define SAVESYS_CMD_SIZE 32 | ||
29 | |||
30 | char kernel_nss_name[NSS_NAME_SIZE + 1]; | ||
31 | |||
32 | #ifdef CONFIG_SHARED_KERNEL | ||
33 | static noinline __init void create_kernel_nss(void) | ||
34 | { | ||
35 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; | ||
36 | #ifdef CONFIG_BLK_DEV_INITRD | ||
37 | unsigned int sinitrd_pfn, einitrd_pfn; | ||
38 | #endif | ||
39 | int response; | ||
40 | char *savesys_ptr; | ||
41 | char upper_command_line[COMMAND_LINE_SIZE]; | ||
42 | char defsys_cmd[DEFSYS_CMD_SIZE]; | ||
43 | char savesys_cmd[SAVESYS_CMD_SIZE]; | ||
44 | |||
45 | /* Do nothing if we are not running under VM */ | ||
46 | if (!MACHINE_IS_VM) | ||
47 | return; | ||
48 | |||
49 | /* Convert COMMAND_LINE to upper case */ | ||
50 | for (i = 0; i < strlen(COMMAND_LINE); i++) | ||
51 | upper_command_line[i] = toupper(COMMAND_LINE[i]); | ||
52 | |||
53 | savesys_ptr = strstr(upper_command_line, "SAVESYS="); | ||
54 | |||
55 | if (!savesys_ptr) | ||
56 | return; | ||
57 | |||
58 | savesys_ptr += 8; /* Point to the beginning of the NSS name */ | ||
59 | for (i = 0; i < NSS_NAME_SIZE; i++) { | ||
60 | if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0') | ||
61 | break; | ||
62 | kernel_nss_name[i] = savesys_ptr[i]; | ||
63 | } | ||
64 | |||
65 | stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
66 | eshared_pfn = PFN_DOWN(__pa(&_eshared)); | ||
67 | end_pfn = PFN_UP(__pa(&_end)); | ||
68 | min_size = end_pfn << 2; | ||
69 | |||
70 | sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X", | ||
71 | kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1, | ||
72 | eshared_pfn, end_pfn); | ||
73 | |||
74 | #ifdef CONFIG_BLK_DEV_INITRD | ||
75 | if (INITRD_START && INITRD_SIZE) { | ||
76 | sinitrd_pfn = PFN_DOWN(__pa(INITRD_START)); | ||
77 | einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE)); | ||
78 | min_size = einitrd_pfn << 2; | ||
79 | sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd, | ||
80 | sinitrd_pfn, einitrd_pfn); | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size); | ||
85 | sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", | ||
86 | kernel_nss_name, kernel_nss_name); | ||
87 | |||
88 | __cpcmd(defsys_cmd, NULL, 0, &response); | ||
89 | |||
90 | if (response != 0) | ||
91 | return; | ||
92 | |||
93 | __cpcmd(savesys_cmd, NULL, 0, &response); | ||
94 | |||
95 | if (response != strlen(savesys_cmd)) | ||
96 | return; | ||
97 | |||
98 | ipl_flags = IPL_NSS_VALID; | ||
99 | } | ||
100 | |||
101 | #else /* CONFIG_SHARED_KERNEL */ | ||
102 | |||
103 | static inline void create_kernel_nss(void) { } | ||
104 | |||
105 | #endif /* CONFIG_SHARED_KERNEL */ | ||
106 | |||
107 | /* | ||
108 | * Clear bss memory | ||
109 | */ | ||
110 | static noinline __init void clear_bss_section(void) | ||
111 | { | ||
112 | memset(__bss_start, 0, _end - __bss_start); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Initialize storage key for kernel pages | ||
117 | */ | ||
118 | static noinline __init void init_kernel_storage_key(void) | ||
119 | { | ||
120 | unsigned long end_pfn, init_pfn; | ||
121 | |||
122 | end_pfn = PFN_UP(__pa(&_end)); | ||
123 | |||
124 | for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) | ||
125 | page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); | ||
126 | } | ||
127 | |||
128 | static noinline __init void detect_machine_type(void) | ||
129 | { | ||
130 | struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; | ||
131 | |||
132 | asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id)); | ||
133 | |||
134 | /* Running under z/VM ? */ | ||
135 | if (cpuinfo->cpu_id.version == 0xff) | ||
136 | machine_flags |= 1; | ||
137 | |||
138 | /* Running on a P/390 ? */ | ||
139 | if (cpuinfo->cpu_id.machine == 0x7490) | ||
140 | machine_flags |= 4; | ||
141 | } | ||
142 | |||
143 | static noinline __init int memory_fast_detect(void) | ||
144 | { | ||
145 | |||
146 | unsigned long val0 = 0; | ||
147 | unsigned long val1 = 0xc; | ||
148 | int ret = -ENOSYS; | ||
149 | |||
150 | if (ipl_flags & IPL_NSS_VALID) | ||
151 | return -ENOSYS; | ||
152 | |||
153 | asm volatile( | ||
154 | " diag %1,%2,0x260\n" | ||
155 | "0: lhi %0,0\n" | ||
156 | "1:\n" | ||
157 | EX_TABLE(0b,1b) | ||
158 | : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc"); | ||
159 | |||
160 | if (ret || val0 != val1) | ||
161 | return -ENOSYS; | ||
162 | |||
163 | memory_chunk[0].size = val0; | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | #define ADDR2G (1UL << 31) | ||
168 | |||
169 | static noinline __init unsigned long sclp_memory_detect(void) | ||
170 | { | ||
171 | struct sclp_readinfo_sccb *sccb; | ||
172 | unsigned long long memsize; | ||
173 | |||
174 | sccb = &s390_readinfo_sccb; | ||
175 | |||
176 | if (sccb->header.response_code != 0x10) | ||
177 | return 0; | ||
178 | |||
179 | if (sccb->rnsize) | ||
180 | memsize = sccb->rnsize << 20; | ||
181 | else | ||
182 | memsize = sccb->rnsize2 << 20; | ||
183 | if (sccb->rnmax) | ||
184 | memsize *= sccb->rnmax; | ||
185 | else | ||
186 | memsize *= sccb->rnmax2; | ||
187 | #ifndef CONFIG_64BIT | ||
188 | /* | ||
189 | * Can't deal with more than 2G in 31 bit addressing mode, so | ||
190 | * limit the value in order to avoid strange side effects. | ||
191 | */ | ||
192 | if (memsize > ADDR2G) | ||
193 | memsize = ADDR2G; | ||
194 | #endif | ||
195 | return (unsigned long) memsize; | ||
196 | } | ||
197 | |||
198 | static inline __init unsigned long __tprot(unsigned long addr) | ||
199 | { | ||
200 | int cc = -1; | ||
201 | |||
202 | asm volatile( | ||
203 | " tprot 0(%1),0\n" | ||
204 | "0: ipm %0\n" | ||
205 | " srl %0,28\n" | ||
206 | "1:\n" | ||
207 | EX_TABLE(0b,1b) | ||
208 | : "+d" (cc) : "a" (addr) : "cc"); | ||
209 | return (unsigned long)cc; | ||
210 | } | ||
211 | |||
212 | /* Checking memory in 128KB increments. */ | ||
213 | #define CHUNK_INCR (1UL << 17) | ||
214 | |||
215 | static noinline __init void find_memory_chunks(unsigned long memsize) | ||
216 | { | ||
217 | unsigned long addr = 0, old_addr = 0; | ||
218 | unsigned long old_cc = CHUNK_READ_WRITE; | ||
219 | unsigned long cc; | ||
220 | int chunk = 0; | ||
221 | |||
222 | while (chunk < MEMORY_CHUNKS) { | ||
223 | cc = __tprot(addr); | ||
224 | while (cc == old_cc) { | ||
225 | addr += CHUNK_INCR; | ||
226 | cc = __tprot(addr); | ||
227 | #ifndef CONFIG_64BIT | ||
228 | if (addr == ADDR2G) | ||
229 | break; | ||
230 | #endif | ||
231 | } | ||
232 | |||
233 | if (old_addr != addr && | ||
234 | (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) { | ||
235 | memory_chunk[chunk].addr = old_addr; | ||
236 | memory_chunk[chunk].size = addr - old_addr; | ||
237 | memory_chunk[chunk].type = old_cc; | ||
238 | chunk++; | ||
239 | } | ||
240 | |||
241 | old_addr = addr; | ||
242 | old_cc = cc; | ||
243 | |||
244 | #ifndef CONFIG_64BIT | ||
245 | if (addr == ADDR2G) | ||
246 | break; | ||
247 | #endif | ||
248 | /* | ||
249 | * Finish memory detection at the first hole, unless | ||
250 | * - we reached the hsa -> skip it. | ||
251 | * - we know there must be more. | ||
252 | */ | ||
253 | if (cc == -1UL && !memsize && old_addr != ADDR2G) | ||
254 | break; | ||
255 | if (memsize && addr >= memsize) | ||
256 | break; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | static __init void early_pgm_check_handler(void) | ||
261 | { | ||
262 | unsigned long addr; | ||
263 | const struct exception_table_entry *fixup; | ||
264 | |||
265 | addr = S390_lowcore.program_old_psw.addr; | ||
266 | fixup = search_exception_tables(addr & PSW_ADDR_INSN); | ||
267 | if (!fixup) | ||
268 | disabled_wait(0); | ||
269 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; | ||
270 | } | ||
271 | |||
272 | static noinline __init void setup_lowcore_early(void) | ||
273 | { | ||
274 | psw_t psw; | ||
275 | |||
276 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | ||
277 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; | ||
278 | S390_lowcore.external_new_psw = psw; | ||
279 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | ||
280 | S390_lowcore.program_new_psw = psw; | ||
281 | s390_base_pgm_handler_fn = early_pgm_check_handler; | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * Save ipl parameters, clear bss memory, initialize storage keys | ||
286 | * and create a kernel NSS at startup if the SAVESYS= parm is defined | ||
287 | */ | ||
288 | void __init startup_init(void) | ||
289 | { | ||
290 | unsigned long memsize; | ||
291 | |||
292 | ipl_save_parameters(); | ||
293 | clear_bss_section(); | ||
294 | init_kernel_storage_key(); | ||
295 | lockdep_init(); | ||
296 | lockdep_off(); | ||
297 | detect_machine_type(); | ||
298 | create_kernel_nss(); | ||
299 | sort_main_extable(); | ||
300 | setup_lowcore_early(); | ||
301 | sclp_readinfo_early(); | ||
302 | memsize = sclp_memory_detect(); | ||
303 | if (memory_fast_detect() < 0) | ||
304 | find_memory_chunks(memsize); | ||
305 | lockdep_on(); | ||
306 | } | ||
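
The functions above replace work that the head31.S assembly below used to do at boot. As a concrete illustration of create_kernel_nss(), with purely invented values (SAVESYS=LNXNSS on the command line, _stext at page frame 0x10, _eshared at 0x200, _end at 0x400, no initrd) the two CP command strings it builds and issues would be:

    DEFSYS LNXNSS 00000-0000F EW 00010-001FF SR 00200-00400 EW MINSIZE=0004096K

    SAVESYS LNXNSS 
     IPL LNXNSS

The kernel text between _stext and _eshared ends up as the shared read-only (SR) range, the pages before and after it stay exclusive-writable (EW), and MINSIZE is the page count times 4 in KiB (0x400 pages gives MINSIZE=0004096K here). The SAVESYS buffer carries a literal newline, so CP saves the system and then immediately IPLs the freshly created NSS.
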
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c index bb0f973137f0..cc0dc609d738 100644 --- a/arch/s390/kernel/ebcdic.c +++ b/arch/s390/kernel/ebcdic.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <asm/types.h> | 13 | #include <asm/types.h> |
14 | #include <asm/ebcdic.h> | ||
14 | 15 | ||
15 | /* | 16 | /* |
16 | * ASCII (IBM PC 437) -> EBCDIC 037 | 17 | * ASCII (IBM PC 437) -> EBCDIC 037 |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index eca507050e47..453fd3b4edea 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -51,176 +51,15 @@ startup_continue: | |||
51 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 51 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
52 | ahi %r15,-96 | 52 | ahi %r15,-96 |
53 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | 53 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain |
54 | |||
55 | l %r14,.Lipl_save_parameters-.LPG1(%r13) | ||
56 | basr %r14,%r14 | ||
57 | # | 54 | # |
58 | # clear bss memory | 55 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
56 | # and create a kernel NSS if the SAVESYS= parm is defined | ||
59 | # | 57 | # |
60 | l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss | 58 | l %r14,.Lstartup_init-.LPG1(%r13) |
61 | l %r3,.Lbss_end-.LPG1(%r13) # end of bss | 59 | basr %r14,%r14 |
62 | sr %r3,%r2 # length of bss | ||
63 | sr %r4,%r4 | ||
64 | sr %r5,%r5 # set src,length and pad to zero | ||
65 | sr %r0,%r0 | ||
66 | mvcle %r2,%r4,0 # clear mem | ||
67 | jo .-4 # branch back, if not finish | ||
68 | |||
69 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | ||
70 | .Lservicecall: | ||
71 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | ||
72 | |||
73 | stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0 | ||
74 | la %r1,0x200 # set bit 22 | ||
75 | o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1 | ||
76 | st %r1,.Lcr-.LPG1(%r13) | ||
77 | lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0 | ||
78 | |||
79 | mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw | ||
80 | la %r1, .Lsclph-.LPG1(%r13) | ||
81 | a %r1,__LC_EXT_NEW_PSW+4 # set handler | ||
82 | st %r1,__LC_EXT_NEW_PSW+4 | ||
83 | |||
84 | l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff | ||
85 | lr %r1,%r4 # our sccb | ||
86 | .insn rre,0xb2200000,%r2,%r1 # service call | ||
87 | ipm %r1 | ||
88 | srl %r1,28 # get cc code | ||
89 | xr %r3, %r3 | ||
90 | chi %r1,3 | ||
91 | be .Lfchunk-.LPG1(%r13) # leave | ||
92 | chi %r1,2 | ||
93 | be .Lservicecall-.LPG1(%r13) | ||
94 | lpsw .Lwaitsclp-.LPG1(%r13) | ||
95 | .Lsclph: | ||
96 | lh %r1,.Lsccbr-.Lsccb(%r4) | ||
97 | chi %r1,0x10 # 0x0010 is the success code | ||
98 | je .Lprocsccb # let's process the sccb | ||
99 | chi %r1,0x1f0 | ||
100 | bne .Lfchunk-.LPG1(%r13) # unhandled error code | ||
101 | c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced | ||
102 | bne .Lfchunk-.LPG1(%r13) # if no, give up | ||
103 | l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP | ||
104 | b .Lservicecall-.LPG1(%r13) | ||
105 | .Lprocsccb: | ||
106 | lhi %r1,0 | ||
107 | icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0 | ||
108 | jnz .Lscnd | ||
109 | lhi %r1,0x800 # otherwise report 2GB | ||
110 | .Lscnd: | ||
111 | lhi %r3,0x800 # limit reported memory size to 2GB | ||
112 | cr %r1,%r3 | ||
113 | jl .Lno2gb | ||
114 | lr %r1,%r3 | ||
115 | .Lno2gb: | ||
116 | xr %r3,%r3 # same logic | ||
117 | ic %r3,.Lscpa1-.Lsccb(%r4) | ||
118 | chi %r3,0x00 | ||
119 | jne .Lcompmem | ||
120 | l %r3,.Lscpa2-.Lsccb(%r4) | ||
121 | .Lcompmem: | ||
122 | mr %r2,%r1 # mem in MB on 128-bit | ||
123 | l %r1,.Lonemb-.LPG1(%r13) | ||
124 | mr %r2,%r1 # mem size in bytes in %r3 | ||
125 | b .Lfchunk-.LPG1(%r13) | ||
126 | |||
127 | .align 4 | ||
128 | .Lipl_save_parameters: | ||
129 | .long ipl_save_parameters | ||
130 | .Linittu: | ||
131 | .long init_thread_union | ||
132 | .Lpmask: | ||
133 | .byte 0 | ||
134 | .align 8 | ||
135 | .Lpcext:.long 0x00080000,0x80000000 | ||
136 | .Lcr: | ||
137 | .long 0x00 # place holder for cr0 | ||
138 | .align 8 | ||
139 | .Lwaitsclp: | ||
140 | .long 0x010a0000,0x80000000 + .Lsclph | ||
141 | .Lrcp: | ||
142 | .int 0x00120001 # Read SCP forced code | ||
143 | .Lrcp2: | ||
144 | .int 0x00020001 # Read SCP code | ||
145 | .Lonemb: | ||
146 | .int 0x100000 | ||
147 | .Lfchunk: | ||
148 | 60 | ||
149 | # | ||
150 | # find memory chunks. | ||
151 | # | ||
152 | lr %r9,%r3 # end of mem | ||
153 | mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13) | ||
154 | la %r1,1 # test in increments of 128KB | ||
155 | sll %r1,17 | ||
156 | l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array | ||
157 | slr %r4,%r4 # set start of chunk to zero | ||
158 | slr %r5,%r5 # set end of chunk to zero | ||
159 | slr %r6,%r6 # set access code to zero | ||
160 | la %r10,MEMORY_CHUNKS # number of chunks | ||
161 | .Lloop: | ||
162 | tprot 0(%r5),0 # test protection of first byte | ||
163 | ipm %r7 | ||
164 | srl %r7,28 | ||
165 | clr %r6,%r7 # compare cc with last access code | ||
166 | be .Lsame-.LPG1(%r13) | ||
167 | lhi %r8,0 # no program checks | ||
168 | b .Lsavchk-.LPG1(%r13) | ||
169 | .Lsame: | ||
170 | ar %r5,%r1 # add 128KB to end of chunk | ||
171 | bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop | ||
172 | .Lchkmem: # > 2GB or tprot got a program check | ||
173 | lhi %r8,1 # set program check flag | ||
174 | .Lsavchk: | ||
175 | clr %r4,%r5 # chunk size > 0? | ||
176 | be .Lchkloop-.LPG1(%r13) | ||
177 | st %r4,0(%r3) # store start address of chunk | ||
178 | lr %r0,%r5 | ||
179 | slr %r0,%r4 | ||
180 | st %r0,4(%r3) # store size of chunk | ||
181 | st %r6,8(%r3) # store type of chunk | ||
182 | la %r3,12(%r3) | ||
183 | ahi %r10,-1 # update chunk number | ||
184 | .Lchkloop: | ||
185 | lr %r6,%r7 # set access code to last cc | ||
186 | # we got an exception or we're starting a new | ||
187 | # chunk, we must check if we should | ||
188 | # still try to find valid memory (if we detected | ||
189 | # the amount of available storage), and if we | ||
190 | # have chunks left | ||
191 | xr %r0,%r0 | ||
192 | clr %r0,%r9 # did we detect memory? | ||
193 | je .Ldonemem # if not, leave | ||
194 | chi %r10,0 # do we have chunks left? | ||
195 | je .Ldonemem | ||
196 | chi %r8,1 # program check ? | ||
197 | je .Lpgmchk | ||
198 | lr %r4,%r5 # potential new chunk | ||
199 | alr %r5,%r1 # add 128KB to end of chunk | ||
200 | j .Llpcnt | ||
201 | .Lpgmchk: | ||
202 | alr %r5,%r1 # add 128KB to end of chunk | ||
203 | lr %r4,%r5 # potential new chunk | ||
204 | .Llpcnt: | ||
205 | clr %r5,%r9 # should we go on? | ||
206 | jl .Lloop | ||
207 | .Ldonemem: | ||
208 | l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags | 61 | l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags |
209 | # | 62 | # |
210 | # find out if we are running under VM | ||
211 | # | ||
212 | stidp __LC_CPUID # store cpuid | ||
213 | tm __LC_CPUID,0xff # running under VM ? | ||
214 | bno .Lnovm-.LPG1(%r13) | ||
215 | oi 3(%r12),1 # set VM flag | ||
216 | .Lnovm: | ||
217 | lh %r0,__LC_CPUID+4 # get cpu version | ||
218 | chi %r0,0x7490 # running on a P/390 ? | ||
219 | bne .Lnop390-.LPG1(%r13) | ||
220 | oi 3(%r12),4 # set P/390 flag | ||
221 | .Lnop390: | ||
222 | |||
223 | # | ||
224 | # find out if we have an IEEE fpu | 63 | # find out if we have an IEEE fpu |
225 | # | 64 | # |
226 | mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) | 65 | mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) |
@@ -295,7 +134,6 @@ startup_continue: | |||
295 | .long 0 # cr15: linkage stack operations | 134 | .long 0 # cr15: linkage stack operations |
296 | .Lduct: .long 0,0,0,0,0,0,0,0 | 135 | .Lduct: .long 0,0,0,0,0,0,0,0 |
297 | .long 0,0,0,0,0,0,0,0 | 136 | .long 0,0,0,0,0,0,0,0 |
298 | .Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem | ||
299 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu | 137 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu |
300 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp | 138 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp |
301 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg | 139 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg |
@@ -306,7 +144,9 @@ startup_continue: | |||
306 | .Lbss_bgn: .long __bss_start | 144 | .Lbss_bgn: .long __bss_start |
307 | .Lbss_end: .long _end | 145 | .Lbss_end: .long _end |
308 | .Lparmaddr: .long PARMAREA | 146 | .Lparmaddr: .long PARMAREA |
309 | .Lsccbaddr: .long .Lsccb | 147 | .Linittu: .long init_thread_union |
148 | .Lstartup_init: | ||
149 | .long startup_init | ||
310 | 150 | ||
311 | .globl ipl_schib | 151 | .globl ipl_schib |
312 | ipl_schib: | 152 | ipl_schib: |
@@ -322,26 +162,6 @@ ipl_devno: | |||
322 | .word 0 | 162 | .word 0 |
323 | 163 | ||
324 | .org 0x12000 | 164 | .org 0x12000 |
325 | .globl s390_readinfo_sccb | ||
326 | s390_readinfo_sccb: | ||
327 | .Lsccb: | ||
328 | .hword 0x1000 # length, one page | ||
329 | .byte 0x00,0x00,0x00 | ||
330 | .byte 0x80 # variable response bit set | ||
331 | .Lsccbr: | ||
332 | .hword 0x00 # response code | ||
333 | .Lscpincr1: | ||
334 | .hword 0x00 | ||
335 | .Lscpa1: | ||
336 | .byte 0x00 | ||
337 | .fill 89,1,0 | ||
338 | .Lscpa2: | ||
339 | .int 0x00 | ||
340 | .Lscpincr2: | ||
341 | .quad 0x00 | ||
342 | .fill 3984,1,0 | ||
343 | .org 0x13000 | ||
344 | |||
345 | #ifdef CONFIG_SHARED_KERNEL | 165 | #ifdef CONFIG_SHARED_KERNEL |
346 | .org 0x100000 | 166 | .org 0x100000 |
347 | #endif | 167 | #endif |
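
The block removed above did the SCLP-based storage sizing and the 128KB TPROT scan entirely in 31-bit assembly; the replacement is a single call through .Lstartup_init into C. Below is a rough sketch of what such a TPROT probe can look like when written in C — tprot(), scan_memory(), CHUNK_INCR, MEMORY_CHUNKS and the mem_chunk layout are illustrative assumptions, not the actual kernel code introduced by this series.

```c
/*
 * Rough sketch only: the deleted assembly probed storage in 128KB steps
 * with TPROT and recorded (start, size, access type) triples.  Every name
 * below is an assumption for illustration.
 */
#include <asm/uaccess.h>		/* EX_TABLE(), include path assumed */

#define MEMORY_CHUNKS	16		/* assumed limit */
#define CHUNK_INCR	(1UL << 17)	/* test in increments of 128KB */

struct mem_chunk {			/* assumed layout */
	unsigned long addr;
	unsigned long size;
	int type;
};

static struct mem_chunk memory_chunk[MEMORY_CHUNKS];

static int tprot(unsigned long addr)
{
	int cc = -1;	/* stays -1 if the probe raises a program check */

	asm volatile(
		"0:	tprot	0(%1),0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (addr) : "cc");
	return cc;
}

static void scan_memory(unsigned long memsize)
{
	unsigned long addr, start = 0;
	int i = 0, cc, old = tprot(0);

	for (addr = CHUNK_INCR; i < MEMORY_CHUNKS; addr += CHUNK_INCR) {
		cc = addr < memsize ? tprot(addr) : -1;
		if (cc == old && addr < memsize)
			continue;			/* same chunk */
		if (old >= 0) {				/* close the chunk */
			unsigned long end = addr < memsize ? addr : memsize;

			memory_chunk[i].addr = start;
			memory_chunk[i].size = end - start;
			memory_chunk[i].type = old;
			i++;
		}
		if (addr >= memsize)
			break;
		start = addr;
		old = cc;
	}
}
```

The EX_TABLE fixup turns the program check raised by probing beyond installed storage into a sentinel return value, so the scan can stop or skip a region instead of taking an exception.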
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 6ba3f4512dd1..b8fec4e5c5d4 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -58,183 +58,15 @@ startup_continue: | |||
58 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack | 58 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack |
59 | aghi %r15,-160 | 59 | aghi %r15,-160 |
60 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | 60 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain |
61 | |||
62 | brasl %r14,ipl_save_parameters | ||
63 | # | 61 | # |
64 | # clear bss memory | 62 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
63 | # and create a kernel NSS if the SAVESYS= parm is defined | ||
65 | # | 64 | # |
66 | larl %r2,__bss_start # start of bss segment | 65 | brasl %r14,startup_init |
67 | larl %r3,_end # end of bss segment | ||
68 | sgr %r3,%r2 # length of bss | ||
69 | sgr %r4,%r4 # | ||
70 | sgr %r5,%r5 # set src,length and pad to zero | ||
71 | mvcle %r2,%r4,0 # clear mem | ||
72 | jo .-4 # branch back, if not finished | ||
73 | # set program check new psw mask | 66 | # set program check new psw mask |
74 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) | 67 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) |
75 | larl %r1,.Lslowmemdetect # set program check address | ||
76 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
77 | lghi %r1,0xc | ||
78 | diag %r0,%r1,0x260 # get memory size of virtual machine | ||
79 | cgr %r0,%r1 # different? -> old detection routine | ||
80 | jne .Lslowmemdetect | ||
81 | aghi %r1,1 # size is one more than end | ||
82 | larl %r2,memory_chunk | ||
83 | stg %r1,8(%r2) # store size of chunk | ||
84 | j .Ldonemem | ||
85 | |||
86 | .Lslowmemdetect: | ||
87 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | ||
88 | .Lservicecall: | ||
89 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | ||
90 | |||
91 | stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0 | ||
92 | la %r1,0x200 # set bit 22 | ||
93 | og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1 | ||
94 | stg %r1,.Lcr-.LPG1(%r13) | ||
95 | lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0 | ||
96 | |||
97 | mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw | ||
98 | larl %r1,.Lsclph | ||
99 | stg %r1,__LC_EXT_NEW_PSW+8 # set handler | ||
100 | |||
101 | larl %r4,.Lsccb # %r4 is our index for sccb stuff | ||
102 | lgr %r1,%r4 # our sccb | ||
103 | .insn rre,0xb2200000,%r2,%r1 # service call | ||
104 | ipm %r1 | ||
105 | srl %r1,28 # get cc code | ||
106 | xr %r3,%r3 | ||
107 | chi %r1,3 | ||
108 | be .Lfchunk-.LPG1(%r13) # leave | ||
109 | chi %r1,2 | ||
110 | be .Lservicecall-.LPG1(%r13) | ||
111 | lpswe .Lwaitsclp-.LPG1(%r13) | ||
112 | .Lsclph: | ||
113 | lh %r1,.Lsccbr-.Lsccb(%r4) | ||
114 | chi %r1,0x10 # 0x0010 is the success code | ||
115 | je .Lprocsccb # let's process the sccb | ||
116 | chi %r1,0x1f0 | ||
117 | bne .Lfchunk-.LPG1(%r13) # unhandled error code | ||
118 | c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced | ||
119 | bne .Lfchunk-.LPG1(%r13) # if no, give up | ||
120 | l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP | ||
121 | b .Lservicecall-.LPG1(%r13) | ||
122 | .Lprocsccb: | ||
123 | lghi %r1,0 | ||
124 | icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0 | ||
125 | jnz .Lscnd | ||
126 | lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one | ||
127 | .Lscnd: | ||
128 | xr %r3,%r3 # same logic | ||
129 | ic %r3,.Lscpa1-.Lsccb(%r4) | ||
130 | chi %r3,0x00 | ||
131 | jne .Lcompmem | ||
132 | l %r3,.Lscpa2-.Lsccb(%r4) | ||
133 | .Lcompmem: | ||
134 | mlgr %r2,%r1 # mem in MB on 128-bit | ||
135 | l %r1,.Lonemb-.LPG1(%r13) | ||
136 | mlgr %r2,%r1 # mem size in bytes in %r3 | ||
137 | b .Lfchunk-.LPG1(%r13) | ||
138 | |||
139 | .align 4 | ||
140 | .Lpmask: | ||
141 | .byte 0 | ||
142 | .align 8 | ||
143 | .Lcr: | ||
144 | .quad 0x00 # place holder for cr0 | ||
145 | .Lwaitsclp: | ||
146 | .quad 0x0102000180000000,.Lsclph | ||
147 | .Lrcp: | ||
148 | .int 0x00120001 # Read SCP forced code | ||
149 | .Lrcp2: | ||
150 | .int 0x00020001 # Read SCP code | ||
151 | .Lonemb: | ||
152 | .int 0x100000 | ||
153 | |||
154 | .Lfchunk: | ||
155 | |||
156 | # | ||
157 | # find memory chunks. | ||
158 | # | ||
159 | lgr %r9,%r3 # end of mem | ||
160 | larl %r1,.Lchkmem # set program check address | ||
161 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
162 | la %r1,1 # test in increments of 128KB | ||
163 | sllg %r1,%r1,17 | ||
164 | larl %r3,memory_chunk | ||
165 | slgr %r4,%r4 # set start of chunk to zero | ||
166 | slgr %r5,%r5 # set end of chunk to zero | ||
167 | slr %r6,%r6 # set access code to zero | ||
168 | la %r10,MEMORY_CHUNKS # number of chunks | ||
169 | .Lloop: | ||
170 | tprot 0(%r5),0 # test protection of first byte | ||
171 | ipm %r7 | ||
172 | srl %r7,28 | ||
173 | clr %r6,%r7 # compare cc with last access code | ||
174 | je .Lsame | ||
175 | lghi %r8,0 # no program checks | ||
176 | j .Lsavchk | ||
177 | .Lsame: | ||
178 | algr %r5,%r1 # add 128KB to end of chunk | ||
179 | # no need to check here, | ||
180 | brc 12,.Lloop # this is the same chunk | ||
181 | .Lchkmem: # > 16EB or tprot got a program check | ||
182 | lghi %r8,1 # set program check flag | ||
183 | .Lsavchk: | ||
184 | clgr %r4,%r5 # chunk size > 0? | ||
185 | je .Lchkloop | ||
186 | stg %r4,0(%r3) # store start address of chunk | ||
187 | lgr %r0,%r5 | ||
188 | slgr %r0,%r4 | ||
189 | stg %r0,8(%r3) # store size of chunk | ||
190 | st %r6,20(%r3) # store type of chunk | ||
191 | la %r3,24(%r3) | ||
192 | ahi %r10,-1 # update chunk number | ||
193 | .Lchkloop: | ||
194 | lr %r6,%r7 # set access code to last cc | ||
195 | # we got an exception or we're starting a new | ||
196 | # chunk, we must check if we should | ||
197 | # still try to find valid memory (if we detected | ||
198 | # the amount of available storage), and if we | ||
199 | # have chunks left | ||
200 | lghi %r4,1 | ||
201 | sllg %r4,%r4,31 | ||
202 | clgr %r5,%r4 | ||
203 | je .Lhsaskip | ||
204 | xr %r0, %r0 | ||
205 | clgr %r0, %r9 # did we detect memory? | ||
206 | je .Ldonemem # if not, leave | ||
207 | chi %r10, 0 # do we have chunks left? | ||
208 | je .Ldonemem | ||
209 | .Lhsaskip: | ||
210 | chi %r8,1 # program check ? | ||
211 | je .Lpgmchk | ||
212 | lgr %r4,%r5 # potential new chunk | ||
213 | algr %r5,%r1 # add 128KB to end of chunk | ||
214 | j .Llpcnt | ||
215 | .Lpgmchk: | ||
216 | algr %r5,%r1 # add 128KB to end of chunk | ||
217 | lgr %r4,%r5 # potential new chunk | ||
218 | .Llpcnt: | ||
219 | clgr %r5,%r9 # should we go on? | ||
220 | jl .Lloop | ||
221 | .Ldonemem: | ||
222 | |||
223 | larl %r12,machine_flags | 68 | larl %r12,machine_flags |
224 | # | 69 | # |
225 | # find out if we are running under VM | ||
226 | # | ||
227 | stidp __LC_CPUID # store cpuid | ||
228 | tm __LC_CPUID,0xff # running under VM ? | ||
229 | bno 0f-.LPG1(%r13) | ||
230 | oi 7(%r12),1 # set VM flag | ||
231 | 0: lh %r0,__LC_CPUID+4 # get cpu version | ||
232 | chi %r0,0x7490 # running on a P/390 ? | ||
233 | bne 1f-.LPG1(%r13) | ||
234 | oi 7(%r12),4 # set P/390 flag | ||
235 | 1: | ||
236 | |||
237 | # | ||
238 | # find out if we have the MVPG instruction | 70 | # find out if we have the MVPG instruction |
239 | # | 71 | # |
240 | la %r1,0f-.LPG1(%r13) # set program check address | 72 | la %r1,0f-.LPG1(%r13) # set program check address |
@@ -336,25 +168,6 @@ ipl_devno: | |||
336 | .word 0 | 168 | .word 0 |
337 | 169 | ||
338 | .org 0x12000 | 170 | .org 0x12000 |
339 | .globl s390_readinfo_sccb | ||
340 | s390_readinfo_sccb: | ||
341 | .Lsccb: | ||
342 | .hword 0x1000 # length, one page | ||
343 | .byte 0x00,0x00,0x00 | ||
344 | .byte 0x80 # variable response bit set | ||
345 | .Lsccbr: | ||
346 | .hword 0x00 # response code | ||
347 | .Lscpincr1: | ||
348 | .hword 0x00 | ||
349 | .Lscpa1: | ||
350 | .byte 0x00 | ||
351 | .fill 89,1,0 | ||
352 | .Lscpa2: | ||
353 | .int 0x00 | ||
354 | .Lscpincr2: | ||
355 | .quad 0x00 | ||
356 | .fill 3984,1,0 | ||
357 | .org 0x13000 | ||
358 | 171 | ||
359 | #ifdef CONFIG_SHARED_KERNEL | 172 | #ifdef CONFIG_SHARED_KERNEL |
360 | .org 0x100000 | 173 | .org 0x100000 |
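
The 64-bit variant drops the same assembly (including the diag 0x260 fast path for getting the memory size under z/VM) in favour of a plain brasl %r14,startup_init. Going by the new comment, that C routine takes over four jobs; here is a hedged outline, where only ipl_save_parameters() is visible in this diff (it was called directly before) and create_kernel_nss() plus the exact ordering are assumptions.

```c
/*
 * Hedged outline of the new C entry point, following the comment above.
 * ipl_save_parameters() was called from this file before the change;
 * create_kernel_nss() and the ordering are assumptions.
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/string.h>
#include <asm/page.h>

extern char __bss_start[], _end[];
extern void ipl_save_parameters(void);
extern void create_kernel_nss(void);		/* assumed helper */

void __init startup_init(void)
{
	unsigned long pfn;

	ipl_save_parameters();			/* keep the IPL information */
	memset(__bss_start, 0, _end - __bss_start);	/* clear bss */

	/* initialize storage key for kernel pages (moved out of setup.c) */
	for (pfn = 0; pfn < PFN_UP(__pa(&_end)); pfn++)
		page_set_storage_key(pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);

	create_kernel_nss();		/* honour SAVESYS= if it was given */
}
```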
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 9e9972e8a52b..052259530651 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -20,26 +20,27 @@ | |||
20 | #include <asm/cio.h> | 20 | #include <asm/cio.h> |
21 | #include <asm/ebcdic.h> | 21 | #include <asm/ebcdic.h> |
22 | #include <asm/reset.h> | 22 | #include <asm/reset.h> |
23 | #include <asm/sclp.h> | ||
23 | 24 | ||
24 | #define IPL_PARM_BLOCK_VERSION 0 | 25 | #define IPL_PARM_BLOCK_VERSION 0 |
25 | #define LOADPARM_LEN 8 | ||
26 | 26 | ||
27 | extern char s390_readinfo_sccb[]; | 27 | #define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10) |
28 | #define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) | 28 | #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) |
29 | #define SCCB_LOADPARM (&s390_readinfo_sccb[24]) | 29 | #define SCCB_FLAG (s390_readinfo_sccb.flags) |
30 | #define SCCB_FLAG (s390_readinfo_sccb[91]) | ||
31 | 30 | ||
32 | enum ipl_type { | 31 | enum ipl_type { |
33 | IPL_TYPE_NONE = 1, | 32 | IPL_TYPE_NONE = 1, |
34 | IPL_TYPE_UNKNOWN = 2, | 33 | IPL_TYPE_UNKNOWN = 2, |
35 | IPL_TYPE_CCW = 4, | 34 | IPL_TYPE_CCW = 4, |
36 | IPL_TYPE_FCP = 8, | 35 | IPL_TYPE_FCP = 8, |
36 | IPL_TYPE_NSS = 16, | ||
37 | }; | 37 | }; |
38 | 38 | ||
39 | #define IPL_NONE_STR "none" | 39 | #define IPL_NONE_STR "none" |
40 | #define IPL_UNKNOWN_STR "unknown" | 40 | #define IPL_UNKNOWN_STR "unknown" |
41 | #define IPL_CCW_STR "ccw" | 41 | #define IPL_CCW_STR "ccw" |
42 | #define IPL_FCP_STR "fcp" | 42 | #define IPL_FCP_STR "fcp" |
43 | #define IPL_NSS_STR "nss" | ||
43 | 44 | ||
44 | static char *ipl_type_str(enum ipl_type type) | 45 | static char *ipl_type_str(enum ipl_type type) |
45 | { | 46 | { |
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type) | |||
50 | return IPL_CCW_STR; | 51 | return IPL_CCW_STR; |
51 | case IPL_TYPE_FCP: | 52 | case IPL_TYPE_FCP: |
52 | return IPL_FCP_STR; | 53 | return IPL_FCP_STR; |
54 | case IPL_TYPE_NSS: | ||
55 | return IPL_NSS_STR; | ||
53 | case IPL_TYPE_UNKNOWN: | 56 | case IPL_TYPE_UNKNOWN: |
54 | default: | 57 | default: |
55 | return IPL_UNKNOWN_STR; | 58 | return IPL_UNKNOWN_STR; |
@@ -64,6 +67,7 @@ enum ipl_method { | |||
64 | IPL_METHOD_FCP_RO_DIAG, | 67 | IPL_METHOD_FCP_RO_DIAG, |
65 | IPL_METHOD_FCP_RW_DIAG, | 68 | IPL_METHOD_FCP_RW_DIAG, |
66 | IPL_METHOD_FCP_RO_VM, | 69 | IPL_METHOD_FCP_RO_VM, |
70 | IPL_METHOD_NSS, | ||
67 | }; | 71 | }; |
68 | 72 | ||
69 | enum shutdown_action { | 73 | enum shutdown_action { |
@@ -114,11 +118,14 @@ enum diag308_rc { | |||
114 | static int diag308_set_works = 0; | 118 | static int diag308_set_works = 0; |
115 | 119 | ||
116 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; | 120 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; |
121 | |||
117 | static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; | 122 | static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; |
118 | static enum ipl_method reipl_method = IPL_METHOD_NONE; | 123 | static enum ipl_method reipl_method = IPL_METHOD_NONE; |
119 | static struct ipl_parameter_block *reipl_block_fcp; | 124 | static struct ipl_parameter_block *reipl_block_fcp; |
120 | static struct ipl_parameter_block *reipl_block_ccw; | 125 | static struct ipl_parameter_block *reipl_block_ccw; |
121 | 126 | ||
127 | static char reipl_nss_name[NSS_NAME_SIZE + 1]; | ||
128 | |||
122 | static int dump_capabilities = IPL_TYPE_NONE; | 129 | static int dump_capabilities = IPL_TYPE_NONE; |
123 | static enum ipl_type dump_type = IPL_TYPE_NONE; | 130 | static enum ipl_type dump_type = IPL_TYPE_NONE; |
124 | static enum ipl_method dump_method = IPL_METHOD_NONE; | 131 | static enum ipl_method dump_method = IPL_METHOD_NONE; |
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ | |||
173 | sys_##_prefix##_##_name##_show, \ | 180 | sys_##_prefix##_##_name##_show, \ |
174 | sys_##_prefix##_##_name##_store); | 181 | sys_##_prefix##_##_name##_store); |
175 | 182 | ||
183 | #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ | ||
184 | static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ | ||
185 | char *page) \ | ||
186 | { \ | ||
187 | return sprintf(page, _fmt_out, _value); \ | ||
188 | } \ | ||
189 | static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ | ||
190 | const char *buf, size_t len) \ | ||
191 | { \ | ||
192 | if (sscanf(buf, _fmt_in, _value) != 1) \ | ||
193 | return -EINVAL; \ | ||
194 | return len; \ | ||
195 | } \ | ||
196 | static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ | ||
197 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ | ||
198 | sys_##_prefix##_##_name##_show, \ | ||
199 | sys_##_prefix##_##_name##_store); | ||
200 | |||
176 | static void make_attrs_ro(struct attribute **attrs) | 201 | static void make_attrs_ro(struct attribute **attrs) |
177 | { | 202 | { |
178 | while (*attrs) { | 203 | while (*attrs) { |
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void) | |||
189 | { | 214 | { |
190 | struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; | 215 | struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; |
191 | 216 | ||
217 | if (ipl_flags & IPL_NSS_VALID) | ||
218 | return IPL_TYPE_NSS; | ||
192 | if (!(ipl_flags & IPL_DEVNO_VALID)) | 219 | if (!(ipl_flags & IPL_DEVNO_VALID)) |
193 | return IPL_TYPE_UNKNOWN; | 220 | return IPL_TYPE_UNKNOWN; |
194 | if (!(ipl_flags & IPL_PARMBLOCK_VALID)) | 221 | if (!(ipl_flags & IPL_PARMBLOCK_VALID)) |
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = { | |||
324 | .attrs = ipl_ccw_attrs, | 351 | .attrs = ipl_ccw_attrs, |
325 | }; | 352 | }; |
326 | 353 | ||
354 | /* NSS ipl device attributes */ | ||
355 | |||
356 | DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name); | ||
357 | |||
358 | static struct attribute *ipl_nss_attrs[] = { | ||
359 | &sys_ipl_type_attr.attr, | ||
360 | &sys_ipl_nss_name_attr.attr, | ||
361 | NULL, | ||
362 | }; | ||
363 | |||
364 | static struct attribute_group ipl_nss_attr_group = { | ||
365 | .attrs = ipl_nss_attrs, | ||
366 | }; | ||
367 | |||
327 | /* UNKNOWN ipl device attributes */ | 368 | /* UNKNOWN ipl device attributes */ |
328 | 369 | ||
329 | static struct attribute *ipl_unknown_attrs[] = { | 370 | static struct attribute *ipl_unknown_attrs[] = { |
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = { | |||
432 | .attrs = reipl_ccw_attrs, | 473 | .attrs = reipl_ccw_attrs, |
433 | }; | 474 | }; |
434 | 475 | ||
476 | |||
477 | /* NSS reipl device attributes */ | ||
478 | |||
479 | DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name); | ||
480 | |||
481 | static struct attribute *reipl_nss_attrs[] = { | ||
482 | &sys_reipl_nss_name_attr.attr, | ||
483 | NULL, | ||
484 | }; | ||
485 | |||
486 | static struct attribute_group reipl_nss_attr_group = { | ||
487 | .name = IPL_NSS_STR, | ||
488 | .attrs = reipl_nss_attrs, | ||
489 | }; | ||
490 | |||
435 | /* reipl type */ | 491 | /* reipl type */ |
436 | 492 | ||
437 | static int reipl_set_type(enum ipl_type type) | 493 | static int reipl_set_type(enum ipl_type type) |
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type) | |||
454 | else | 510 | else |
455 | reipl_method = IPL_METHOD_FCP_RO_DIAG; | 511 | reipl_method = IPL_METHOD_FCP_RO_DIAG; |
456 | break; | 512 | break; |
513 | case IPL_TYPE_NSS: | ||
514 | reipl_method = IPL_METHOD_NSS; | ||
515 | break; | ||
457 | default: | 516 | default: |
458 | reipl_method = IPL_METHOD_NONE; | 517 | reipl_method = IPL_METHOD_NONE; |
459 | } | 518 | } |
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf, | |||
475 | rc = reipl_set_type(IPL_TYPE_CCW); | 534 | rc = reipl_set_type(IPL_TYPE_CCW); |
476 | else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) | 535 | else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) |
477 | rc = reipl_set_type(IPL_TYPE_FCP); | 536 | rc = reipl_set_type(IPL_TYPE_FCP); |
537 | else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0) | ||
538 | rc = reipl_set_type(IPL_TYPE_NSS); | ||
478 | return (rc != 0) ? rc : len; | 539 | return (rc != 0) ? rc : len; |
479 | } | 540 | } |
480 | 541 | ||
@@ -647,6 +708,10 @@ void do_reipl(void) | |||
647 | case IPL_METHOD_FCP_RO_VM: | 708 | case IPL_METHOD_FCP_RO_VM: |
648 | __cpcmd("IPL", NULL, 0, NULL); | 709 | __cpcmd("IPL", NULL, 0, NULL); |
649 | break; | 710 | break; |
711 | case IPL_METHOD_NSS: | ||
712 | sprintf(buf, "IPL %s", reipl_nss_name); | ||
713 | __cpcmd(buf, NULL, 0, NULL); | ||
714 | break; | ||
650 | case IPL_METHOD_NONE: | 715 | case IPL_METHOD_NONE: |
651 | default: | 716 | default: |
652 | if (MACHINE_IS_VM) | 717 | if (MACHINE_IS_VM) |
@@ -733,6 +798,10 @@ static int __init ipl_init(void) | |||
733 | case IPL_TYPE_FCP: | 798 | case IPL_TYPE_FCP: |
734 | rc = ipl_register_fcp_files(); | 799 | rc = ipl_register_fcp_files(); |
735 | break; | 800 | break; |
801 | case IPL_TYPE_NSS: | ||
802 | rc = sysfs_create_group(&ipl_subsys.kset.kobj, | ||
803 | &ipl_nss_attr_group); | ||
804 | break; | ||
736 | default: | 805 | default: |
737 | rc = sysfs_create_group(&ipl_subsys.kset.kobj, | 806 | rc = sysfs_create_group(&ipl_subsys.kset.kobj, |
738 | &ipl_unknown_attr_group); | 807 | &ipl_unknown_attr_group); |
@@ -755,6 +824,20 @@ static void __init reipl_probe(void) | |||
755 | free_page((unsigned long)buffer); | 824 | free_page((unsigned long)buffer); |
756 | } | 825 | } |
757 | 826 | ||
827 | static int __init reipl_nss_init(void) | ||
828 | { | ||
829 | int rc; | ||
830 | |||
831 | if (!MACHINE_IS_VM) | ||
832 | return 0; | ||
833 | rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group); | ||
834 | if (rc) | ||
835 | return rc; | ||
836 | strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1); | ||
837 | reipl_capabilities |= IPL_TYPE_NSS; | ||
838 | return 0; | ||
839 | } | ||
840 | |||
758 | static int __init reipl_ccw_init(void) | 841 | static int __init reipl_ccw_init(void) |
759 | { | 842 | { |
760 | int rc; | 843 | int rc; |
@@ -837,6 +920,9 @@ static int __init reipl_init(void) | |||
837 | rc = reipl_fcp_init(); | 920 | rc = reipl_fcp_init(); |
838 | if (rc) | 921 | if (rc) |
839 | return rc; | 922 | return rc; |
923 | rc = reipl_nss_init(); | ||
924 | if (rc) | ||
925 | return rc; | ||
840 | rc = reipl_set_type(ipl_get_type()); | 926 | rc = reipl_set_type(ipl_get_type()); |
841 | if (rc) | 927 | if (rc) |
842 | return rc; | 928 | return rc; |
@@ -993,8 +1079,6 @@ static void do_reset_calls(void) | |||
993 | reset->fn(); | 1079 | reset->fn(); |
994 | } | 1080 | } |
995 | 1081 | ||
996 | extern void reset_mcck_handler(void); | ||
997 | extern void reset_pgm_handler(void); | ||
998 | extern __u32 dump_prefix_page; | 1082 | extern __u32 dump_prefix_page; |
999 | 1083 | ||
1000 | void s390_reset_system(void) | 1084 | void s390_reset_system(void) |
@@ -1016,14 +1100,14 @@ void s390_reset_system(void) | |||
1016 | __ctl_clear_bit(0,28); | 1100 | __ctl_clear_bit(0,28); |
1017 | 1101 | ||
1018 | /* Set new machine check handler */ | 1102 | /* Set new machine check handler */ |
1019 | S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; | 1103 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
1020 | S390_lowcore.mcck_new_psw.addr = | 1104 | S390_lowcore.mcck_new_psw.addr = |
1021 | PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; | 1105 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; |
1022 | 1106 | ||
1023 | /* Set new program check handler */ | 1107 | /* Set new program check handler */ |
1024 | S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; | 1108 | S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
1025 | S390_lowcore.program_new_psw.addr = | 1109 | S390_lowcore.program_new_psw.addr = |
1026 | PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler; | 1110 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
1027 | 1111 | ||
1028 | do_reset_calls(); | 1112 | do_reset_calls(); |
1029 | } | 1113 | } |
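
ipl.c stops indexing s390_readinfo_sccb as a raw byte array (response code at byte 6, loadparm at byte 24, flags at byte 91) and instead uses a structure that now comes from <asm/sclp.h>. The sketch below shows a layout consistent with those offsets; the field names and reserved paddings are assumptions for illustration only.

```c
/*
 * Sketch of a layout consistent with the old byte offsets; the real
 * structure is provided by <asm/sclp.h> and its field names may differ.
 */
#include <linux/types.h>

struct sccb_header {
	u16	length;			/* 0x1000, one page */
	u8	function_code;
	u8	control_mask[3];
	u16	response_code;		/* byte 6: 0x0010 means success */
} __attribute__ ((packed));

struct sclp_readinfo_sccb {
	struct	sccb_header header;	/* bytes 0..7 */
	u16	rnmax;			/* storage increment count (assumed) */
	u8	rnsize;			/* storage increment size (assumed) */
	u8	_reserved0[24 - 11];
	u8	loadparm[8];		/* byte 24: SCCB_LOADPARM */
	u8	_reserved1[91 - 32];
	u8	flags;			/* byte 91: SCCB_FLAG */
	u8	_reserved2[4096 - 92];
} __attribute__ ((packed));

extern struct sclp_readinfo_sccb s390_readinfo_sccb;

/* with this layout, SCCB_VALID above is header.response_code == 0x0010 */
```

With NSS support added in the same file, the new DEFINE_IPL_ATTR_STR_RW() macro exposes reipl_nss_name as a read-write sysfs attribute (presumably under /sys/firmware/reipl/nss/name), and do_reipl() re-IPLs the named saved system with the CP command "IPL <name>".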
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1eef50918615..8f0cbca31203 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/irq.c | 2 | * arch/s390/kernel/irq.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * Copyright IBM Corp. 2004,2007 |
5 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
6 | * Thomas Spatzier (tspat@de.ibm.com) | ||
7 | * | 7 | * |
8 | * This file contains interrupt related functions. | 8 | * This file contains interrupt related functions. |
9 | */ | 9 | */ |
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/profile.h> | ||
17 | 19 | ||
18 | /* | 20 | /* |
19 | * show_interrupts is needed by /proc/interrupts. | 21 | * show_interrupts is needed by /proc/interrupts. |
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void) | |||
93 | 95 | ||
94 | local_irq_restore(flags); | 96 | local_irq_restore(flags); |
95 | } | 97 | } |
96 | |||
97 | EXPORT_SYMBOL(do_softirq); | 98 | EXPORT_SYMBOL(do_softirq); |
99 | |||
100 | void init_irq_proc(void) | ||
101 | { | ||
102 | struct proc_dir_entry *root_irq_dir; | ||
103 | |||
104 | root_irq_dir = proc_mkdir("irq", NULL); | ||
105 | create_prof_cpu_mask(root_irq_dir); | ||
106 | } | ||
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 576368c4f605..a466bab6677e 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) | |||
155 | static int __kprobes swap_instruction(void *aref) | 155 | static int __kprobes swap_instruction(void *aref) |
156 | { | 156 | { |
157 | struct ins_replace_args *args = aref; | 157 | struct ins_replace_args *args = aref; |
158 | u32 *addr; | ||
159 | u32 instr; | ||
158 | int err = -EFAULT; | 160 | int err = -EFAULT; |
159 | 161 | ||
162 | /* | ||
163 | * Text segment is read-only, hence we use stura to bypass dynamic | ||
164 | * address translation to exchange the instruction. Since stura | ||
165 | * always operates on four bytes, but we only want to exchange two | ||
166 | * bytes do some calculations to get things right. In addition we | ||
167 | * shall not cross any page boundaries (vmalloc area!) when writing | ||
168 | * the new instruction. | ||
169 | */ | ||
170 | addr = (u32 *)ALIGN((unsigned long)args->ptr, 4); | ||
171 | if ((unsigned long)args->ptr & 2) | ||
172 | instr = ((*addr) & 0xffff0000) | args->new; | ||
173 | else | ||
174 | instr = ((*addr) & 0x0000ffff) | args->new << 16; | ||
175 | |||
160 | asm volatile( | 176 | asm volatile( |
161 | "0: mvc 0(2,%2),0(%3)\n" | 177 | " lra %1,0(%1)\n" |
162 | "1: la %0,0\n" | 178 | "0: stura %2,%1\n" |
179 | "1: la %0,0\n" | ||
163 | "2:\n" | 180 | "2:\n" |
164 | EX_TABLE(0b,2b) | 181 | EX_TABLE(0b,2b) |
165 | : "+d" (err), "=m" (*args->ptr) | 182 | : "+d" (err) |
166 | : "a" (args->ptr), "a" (&args->new), "m" (args->new)); | 183 | : "a" (addr), "d" (instr) |
184 | : "memory", "cc"); | ||
185 | |||
167 | return err; | 186 | return err; |
168 | } | 187 | } |
169 | 188 | ||
@@ -356,7 +375,7 @@ no_kprobe: | |||
356 | * - When the probed function returns, this probe | 375 | * - When the probed function returns, this probe |
357 | * causes the handlers to fire | 376 | * causes the handlers to fire |
358 | */ | 377 | */ |
359 | void __kprobes kretprobe_trampoline_holder(void) | 378 | void kretprobe_trampoline_holder(void) |
360 | { | 379 | { |
361 | asm volatile(".global kretprobe_trampoline\n" | 380 | asm volatile(".global kretprobe_trampoline\n" |
362 | "kretprobe_trampoline: bcr 0,0\n"); | 381 | "kretprobe_trampoline: bcr 0,0\n"); |
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void) | |||
365 | /* | 384 | /* |
366 | * Called when the probe at kretprobe trampoline is hit | 385 | * Called when the probe at kretprobe trampoline is hit |
367 | */ | 386 | */ |
368 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 387 | static int __kprobes trampoline_probe_handler(struct kprobe *p, |
388 | struct pt_regs *regs) | ||
369 | { | 389 | { |
370 | struct kretprobe_instance *ri = NULL; | 390 | struct kretprobe_instance *ri = NULL; |
371 | struct hlist_head *head, empty_rp; | 391 | struct hlist_head *head, empty_rp; |
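
Because the kernel text is now mapped read-only, swap_instruction() switches from mvc to stura, which bypasses dynamic address translation but always stores an aligned four-byte word. The two-byte breakpoint therefore has to be spliced into the correct half of the containing word first. A minimal standalone sketch of that merge follows (hypothetical helper name; it masks down to the containing word and relies on s390 being big-endian).

```c
/*
 * Standalone sketch of the merge: pick the aligned four-byte word that
 * contains the probe halfword and splice the new opcode into the right
 * half.  On big-endian s390, offset 0 within the word is the high
 * halfword and offset 2 the low one.
 */
#include <linux/types.h>

static u32 merge_breakpoint(void *ptr, u16 new_insn)
{
	u32 *word = (u32 *)((unsigned long)ptr & ~3UL);	/* containing word */
	u32 old = *word;

	if ((unsigned long)ptr & 2)
		return (old & 0xffff0000) | new_insn;	   /* low halfword */
	return (old & 0x0000ffff) | ((u32)new_insn << 16); /* high halfword */
}
```

The lra/stura pair in the patch then converts the virtual address to a real one and stores the merged word; since the store is a single aligned word, it can never cross a page boundary.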
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index f6d9bcc0f75b..52f57af252b4 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/kexec.h> | 12 | #include <linux/kexec.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/reboot.h> | ||
14 | #include <asm/cio.h> | 15 | #include <asm/cio.h> |
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
16 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index d989ed45a7aa..39d1dd752529 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/moduleloader.h> | ||
33 | 34 | ||
34 | #if 0 | 35 | #if 0 |
35 | #define DEBUGP printk | 36 | #define DEBUGP printk |
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region) | |||
58 | table entries. */ | 59 | table entries. */ |
59 | } | 60 | } |
60 | 61 | ||
61 | static inline void | 62 | static void |
62 | check_rela(Elf_Rela *rela, struct module *me) | 63 | check_rela(Elf_Rela *rela, struct module *me) |
63 | { | 64 | { |
64 | struct mod_arch_syminfo *info; | 65 | struct mod_arch_syminfo *info; |
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, | |||
181 | return -ENOEXEC; | 182 | return -ENOEXEC; |
182 | } | 183 | } |
183 | 184 | ||
184 | static inline int | 185 | static int |
185 | apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | 186 | apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, |
186 | struct module *me) | 187 | struct module *me) |
187 | { | 188 | { |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 6603fbb41d07..5acfac654f9d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -144,7 +144,7 @@ static void default_idle(void) | |||
144 | 144 | ||
145 | trace_hardirqs_on(); | 145 | trace_hardirqs_on(); |
146 | /* Wait for external, I/O or machine check interrupt. */ | 146 | /* Wait for external, I/O or machine check interrupt. */ |
147 | __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | | 147 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
148 | PSW_MASK_IO | PSW_MASK_EXT); | 148 | PSW_MASK_IO | PSW_MASK_EXT); |
149 | } | 149 | } |
150 | 150 | ||
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
190 | struct pt_regs regs; | 190 | struct pt_regs regs; |
191 | 191 | ||
192 | memset(®s, 0, sizeof(regs)); | 192 | memset(®s, 0, sizeof(regs)); |
193 | regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | 193 | regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; |
194 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; | 194 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; |
195 | regs.gprs[9] = (unsigned long) fn; | 195 | regs.gprs[9] = (unsigned long) fn; |
196 | regs.gprs[10] = (unsigned long) arg; | 196 | regs.gprs[10] = (unsigned long) arg; |
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c deleted file mode 100644 index b81aa1f569ca..000000000000 --- a/arch/s390/kernel/profile.c +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/profile.c | ||
3 | * | ||
4 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
6 | * | ||
7 | */ | ||
8 | #include <linux/proc_fs.h> | ||
9 | #include <linux/profile.h> | ||
10 | |||
11 | static struct proc_dir_entry * root_irq_dir; | ||
12 | |||
13 | void init_irq_proc(void) | ||
14 | { | ||
15 | /* create /proc/irq */ | ||
16 | root_irq_dir = proc_mkdir("irq", NULL); | ||
17 | |||
18 | /* create /proc/irq/prof_cpu_mask */ | ||
19 | create_prof_cpu_mask(root_irq_dir); | ||
20 | } | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 8f36504075ed..2a8f0872ea8b 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task) | |||
86 | per_info->control_regs.bits.storage_alt_space_ctl = 0; | 86 | per_info->control_regs.bits.storage_alt_space_ctl = 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | void | 89 | static void set_single_step(struct task_struct *task) |
90 | set_single_step(struct task_struct *task) | ||
91 | { | 90 | { |
92 | task->thread.per_info.single_step = 1; | 91 | task->thread.per_info.single_step = 1; |
93 | FixPerRegisters(task); | 92 | FixPerRegisters(task); |
94 | } | 93 | } |
95 | 94 | ||
96 | void | 95 | static void clear_single_step(struct task_struct *task) |
97 | clear_single_step(struct task_struct *task) | ||
98 | { | 96 | { |
99 | task->thread.per_info.single_step = 0; | 97 | task->thread.per_info.single_step = 0; |
100 | FixPerRegisters(task); | 98 | FixPerRegisters(task); |
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
232 | */ | 230 | */ |
233 | if (addr == (addr_t) &dummy->regs.psw.mask && | 231 | if (addr == (addr_t) &dummy->regs.psw.mask && |
234 | #ifdef CONFIG_COMPAT | 232 | #ifdef CONFIG_COMPAT |
235 | data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && | 233 | data != PSW_MASK_MERGE(psw_user32_bits, data) && |
236 | #endif | 234 | #endif |
237 | data != PSW_MASK_MERGE(PSW_USER_BITS, data)) | 235 | data != PSW_MASK_MERGE(psw_user_bits, data)) |
238 | /* Invalid psw mask. */ | 236 | /* Invalid psw mask. */ |
239 | return -EINVAL; | 237 | return -EINVAL; |
240 | #ifndef CONFIG_64BIT | 238 | #ifndef CONFIG_64BIT |
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | |||
309 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | 307 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); |
310 | if (copied != sizeof(tmp)) | 308 | if (copied != sizeof(tmp)) |
311 | return -EIO; | 309 | return -EIO; |
312 | return put_user(tmp, (unsigned long __user *) data); | 310 | return put_user(tmp, (unsigned long __force __user *) data); |
313 | 311 | ||
314 | case PTRACE_PEEKUSR: | 312 | case PTRACE_PEEKUSR: |
315 | /* read the word at location addr in the USER area. */ | 313 | /* read the word at location addr in the USER area. */ |
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | |||
331 | 329 | ||
332 | case PTRACE_PEEKUSR_AREA: | 330 | case PTRACE_PEEKUSR_AREA: |
333 | case PTRACE_POKEUSR_AREA: | 331 | case PTRACE_POKEUSR_AREA: |
334 | if (copy_from_user(&parea, (void __user *) addr, | 332 | if (copy_from_user(&parea, (void __force __user *) addr, |
335 | sizeof(parea))) | 333 | sizeof(parea))) |
336 | return -EFAULT; | 334 | return -EFAULT; |
337 | addr = parea.kernel_addr; | 335 | addr = parea.kernel_addr; |
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | |||
341 | if (request == PTRACE_PEEKUSR_AREA) | 339 | if (request == PTRACE_PEEKUSR_AREA) |
342 | ret = peek_user(child, addr, data); | 340 | ret = peek_user(child, addr, data); |
343 | else { | 341 | else { |
344 | addr_t tmp; | 342 | addr_t utmp; |
345 | if (get_user (tmp, (addr_t __user *) data)) | 343 | if (get_user(utmp, |
344 | (addr_t __force __user *) data)) | ||
346 | return -EFAULT; | 345 | return -EFAULT; |
347 | ret = poke_user(child, addr, tmp); | 346 | ret = poke_user(child, addr, utmp); |
348 | } | 347 | } |
349 | if (ret) | 348 | if (ret) |
350 | return ret; | 349 | return ret; |
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | |||
394 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 393 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
395 | /* Fake a 31 bit psw mask. */ | 394 | /* Fake a 31 bit psw mask. */ |
396 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); | 395 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); |
397 | tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); | 396 | tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); |
398 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 397 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
399 | /* Fake a 31 bit psw address. */ | 398 | /* Fake a 31 bit psw address. */ |
400 | tmp = (__u32) task_pt_regs(child)->psw.addr | | 399 | tmp = (__u32) task_pt_regs(child)->psw.addr | |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | |||
469 | */ | 468 | */ |
470 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 469 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
471 | /* Build a 64 bit psw mask from 31 bit mask. */ | 470 | /* Build a 64 bit psw mask from 31 bit mask. */ |
472 | if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) | 471 | if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) |
473 | /* Invalid psw mask. */ | 472 | /* Invalid psw mask. */ |
474 | return -EINVAL; | 473 | return -EINVAL; |
475 | task_pt_regs(child)->psw.mask = | 474 | task_pt_regs(child)->psw.mask = |
476 | PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); | 475 | PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); |
477 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 476 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
478 | /* Build a 64 bit psw address from 31 bit address. */ | 477 | /* Build a 64 bit psw address from 31 bit address. */ |
479 | task_pt_regs(child)->psw.addr = | 478 | task_pt_regs(child)->psw.addr = |
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
550 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | 549 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); |
551 | if (copied != sizeof(tmp)) | 550 | if (copied != sizeof(tmp)) |
552 | return -EIO; | 551 | return -EIO; |
553 | return put_user(tmp, (unsigned int __user *) data); | 552 | return put_user(tmp, (unsigned int __force __user *) data); |
554 | 553 | ||
555 | case PTRACE_PEEKUSR: | 554 | case PTRACE_PEEKUSR: |
556 | /* read the word at location addr in the USER area. */ | 555 | /* read the word at location addr in the USER area. */ |
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
571 | 570 | ||
572 | case PTRACE_PEEKUSR_AREA: | 571 | case PTRACE_PEEKUSR_AREA: |
573 | case PTRACE_POKEUSR_AREA: | 572 | case PTRACE_POKEUSR_AREA: |
574 | if (copy_from_user(&parea, (void __user *) addr, | 573 | if (copy_from_user(&parea, (void __force __user *) addr, |
575 | sizeof(parea))) | 574 | sizeof(parea))) |
576 | return -EFAULT; | 575 | return -EFAULT; |
577 | addr = parea.kernel_addr; | 576 | addr = parea.kernel_addr; |
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
581 | if (request == PTRACE_PEEKUSR_AREA) | 580 | if (request == PTRACE_PEEKUSR_AREA) |
582 | ret = peek_user_emu31(child, addr, data); | 581 | ret = peek_user_emu31(child, addr, data); |
583 | else { | 582 | else { |
584 | __u32 tmp; | 583 | __u32 utmp; |
585 | if (get_user (tmp, (__u32 __user *) data)) | 584 | if (get_user(utmp, |
585 | (__u32 __force __user *) data)) | ||
586 | return -EFAULT; | 586 | return -EFAULT; |
587 | ret = poke_user_emu31(child, addr, tmp); | 587 | ret = poke_user_emu31(child, addr, utmp); |
588 | } | 588 | } |
589 | if (ret) | 589 | if (ret) |
590 | return ret; | 590 | return ret; |
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
595 | return 0; | 595 | return 0; |
596 | case PTRACE_GETEVENTMSG: | 596 | case PTRACE_GETEVENTMSG: |
597 | return put_user((__u32) child->ptrace_message, | 597 | return put_user((__u32) child->ptrace_message, |
598 | (unsigned int __user *) data); | 598 | (unsigned int __force __user *) data); |
599 | case PTRACE_GETSIGINFO: | 599 | case PTRACE_GETSIGINFO: |
600 | if (child->last_siginfo == NULL) | 600 | if (child->last_siginfo == NULL) |
601 | return -EINVAL; | 601 | return -EINVAL; |
602 | return copy_siginfo_to_user32((compat_siginfo_t __user *) data, | 602 | return copy_siginfo_to_user32((compat_siginfo_t |
603 | __force __user *) data, | ||
603 | child->last_siginfo); | 604 | child->last_siginfo); |
604 | case PTRACE_SETSIGINFO: | 605 | case PTRACE_SETSIGINFO: |
605 | if (child->last_siginfo == NULL) | 606 | if (child->last_siginfo == NULL) |
606 | return -EINVAL; | 607 | return -EINVAL; |
607 | return copy_siginfo_from_user32(child->last_siginfo, | 608 | return copy_siginfo_from_user32(child->last_siginfo, |
608 | (compat_siginfo_t __user *) data); | 609 | (compat_siginfo_t |
610 | __force __user *) data); | ||
609 | } | 611 | } |
610 | return ptrace_request(child, request, addr, data); | 612 | return ptrace_request(child, request, addr, data); |
611 | } | 613 | } |
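
The ptrace paths gain a number of __force __user casts: request arguments arrive as plain longs, and sparse only accepts reinterpreting them as user-space pointers when the conversion is marked as intentional. A minimal illustration (the helper name is made up):

```c
/*
 * Minimal illustration (made-up helper): 'data' arrives as a plain long
 * from the ptrace ABI, and sparse only accepts turning it back into a
 * user-space pointer when the cast carries __force __user.
 */
#include <linux/compiler.h>
#include <asm/uaccess.h>

static int peek_word(long data, unsigned long *val)
{
	/* without __force, sparse warns about an address-space mismatch */
	return get_user(*val, (unsigned long __force __user *) data);
}
```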
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S deleted file mode 100644 index 8a87355161fa..000000000000 --- a/arch/s390/kernel/reset.S +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/reset.S | ||
3 | * | ||
4 | * Copyright (C) IBM Corp. 2006 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | * Michael Holzheu <holzheu@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/lowcore.h> | ||
11 | |||
12 | #ifdef CONFIG_64BIT | ||
13 | |||
14 | .globl reset_mcck_handler | ||
15 | reset_mcck_handler: | ||
16 | basr %r13,0 | ||
17 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
18 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
19 | lg %r1,s390_reset_mcck_handler-0b(%r13) | ||
20 | ltgr %r1,%r1 | ||
21 | jz 1f | ||
22 | basr %r14,%r1 | ||
23 | 1: la %r1,4095 | ||
24 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) | ||
25 | lpswe __LC_MCK_OLD_PSW | ||
26 | |||
27 | .globl s390_reset_mcck_handler | ||
28 | s390_reset_mcck_handler: | ||
29 | .quad 0 | ||
30 | |||
31 | .globl reset_pgm_handler | ||
32 | reset_pgm_handler: | ||
33 | stmg %r0,%r15,__LC_SAVE_AREA | ||
34 | basr %r13,0 | ||
35 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
36 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
37 | lg %r1,s390_reset_pgm_handler-0b(%r13) | ||
38 | ltgr %r1,%r1 | ||
39 | jz 1f | ||
40 | basr %r14,%r1 | ||
41 | lmg %r0,%r15,__LC_SAVE_AREA | ||
42 | lpswe __LC_PGM_OLD_PSW | ||
43 | 1: lpswe disabled_wait_psw-0b(%r13) | ||
44 | .globl s390_reset_pgm_handler | ||
45 | s390_reset_pgm_handler: | ||
46 | .quad 0 | ||
47 | .align 8 | ||
48 | disabled_wait_psw: | ||
49 | .quad 0x0002000180000000,0x0000000000000000 + reset_pgm_handler | ||
50 | |||
51 | #else /* CONFIG_64BIT */ | ||
52 | |||
53 | .globl reset_mcck_handler | ||
54 | reset_mcck_handler: | ||
55 | basr %r13,0 | ||
56 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
57 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
58 | l %r1,s390_reset_mcck_handler-0b(%r13) | ||
59 | ltr %r1,%r1 | ||
60 | jz 1f | ||
61 | basr %r14,%r1 | ||
62 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
63 | lpsw __LC_MCK_OLD_PSW | ||
64 | |||
65 | .globl s390_reset_mcck_handler | ||
66 | s390_reset_mcck_handler: | ||
67 | .long 0 | ||
68 | |||
69 | .globl reset_pgm_handler | ||
70 | reset_pgm_handler: | ||
71 | stm %r0,%r15,__LC_SAVE_AREA | ||
72 | basr %r13,0 | ||
73 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
74 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
75 | l %r1,s390_reset_pgm_handler-0b(%r13) | ||
76 | ltr %r1,%r1 | ||
77 | jz 1f | ||
78 | basr %r14,%r1 | ||
79 | lm %r0,%r15,__LC_SAVE_AREA | ||
80 | lpsw __LC_PGM_OLD_PSW | ||
81 | |||
82 | 1: lpsw disabled_wait_psw-0b(%r13) | ||
83 | .globl s390_reset_pgm_handler | ||
84 | s390_reset_pgm_handler: | ||
85 | .long 0 | ||
86 | disabled_wait_psw: | ||
87 | .align 8 | ||
88 | .long 0x000a0000,0x00000000 + reset_pgm_handler | ||
89 | |||
90 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index bc5beaa8f98e..acf93dba7727 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c | |||
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code) | |||
125 | * Make sure that the i/o interrupt did not "overtake" | 125 | * Make sure that the i/o interrupt did not "overtake" |
126 | * the last HZ timer interrupt. | 126 | * the last HZ timer interrupt. |
127 | */ | 127 | */ |
128 | account_ticks(); | 128 | account_ticks(S390_lowcore.int_clock); |
129 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | 129 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; |
130 | index = ext_hash(code); | 130 | index = ext_hash(code); |
131 | for (p = ext_int_hash[index]; p; p = p->next) { | 131 | for (p = ext_int_hash[index]; p; p = p->next) { |
132 | if (likely(p->code == code)) { | 132 | if (likely(p->code == code)) |
133 | if (likely(p->handler)) | 133 | p->handler(code); |
134 | p->handler(code); | ||
135 | } | ||
136 | } | 134 | } |
137 | irq_exit(); | 135 | irq_exit(); |
138 | set_irq_regs(old_regs); | 136 | set_irq_regs(old_regs); |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 5d8ee3baac14..03739813d3bf 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -38,6 +38,8 @@ | |||
38 | #include <linux/device.h> | 38 | #include <linux/device.h> |
39 | #include <linux/notifier.h> | 39 | #include <linux/notifier.h> |
40 | #include <linux/pfn.h> | 40 | #include <linux/pfn.h> |
41 | #include <linux/ctype.h> | ||
42 | #include <linux/reboot.h> | ||
41 | 43 | ||
42 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
43 | #include <asm/system.h> | 45 | #include <asm/system.h> |
@@ -49,6 +51,14 @@ | |||
49 | #include <asm/page.h> | 51 | #include <asm/page.h> |
50 | #include <asm/ptrace.h> | 52 | #include <asm/ptrace.h> |
51 | #include <asm/sections.h> | 53 | #include <asm/sections.h> |
54 | #include <asm/ebcdic.h> | ||
55 | #include <asm/compat.h> | ||
56 | |||
57 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | | ||
58 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); | ||
59 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
60 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
61 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | ||
52 | 62 | ||
53 | /* | 63 | /* |
54 | * User copy operations. | 64 | * User copy operations. |
@@ -117,9 +127,9 @@ void __devinit cpu_init (void) | |||
117 | */ | 127 | */ |
118 | char vmhalt_cmd[128] = ""; | 128 | char vmhalt_cmd[128] = ""; |
119 | char vmpoff_cmd[128] = ""; | 129 | char vmpoff_cmd[128] = ""; |
120 | char vmpanic_cmd[128] = ""; | 130 | static char vmpanic_cmd[128] = ""; |
121 | 131 | ||
122 | static inline void strncpy_skip_quote(char *dst, char *src, int n) | 132 | static void strncpy_skip_quote(char *dst, char *src, int n) |
123 | { | 133 | { |
124 | int sx, dx; | 134 | int sx, dx; |
125 | 135 | ||
@@ -275,10 +285,6 @@ static void __init conmode_default(void) | |||
275 | } | 285 | } |
276 | 286 | ||
277 | #ifdef CONFIG_SMP | 287 | #ifdef CONFIG_SMP |
278 | extern void machine_restart_smp(char *); | ||
279 | extern void machine_halt_smp(void); | ||
280 | extern void machine_power_off_smp(void); | ||
281 | |||
282 | void (*_machine_restart)(char *command) = machine_restart_smp; | 288 | void (*_machine_restart)(char *command) = machine_restart_smp; |
283 | void (*_machine_halt)(void) = machine_halt_smp; | 289 | void (*_machine_halt)(void) = machine_halt_smp; |
284 | void (*_machine_power_off)(void) = machine_power_off_smp; | 290 | void (*_machine_power_off)(void) = machine_power_off_smp; |
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p) | |||
386 | } | 392 | } |
387 | early_param("ipldelay", early_parse_ipldelay); | 393 | early_param("ipldelay", early_parse_ipldelay); |
388 | 394 | ||
395 | #ifdef CONFIG_S390_SWITCH_AMODE | ||
396 | unsigned int switch_amode = 0; | ||
397 | EXPORT_SYMBOL_GPL(switch_amode); | ||
398 | |||
399 | static void set_amode_and_uaccess(unsigned long user_amode, | ||
400 | unsigned long user32_amode) | ||
401 | { | ||
402 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | ||
403 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
404 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
405 | #ifdef CONFIG_COMPAT | ||
406 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | | ||
407 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
408 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
409 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | | ||
410 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
411 | PSW32_MASK_PSTATE; | ||
412 | #endif | ||
413 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
414 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | ||
415 | |||
416 | if (MACHINE_HAS_MVCOS) { | ||
417 | printk("mvcos available.\n"); | ||
418 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | ||
419 | } else { | ||
420 | printk("mvcos not available.\n"); | ||
421 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); | ||
422 | } | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * Switch kernel/user addressing modes? | ||
427 | */ | ||
428 | static int __init early_parse_switch_amode(char *p) | ||
429 | { | ||
430 | switch_amode = 1; | ||
431 | return 0; | ||
432 | } | ||
433 | early_param("switch_amode", early_parse_switch_amode); | ||
434 | |||
435 | #else /* CONFIG_S390_SWITCH_AMODE */ | ||
436 | static inline void set_amode_and_uaccess(unsigned long user_amode, | ||
437 | unsigned long user32_amode) | ||
438 | { | ||
439 | } | ||
440 | #endif /* CONFIG_S390_SWITCH_AMODE */ | ||
441 | |||
442 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
443 | unsigned int s390_noexec = 0; | ||
444 | EXPORT_SYMBOL_GPL(s390_noexec); | ||
445 | |||
446 | /* | ||
447 | * Enable execute protection? | ||
448 | */ | ||
449 | static int __init early_parse_noexec(char *p) | ||
450 | { | ||
451 | if (!strncmp(p, "off", 3)) | ||
452 | return 0; | ||
453 | switch_amode = 1; | ||
454 | s390_noexec = 1; | ||
455 | return 0; | ||
456 | } | ||
457 | early_param("noexec", early_parse_noexec); | ||
458 | #endif /* CONFIG_S390_EXEC_PROTECT */ | ||
459 | |||
460 | static void setup_addressing_mode(void) | ||
461 | { | ||
462 | if (s390_noexec) { | ||
463 | printk("S390 execute protection active, "); | ||
464 | set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY); | ||
465 | return; | ||
466 | } | ||
467 | if (switch_amode) { | ||
468 | printk("S390 address spaces switched, "); | ||
469 | set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY); | ||
470 | } | ||
471 | } | ||
472 | |||
389 | static void __init | 473 | static void __init |
390 | setup_lowcore(void) | 474 | setup_lowcore(void) |
391 | { | 475 | { |
@@ -402,19 +486,21 @@ setup_lowcore(void) | |||
402 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 486 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; |
403 | lc->restart_psw.addr = | 487 | lc->restart_psw.addr = |
404 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 488 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
405 | lc->external_new_psw.mask = PSW_KERNEL_BITS; | 489 | if (switch_amode) |
490 | lc->restart_psw.mask |= PSW_ASC_HOME; | ||
491 | lc->external_new_psw.mask = psw_kernel_bits; | ||
406 | lc->external_new_psw.addr = | 492 | lc->external_new_psw.addr = |
407 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 493 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; |
408 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | 494 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; |
409 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 495 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; |
410 | lc->program_new_psw.mask = PSW_KERNEL_BITS; | 496 | lc->program_new_psw.mask = psw_kernel_bits; |
411 | lc->program_new_psw.addr = | 497 | lc->program_new_psw.addr = |
412 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 498 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; |
413 | lc->mcck_new_psw.mask = | 499 | lc->mcck_new_psw.mask = |
414 | PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; | 500 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; |
415 | lc->mcck_new_psw.addr = | 501 | lc->mcck_new_psw.addr = |
416 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | 502 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; |
417 | lc->io_new_psw.mask = PSW_KERNEL_BITS; | 503 | lc->io_new_psw.mask = psw_kernel_bits; |
418 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 504 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
419 | lc->ipl_device = S390_lowcore.ipl_device; | 505 | lc->ipl_device = S390_lowcore.ipl_device; |
420 | lc->jiffy_timer = -1LL; | 506 | lc->jiffy_timer = -1LL; |
@@ -439,7 +525,7 @@ setup_lowcore(void) | |||
439 | static void __init | 525 | static void __init |
440 | setup_resources(void) | 526 | setup_resources(void) |
441 | { | 527 | { |
442 | struct resource *res; | 528 | struct resource *res, *sub_res; |
443 | int i; | 529 | int i; |
444 | 530 | ||
445 | code_resource.start = (unsigned long) &_text; | 531 | code_resource.start = (unsigned long) &_text; |
@@ -464,8 +550,38 @@ setup_resources(void) | |||
464 | res->start = memory_chunk[i].addr; | 550 | res->start = memory_chunk[i].addr; |
465 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; | 551 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; |
466 | request_resource(&iomem_resource, res); | 552 | request_resource(&iomem_resource, res); |
467 | request_resource(res, &code_resource); | 553 | |
468 | request_resource(res, &data_resource); | 554 | if (code_resource.start >= res->start && |
555 | code_resource.start <= res->end && | ||
556 | code_resource.end > res->end) { | ||
557 | sub_res = alloc_bootmem_low(sizeof(struct resource)); | ||
558 | memcpy(sub_res, &code_resource, | ||
559 | sizeof(struct resource)); | ||
560 | sub_res->end = res->end; | ||
561 | code_resource.start = res->end + 1; | ||
562 | request_resource(res, sub_res); | ||
563 | } | ||
564 | |||
565 | if (code_resource.start >= res->start && | ||
566 | code_resource.start <= res->end && | ||
567 | code_resource.end <= res->end) | ||
568 | request_resource(res, &code_resource); | ||
569 | |||
570 | if (data_resource.start >= res->start && | ||
571 | data_resource.start <= res->end && | ||
572 | data_resource.end > res->end) { | ||
573 | sub_res = alloc_bootmem_low(sizeof(struct resource)); | ||
574 | memcpy(sub_res, &data_resource, | ||
575 | sizeof(struct resource)); | ||
576 | sub_res->end = res->end; | ||
577 | data_resource.start = res->end + 1; | ||
578 | request_resource(res, sub_res); | ||
579 | } | ||
580 | |||
581 | if (data_resource.start >= res->start && | ||
582 | data_resource.start <= res->end && | ||
583 | data_resource.end <= res->end) | ||
584 | request_resource(res, &data_resource); | ||
469 | } | 585 | } |
470 | } | 586 | } |
471 | 587 | ||
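The reworked setup_resources() above registers each detected memory chunk with the iomem tree and, when the kernel code or data resource straddles a chunk boundary, clips a per-chunk sub-resource so that request_resource() never sees a child crossing its parent. A minimal user-space sketch of that split decision follows; the start/end values are hypothetical stand-ins for code_resource and one memory chunk, not values from the kernel:

#include <stdio.h>

struct range { unsigned long start, end; };

/* Split "child" at the chunk boundary if it starts inside the chunk but
 * ends beyond it; mirrors the sub_res handling in setup_resources(). */
static void split_at_chunk(struct range *child, const struct range *chunk)
{
	if (child->start >= chunk->start && child->start <= chunk->end &&
	    child->end > chunk->end) {
		struct range sub = { child->start, chunk->end };

		printf("sub-resource %#lx-%#lx requested inside this chunk\n",
		       sub.start, sub.end);
		child->start = chunk->end + 1;	/* remainder goes to the next chunk */
	}
}

int main(void)
{
	struct range code  = { 0x100000, 0x8000000 };	/* hypothetical _text.._etext */
	struct range chunk = { 0x0, 0x3ffffff };	/* hypothetical 64 MB chunk */

	split_at_chunk(&code, &chunk);
	printf("rest of the resource: %#lx-%#lx\n", code.start, code.end);
	return 0;
}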
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void) | |||
495 | } | 611 | } |
496 | if (!memory_end) | 612 | if (!memory_end) |
497 | memory_end = memory_size; | 613 | memory_end = memory_size; |
498 | if (real_size > memory_end) | ||
499 | printk("More memory detected than supported. Unused: %luk\n", | ||
500 | (real_size - memory_end) >> 10); | ||
501 | } | 614 | } |
502 | 615 | ||
503 | static void __init | 616 | static void __init |
504 | setup_memory(void) | 617 | setup_memory(void) |
505 | { | 618 | { |
506 | unsigned long bootmap_size; | 619 | unsigned long bootmap_size; |
507 | unsigned long start_pfn, end_pfn, init_pfn; | 620 | unsigned long start_pfn, end_pfn; |
508 | int i; | 621 | int i; |
509 | 622 | ||
510 | /* | 623 | /* |
@@ -514,10 +627,6 @@ setup_memory(void) | |||
514 | start_pfn = PFN_UP(__pa(&_end)); | 627 | start_pfn = PFN_UP(__pa(&_end)); |
515 | end_pfn = max_pfn = PFN_DOWN(memory_end); | 628 | end_pfn = max_pfn = PFN_DOWN(memory_end); |
516 | 629 | ||
517 | /* Initialize storage key for kernel pages */ | ||
518 | for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) | ||
519 | page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); | ||
520 | |||
521 | #ifdef CONFIG_BLK_DEV_INITRD | 630 | #ifdef CONFIG_BLK_DEV_INITRD |
522 | /* | 631 | /* |
523 | * Move the initrd in case the bitmap of the bootmem allocator | 632
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p) | |||
651 | parse_early_param(); | 760 | parse_early_param(); |
652 | 761 | ||
653 | setup_memory_end(); | 762 | setup_memory_end(); |
763 | setup_addressing_mode(); | ||
654 | setup_memory(); | 764 | setup_memory(); |
655 | setup_resources(); | 765 | setup_resources(); |
656 | setup_lowcore(); | 766 | setup_lowcore(); |
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
694 | struct cpuinfo_S390 *cpuinfo; | 804 | struct cpuinfo_S390 *cpuinfo; |
695 | unsigned long n = (unsigned long) v - 1; | 805 | unsigned long n = (unsigned long) v - 1; |
696 | 806 | ||
807 | s390_adjust_jiffies(); | ||
697 | preempt_disable(); | 808 | preempt_disable(); |
698 | if (!n) { | 809 | if (!n) { |
699 | seq_printf(m, "vendor_id : IBM/S390\n" | 810 | seq_printf(m, "vendor_id : IBM/S390\n" |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 4c8a7954ef48..554f9cf7499c 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
119 | 119 | ||
120 | /* Copy a 'clean' PSW mask to the user to avoid leaking | 120 | /* Copy a 'clean' PSW mask to the user to avoid leaking |
121 | information about whether PER is currently on. */ | 121 | information about whether PER is currently on. */ |
122 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); | 122 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask); |
123 | user_sregs.regs.psw.addr = regs->psw.addr; | 123 | user_sregs.regs.psw.addr = regs->psw.addr; |
124 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); | 124 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); |
125 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, | 125 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c0cd255fddbd..65b52320d145 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -22,23 +22,23 @@ | |||
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | |||
26 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
27 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
28 | #include <linux/kernel_stat.h> | 27 | #include <linux/kernel_stat.h> |
29 | #include <linux/smp_lock.h> | 28 | #include <linux/smp_lock.h> |
30 | |||
31 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
32 | #include <linux/cache.h> | 30 | #include <linux/cache.h> |
33 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
34 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
35 | 33 | #include <linux/timex.h> | |
34 | #include <asm/setup.h> | ||
36 | #include <asm/sigp.h> | 35 | #include <asm/sigp.h> |
37 | #include <asm/pgalloc.h> | 36 | #include <asm/pgalloc.h> |
38 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
39 | #include <asm/s390_ext.h> | 38 | #include <asm/s390_ext.h> |
40 | #include <asm/cpcmd.h> | 39 | #include <asm/cpcmd.h> |
41 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | #include <asm/timer.h> | ||
42 | 42 | ||
43 | extern volatile int __cpu_logical_map[]; | 43 | extern volatile int __cpu_logical_map[]; |
44 | 44 | ||
@@ -53,12 +53,6 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE; | |||
53 | 53 | ||
54 | static struct task_struct *current_set[NR_CPUS]; | 54 | static struct task_struct *current_set[NR_CPUS]; |
55 | 55 | ||
56 | /* | ||
57 | * Reboot, halt and power_off routines for SMP. | ||
58 | */ | ||
59 | extern char vmhalt_cmd[]; | ||
60 | extern char vmpoff_cmd[]; | ||
61 | |||
62 | static void smp_ext_bitcall(int, ec_bit_sig); | 56 | static void smp_ext_bitcall(int, ec_bit_sig); |
63 | static void smp_ext_bitcall_others(ec_bit_sig); | 57 | static void smp_ext_bitcall_others(ec_bit_sig); |
64 | 58 | ||
@@ -200,7 +194,7 @@ int smp_call_function_on(void (*func) (void *info), void *info, | |||
200 | } | 194 | } |
201 | EXPORT_SYMBOL(smp_call_function_on); | 195 | EXPORT_SYMBOL(smp_call_function_on); |
202 | 196 | ||
203 | static inline void do_send_stop(void) | 197 | static void do_send_stop(void) |
204 | { | 198 | { |
205 | int cpu, rc; | 199 | int cpu, rc; |
206 | 200 | ||
@@ -214,7 +208,7 @@ static inline void do_send_stop(void) | |||
214 | } | 208 | } |
215 | } | 209 | } |
216 | 210 | ||
217 | static inline void do_store_status(void) | 211 | static void do_store_status(void) |
218 | { | 212 | { |
219 | int cpu, rc; | 213 | int cpu, rc; |
220 | 214 | ||
@@ -230,7 +224,7 @@ static inline void do_store_status(void) | |||
230 | } | 224 | } |
231 | } | 225 | } |
232 | 226 | ||
233 | static inline void do_wait_for_stop(void) | 227 | static void do_wait_for_stop(void) |
234 | { | 228 | { |
235 | int cpu; | 229 | int cpu; |
236 | 230 | ||
@@ -250,7 +244,7 @@ static inline void do_wait_for_stop(void) | |||
250 | void smp_send_stop(void) | 244 | void smp_send_stop(void) |
251 | { | 245 | { |
252 | /* Disable all interrupts/machine checks */ | 246 | /* Disable all interrupts/machine checks */ |
253 | __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); | 247 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); |
254 | 248 | ||
255 | /* write magic number to zero page (absolute 0) */ | 249 | /* write magic number to zero page (absolute 0) */ |
256 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; | 250 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; |
@@ -298,7 +292,7 @@ void machine_power_off_smp(void) | |||
298 | * cpus are handled. | 292 | * cpus are handled. |
299 | */ | 293 | */ |
300 | 294 | ||
301 | void do_ext_call_interrupt(__u16 code) | 295 | static void do_ext_call_interrupt(__u16 code) |
302 | { | 296 | { |
303 | unsigned long bits; | 297 | unsigned long bits; |
304 | 298 | ||
@@ -385,7 +379,7 @@ struct ec_creg_mask_parms { | |||
385 | /* | 379 | /* |
386 | * callback for setting/clearing control bits | 380 | * callback for setting/clearing control bits |
387 | */ | 381 | */ |
388 | void smp_ctl_bit_callback(void *info) { | 382 | static void smp_ctl_bit_callback(void *info) { |
389 | struct ec_creg_mask_parms *pp = info; | 383 | struct ec_creg_mask_parms *pp = info; |
390 | unsigned long cregs[16]; | 384 | unsigned long cregs[16]; |
391 | int i; | 385 | int i; |
@@ -458,17 +452,15 @@ __init smp_count_cpus(void) | |||
458 | /* | 452 | /* |
459 | * Activate a secondary processor. | 453 | * Activate a secondary processor. |
460 | */ | 454 | */ |
461 | extern void init_cpu_timer(void); | ||
462 | extern void init_cpu_vtimer(void); | ||
463 | |||
464 | int __devinit start_secondary(void *cpuvoid) | 455 | int __devinit start_secondary(void *cpuvoid) |
465 | { | 456 | { |
466 | /* Setup the cpu */ | 457 | /* Setup the cpu */ |
467 | cpu_init(); | 458 | cpu_init(); |
468 | preempt_disable(); | 459 | preempt_disable(); |
469 | /* init per CPU timer */ | 460 | /* Enable TOD clock interrupts on the secondary cpu. */ |
470 | init_cpu_timer(); | 461 | init_cpu_timer(); |
471 | #ifdef CONFIG_VIRT_TIMER | 462 | #ifdef CONFIG_VIRT_TIMER |
463 | /* Enable cpu timer interrupts on the secondary cpu. */ | ||
472 | init_cpu_vtimer(); | 464 | init_cpu_vtimer(); |
473 | #endif | 465 | #endif |
474 | /* Enable pfault pseudo page faults on this cpu. */ | 466 | /* Enable pfault pseudo page faults on this cpu. */ |
@@ -542,7 +534,7 @@ smp_put_cpu(int cpu) | |||
542 | spin_unlock_irqrestore(&smp_reserve_lock, flags); | 534 | spin_unlock_irqrestore(&smp_reserve_lock, flags); |
543 | } | 535 | } |
544 | 536 | ||
545 | static inline int | 537 | static int |
546 | cpu_stopped(int cpu) | 538 | cpu_stopped(int cpu) |
547 | { | 539 | { |
548 | __u32 status; | 540 | __u32 status; |
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 0d14a4789bf2..2e5c65a1863e 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c | |||
@@ -11,11 +11,11 @@ | |||
11 | #include <linux/stacktrace.h> | 11 | #include <linux/stacktrace.h> |
12 | #include <linux/kallsyms.h> | 12 | #include <linux/kallsyms.h> |
13 | 13 | ||
14 | static inline unsigned long save_context_stack(struct stack_trace *trace, | 14 | static unsigned long save_context_stack(struct stack_trace *trace, |
15 | unsigned int *skip, | 15 | unsigned int *skip, |
16 | unsigned long sp, | 16 | unsigned long sp, |
17 | unsigned long low, | 17 | unsigned long low, |
18 | unsigned long high) | 18 | unsigned long high) |
19 | { | 19 | { |
20 | struct stack_frame *sf; | 20 | struct stack_frame *sf; |
21 | struct pt_regs *regs; | 21 | struct pt_regs *regs; |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 6cceed4df73e..3b91f27ab202 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -37,11 +37,15 @@ | |||
37 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/irq_regs.h> | 38 | #include <asm/irq_regs.h> |
39 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
40 | #include <asm/etr.h> | ||
40 | 41 | ||
41 | /* change this if you have some constant time drift */ | 42 | /* change this if you have some constant time drift */ |
42 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 43 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
43 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) | 44 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) |
44 | 45 | ||
46 | /* The value of the TOD clock for 1.1.1970. */ | ||
47 | #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL | ||
48 | |||
45 | /* | 49 | /* |
46 | * Create a small time difference between the timer interrupts | 50 | * Create a small time difference between the timer interrupts |
47 | * on the different cpus to avoid lock contention. | 51 | * on the different cpus to avoid lock contention. |
@@ -51,6 +55,7 @@ | |||
51 | #define TICK_SIZE tick | 55 | #define TICK_SIZE tick |
52 | 56 | ||
53 | static ext_int_info_t ext_int_info_cc; | 57 | static ext_int_info_t ext_int_info_cc; |
58 | static ext_int_info_t ext_int_etr_cc; | ||
54 | static u64 init_timer_cc; | 59 | static u64 init_timer_cc; |
55 | static u64 jiffies_timer_cc; | 60 | static u64 jiffies_timer_cc; |
56 | static u64 xtime_cc; | 61 | static u64 xtime_cc; |
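The TOD clock counts from 1900-01-01 with bit 51 incrementing once per microsecond, so the TOD_UNIX_EPOCH constant introduced above is simply the number of microseconds between 1900 and 1970 shifted left by 12. A small, self-contained check of that value (the 17 leap days are spelled out as an assumption in the comment):

#include <stdio.h>

int main(void)
{
	/* 70 years from 1900-01-01 to 1970-01-01, 17 of them leap years
	 * (1904, 1908, ..., 1968; 1900 is not a leap year). */
	unsigned long long days = 70ULL * 365 + 17;
	unsigned long long secs = days * 86400;			/* 2208988800 */
	unsigned long long tod  = secs * 1000000 << 12;		/* bit 51 = 1 us */

	printf("%#llx\n", tod);		/* prints 0x7d91048bca000000 */
	return 0;
}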
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) | |||
89 | #define s390_do_profile() do { ; } while(0) | 94 | #define s390_do_profile() do { ; } while(0) |
90 | #endif /* CONFIG_PROFILING */ | 95 | #endif /* CONFIG_PROFILING */ |
91 | 96 | ||
92 | |||
93 | /* | 97 | /* |
94 | * timer_interrupt() needs to keep up the real-time clock, | 98 | * Advance the per cpu tick counter up to the time given with the |
95 | * as well as call the "do_timer()" routine every clocktick | 99 | * "time" argument. The per cpu update consists of accounting |
100 | * the virtual cpu time, calling update_process_times and calling | ||
101 | * the profiling hook. If xtime is before time it is advanced as well. | ||
96 | */ | 102 | */ |
97 | void account_ticks(void) | 103 | void account_ticks(u64 time) |
98 | { | 104 | { |
99 | __u64 tmp; | ||
100 | __u32 ticks; | 105 | __u32 ticks; |
106 | __u64 tmp; | ||
101 | 107 | ||
102 | /* Calculate how many ticks have passed. */ | 108 | /* Calculate how many ticks have passed. */ |
103 | if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) { | 109 | if (time < S390_lowcore.jiffy_timer) |
104 | /* | ||
105 | * We have to program the clock comparator even if | ||
106 | * no tick has passed. That happens if e.g. an i/o | ||
107 | * interrupt wakes up an idle processor that has | ||
108 | * switched off its hz timer. | ||
109 | */ | ||
110 | tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION; | ||
111 | asm volatile ("SCKC %0" : : "m" (tmp)); | ||
112 | return; | 110 | return; |
113 | } | 111 | tmp = time - S390_lowcore.jiffy_timer; |
114 | tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer; | ||
115 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ | 112 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ |
116 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; | 113 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; |
117 | S390_lowcore.jiffy_timer += | 114 | S390_lowcore.jiffy_timer += |
@@ -124,10 +121,6 @@ void account_ticks(void) | |||
124 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; | 121 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; |
125 | } | 122 | } |
126 | 123 | ||
127 | /* set clock comparator for next tick */ | ||
128 | tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION; | ||
129 | asm volatile ("SCKC %0" : : "m" (tmp)); | ||
130 | |||
131 | #ifdef CONFIG_SMP | 124 | #ifdef CONFIG_SMP |
132 | /* | 125 | /* |
133 | * Do not rely on the boot cpu to do the calls to do_timer. | 126 | * Do not rely on the boot cpu to do the calls to do_timer. |
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1; | |||
173 | * Stop the HZ tick on the current CPU. | 166 | * Stop the HZ tick on the current CPU. |
174 | * Only cpu_idle may call this function. | 167 | * Only cpu_idle may call this function. |
175 | */ | 168 | */ |
176 | static inline void stop_hz_timer(void) | 169 | static void stop_hz_timer(void) |
177 | { | 170 | { |
178 | unsigned long flags; | 171 | unsigned long flags; |
179 | unsigned long seq, next; | 172 | unsigned long seq, next; |
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void) | |||
210 | if (timer >= jiffies_timer_cc) | 203 | if (timer >= jiffies_timer_cc) |
211 | todval = timer; | 204 | todval = timer; |
212 | } | 205 | } |
213 | asm volatile ("SCKC %0" : : "m" (todval)); | 206 | set_clock_comparator(todval); |
214 | } | 207 | } |
215 | 208 | ||
216 | /* | 209 | /* |
217 | * Start the HZ tick on the current CPU. | 210 | * Start the HZ tick on the current CPU. |
218 | * Only cpu_idle may call this function. | 211 | * Only cpu_idle may call this function. |
219 | */ | 212 | */ |
220 | static inline void start_hz_timer(void) | 213 | static void start_hz_timer(void) |
221 | { | 214 | { |
222 | BUG_ON(!in_interrupt()); | 215 | BUG_ON(!in_interrupt()); |
223 | 216 | ||
224 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) | 217 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) |
225 | return; | 218 | return; |
226 | account_ticks(); | 219 | account_ticks(get_clock()); |
220 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
227 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | 221 | cpu_clear(smp_processor_id(), nohz_cpu_mask); |
228 | } | 222 | } |
229 | 223 | ||
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = { | |||
245 | .notifier_call = nohz_idle_notify, | 239 | .notifier_call = nohz_idle_notify, |
246 | }; | 240 | }; |
247 | 241 | ||
248 | void __init nohz_init(void) | 242 | static void __init nohz_init(void) |
249 | { | 243 | { |
250 | if (register_idle_notifier(&nohz_idle_nb)) | 244 | if (register_idle_notifier(&nohz_idle_nb)) |
251 | panic("Couldn't register idle notifier"); | 245 | panic("Couldn't register idle notifier"); |
@@ -254,24 +248,57 @@ void __init nohz_init(void) | |||
254 | #endif | 248 | #endif |
255 | 249 | ||
256 | /* | 250 | /* |
257 | * Start the clock comparator on the current CPU. | 251 | * Set up per cpu jiffy timer and set the clock comparator. |
252 | */ | ||
253 | static void setup_jiffy_timer(void) | ||
254 | { | ||
255 | /* Set up clock comparator to next jiffy. */ | ||
256 | S390_lowcore.jiffy_timer = | ||
257 | jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY; | ||
258 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Set up lowcore and control register of the current cpu to | ||
263 | * enable TOD clock and clock comparator interrupts. | ||
258 | */ | 264 | */ |
259 | void init_cpu_timer(void) | 265 | void init_cpu_timer(void) |
260 | { | 266 | { |
261 | unsigned long cr0; | 267 | setup_jiffy_timer(); |
262 | __u64 timer; | ||
263 | 268 | ||
264 | timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY; | 269 | /* Enable clock comparator timer interrupt. */ |
265 | S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY; | 270 | __ctl_set_bit(0,11); |
266 | timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION; | 271 | |
267 | asm volatile ("SCKC %0" : : "m" (timer)); | 272 | /* Always allow ETR external interrupts, even without an ETR. */ |
268 | /* allow clock comparator timer interrupt */ | 273 | __ctl_set_bit(0, 4); |
269 | __ctl_store(cr0, 0, 0); | ||
270 | cr0 |= 0x800; | ||
271 | __ctl_load(cr0, 0, 0); | ||
272 | } | 274 | } |
273 | 275 | ||
274 | extern void vtime_init(void); | 276 | static void clock_comparator_interrupt(__u16 code) |
277 | { | ||
278 | /* set clock comparator for next tick */ | ||
279 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
280 | } | ||
281 | |||
282 | static void etr_reset(void); | ||
283 | static void etr_init(void); | ||
284 | static void etr_ext_handler(__u16); | ||
285 | |||
286 | /* | ||
287 | * Get the TOD clock running. | ||
288 | */ | ||
289 | static u64 __init reset_tod_clock(void) | ||
290 | { | ||
291 | u64 time; | ||
292 | |||
293 | etr_reset(); | ||
294 | if (store_clock(&time) == 0) | ||
295 | return time; | ||
296 | /* TOD clock not running. Set the clock to Unix Epoch. */ | ||
297 | if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) | ||
298 | panic("TOD clock not operational."); | ||
299 | |||
300 | return TOD_UNIX_EPOCH; | ||
301 | } | ||
275 | 302 | ||
276 | static cycle_t read_tod_clock(void) | 303 | static cycle_t read_tod_clock(void) |
277 | { | 304 | { |
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = { | |||
295 | */ | 322 | */ |
296 | void __init time_init(void) | 323 | void __init time_init(void) |
297 | { | 324 | { |
298 | __u64 set_time_cc; | 325 | init_timer_cc = reset_tod_clock(); |
299 | int cc; | 326 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; |
300 | |||
301 | /* kick the TOD clock */ | ||
302 | asm volatile( | ||
303 | " stck 0(%2)\n" | ||
304 | " ipm %0\n" | ||
305 | " srl %0,28" | ||
306 | : "=d" (cc), "=m" (init_timer_cc) | ||
307 | : "a" (&init_timer_cc) : "cc"); | ||
308 | switch (cc) { | ||
309 | case 0: /* clock in set state: all is fine */ | ||
310 | break; | ||
311 | case 1: /* clock in non-set state: FIXME */ | ||
312 | printk("time_init: TOD clock in non-set state\n"); | ||
313 | break; | ||
314 | case 2: /* clock in error state: FIXME */ | ||
315 | printk("time_init: TOD clock in error state\n"); | ||
316 | break; | ||
317 | case 3: /* clock in stopped or not-operational state: FIXME */ | ||
318 | printk("time_init: TOD clock stopped/non-operational\n"); | ||
319 | break; | ||
320 | } | ||
321 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; | 327 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; |
322 | 328 | ||
323 | /* set xtime */ | 329 | /* set xtime */ |
324 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; | 330 | tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime); |
325 | set_time_cc = init_timer_cc - 0x8126d60e46000000LL + | ||
326 | (0x3c26700LL*1000000*4096); | ||
327 | tod_to_timeval(set_time_cc, &xtime); | ||
328 | set_normalized_timespec(&wall_to_monotonic, | 331 | set_normalized_timespec(&wall_to_monotonic, |
329 | -xtime.tv_sec, -xtime.tv_nsec); | 332 | -xtime.tv_sec, -xtime.tv_nsec); |
330 | 333 | ||
331 | /* request the clock comparator external interrupt */ | 334 | /* request the clock comparator external interrupt */ |
332 | if (register_early_external_interrupt(0x1004, NULL, | 335 | if (register_early_external_interrupt(0x1004, |
336 | clock_comparator_interrupt, | ||
333 | &ext_int_info_cc) != 0) | 337 | &ext_int_info_cc) != 0) |
334 | panic("Couldn't request external interrupt 0x1004"); | 338 | panic("Couldn't request external interrupt 0x1004"); |
335 | 339 | ||
336 | if (clocksource_register(&clocksource_tod) != 0) | 340 | if (clocksource_register(&clocksource_tod) != 0) |
337 | panic("Could not register TOD clock source"); | 341 | panic("Could not register TOD clock source"); |
338 | 342 | ||
339 | init_cpu_timer(); | 343 | /* request the etr external interrupt */ |
344 | if (register_early_external_interrupt(0x1406, etr_ext_handler, | ||
345 | &ext_int_etr_cc) != 0) | ||
346 | panic("Couldn't request external interrupt 0x1406"); | ||
347 | |||
348 | /* Enable TOD clock interrupts on the boot cpu. */ | ||
349 | init_cpu_timer(); | ||
340 | 350 | ||
341 | #ifdef CONFIG_NO_IDLE_HZ | 351 | #ifdef CONFIG_NO_IDLE_HZ |
342 | nohz_init(); | 352 | nohz_init(); |
@@ -345,5 +355,1048 @@ void __init time_init(void) | |||
345 | #ifdef CONFIG_VIRT_TIMER | 355 | #ifdef CONFIG_VIRT_TIMER |
346 | vtime_init(); | 356 | vtime_init(); |
347 | #endif | 357 | #endif |
358 | etr_init(); | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * External Time Reference (ETR) code. | ||
363 | */ | ||
364 | static int etr_port0_online; | ||
365 | static int etr_port1_online; | ||
366 | |||
367 | static int __init early_parse_etr(char *p) | ||
368 | { | ||
369 | if (strncmp(p, "off", 3) == 0) | ||
370 | etr_port0_online = etr_port1_online = 0; | ||
371 | else if (strncmp(p, "port0", 5) == 0) | ||
372 | etr_port0_online = 1; | ||
373 | else if (strncmp(p, "port1", 5) == 0) | ||
374 | etr_port1_online = 1; | ||
375 | else if (strncmp(p, "on", 2) == 0) | ||
376 | etr_port0_online = etr_port1_online = 1; | ||
377 | return 0; | ||
378 | } | ||
379 | early_param("etr", early_parse_etr); | ||
380 | |||
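In practice the parser above gives four settings for the new etr= kernel parameter: etr=off, etr=port0, etr=port1 and etr=on (both ports). If the parameter is absent, both ports start offline and can still be enabled later through the per-port online attributes added further down in the sysfs interface.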
381 | enum etr_event { | ||
382 | ETR_EVENT_PORT0_CHANGE, | ||
383 | ETR_EVENT_PORT1_CHANGE, | ||
384 | ETR_EVENT_PORT_ALERT, | ||
385 | ETR_EVENT_SYNC_CHECK, | ||
386 | ETR_EVENT_SWITCH_LOCAL, | ||
387 | ETR_EVENT_UPDATE, | ||
388 | }; | ||
389 | |||
390 | enum etr_flags { | ||
391 | ETR_FLAG_ENOSYS, | ||
392 | ETR_FLAG_EACCES, | ||
393 | ETR_FLAG_STEAI, | ||
394 | }; | ||
395 | |||
396 | /* | ||
397 | * Valid bit combinations of the eacr register are (x = don't care): | ||
398 | * e0 e1 dp p0 p1 ea es sl | ||
399 | * 0 0 x 0 0 0 0 0 initial, disabled state | ||
400 | * 0 0 x 0 1 1 0 0 port 1 online | ||
401 | * 0 0 x 1 0 1 0 0 port 0 online | ||
402 | * 0 0 x 1 1 1 0 0 both ports online | ||
403 | * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode | ||
404 | * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode | ||
405 | * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync | ||
406 | * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync | ||
407 | * 0 1 x 1 1 1 0 0 both ports online, port 1 usable | ||
408 | * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync | ||
409 | * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync | ||
410 | * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode | ||
411 | * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode | ||
412 | * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync | ||
413 | * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync | ||
414 | * 1 0 x 1 1 1 0 0 both ports online, port 0 usable | ||
415 | * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync | ||
416 | * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync | ||
417 | * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync | ||
418 | * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync | ||
419 | */ | ||
420 | static struct etr_eacr etr_eacr; | ||
421 | static u64 etr_tolec; /* time of last eacr update */ | ||
422 | static unsigned long etr_flags; | ||
423 | static struct etr_aib etr_port0; | ||
424 | static int etr_port0_uptodate; | ||
425 | static struct etr_aib etr_port1; | ||
426 | static int etr_port1_uptodate; | ||
427 | static unsigned long etr_events; | ||
428 | static struct timer_list etr_timer; | ||
429 | static struct tasklet_struct etr_tasklet; | ||
430 | static DEFINE_PER_CPU(atomic_t, etr_sync_word); | ||
431 | |||
432 | static void etr_timeout(unsigned long dummy); | ||
433 | static void etr_tasklet_fn(unsigned long dummy); | ||
434 | |||
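Once a port is enabled, the table above boils down to two bits: es says the clock is (or is being brought) in sync, and sl selects ETR mode over PPS mode, which is exactly what etr_mode_is_pps() and etr_mode_is_etr() further below test and what the stepping_mode sysfs attribute reports. A tiny decode sketch of that encoding, using plain ints instead of the struct etr_eacr bitfields from asm/etr.h:

#include <stdio.h>

/* Decode of the es/sl combinations from the table above, matching
 * etr_mode_is_pps()/etr_mode_is_etr() and the stepping_mode output. */
static const char *etr_mode(int es, int sl)
{
	if (es && !sl)
		return "pps";
	if (es && sl)
		return "etr";
	return "local";
}

int main(void)
{
	printf("es=1 sl=0 -> %s\n", etr_mode(1, 0));	/* pps   */
	printf("es=1 sl=1 -> %s\n", etr_mode(1, 1));	/* etr   */
	printf("es=0 sl=0 -> %s\n", etr_mode(0, 0));	/* local */
	return 0;
}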
435 | /* | ||
436 | * The etr get_clock function. It will write the current clock value | ||
437 | * to the clock pointer and return 0 if the clock is in sync with the | ||
438 | * external time source. If the clock is in local mode it returns |||
439 | * -ENOSYS, and -EAGAIN if the clock is not in sync with the external |||
440 | * reference. This function is what ETR is all about. |||
441 | */ | ||
442 | int get_sync_clock(unsigned long long *clock) | ||
443 | { | ||
444 | atomic_t *sw_ptr; | ||
445 | unsigned int sw0, sw1; | ||
446 | |||
447 | sw_ptr = &get_cpu_var(etr_sync_word); | ||
448 | sw0 = atomic_read(sw_ptr); | ||
449 | *clock = get_clock(); | ||
450 | sw1 = atomic_read(sw_ptr); | ||
451 | put_cpu_var(etr_sync_word); |||
452 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | ||
453 | /* Success: time is in sync. */ | ||
454 | return 0; | ||
455 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
456 | return -ENOSYS; | ||
457 | if (test_bit(ETR_FLAG_EACCES, &etr_flags)) | ||
458 | return -EACCES; | ||
459 | return -EAGAIN; | ||
460 | } | ||
461 | EXPORT_SYMBOL(get_sync_clock); | ||
462 | |||
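The only contract visible here is the set of return codes (0 in sync, -ENOSYS local mode, -EACCES no usable port, -EAGAIN temporarily out of sync), so a caller that merely needs a timestamp would fall back to get_clock(). A hedged, self-contained sketch of such a caller; the two stub functions stand in for the real kernel interfaces so the fallback logic can be run as-is:

#include <stdio.h>
#include <errno.h>

/* Stand-ins for the kernel interfaces; in the kernel the real
 * get_clock()/get_sync_clock() from asm/timex.h would be used. */
static unsigned long long get_clock(void) { return 0x123456789abcdefULL; }
static int get_sync_clock(unsigned long long *clock) { *clock = get_clock(); return -EAGAIN; }

/* Hypothetical caller: prefer a synchronized timestamp, fall back to the
 * plain TOD value when the clock is in local mode or currently out of sync. */
static unsigned long long get_timestamp(void)
{
	unsigned long long clock;
	int rc;

	rc = get_sync_clock(&clock);
	if (rc == 0)
		return clock;	/* in sync with the external time reference */
	/* -EAGAIN, -ENOSYS or -EACCES: use the unsynchronized TOD value */
	return get_clock();
}

int main(void)
{
	printf("timestamp: %#llx\n", get_timestamp());
	return 0;
}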
463 | /* | ||
464 | * Make get_sync_clock return -EAGAIN. | ||
465 | */ | ||
466 | static void etr_disable_sync_clock(void *dummy) | ||
467 | { | ||
468 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
469 | /* | ||
470 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | ||
471 | * fail until the sync bit is turned back on. In addition | ||
472 | * increase the "sequence" counter to avoid the race of an | ||
473 | * etr event and the complete recovery against get_sync_clock. | ||
474 | */ | ||
475 | atomic_clear_mask(0x80000000, sw_ptr); | ||
476 | atomic_inc(sw_ptr); | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * Make get_sync_clock return 0 again. | ||
481 | * Needs to be called from a context disabled for preemption. | ||
482 | */ | ||
483 | static void etr_enable_sync_clock(void) | ||
484 | { | ||
485 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
486 | atomic_set_mask(0x80000000, sw_ptr); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Reset ETR attachment. | ||
491 | */ | ||
492 | static void etr_reset(void) | ||
493 | { | ||
494 | etr_eacr = (struct etr_eacr) { | ||
495 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, | ||
496 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, | ||
497 | .es = 0, .sl = 0 }; | ||
498 | if (etr_setr(&etr_eacr) == 0) | ||
499 | etr_tolec = get_clock(); | ||
500 | else { | ||
501 | set_bit(ETR_FLAG_ENOSYS, &etr_flags); | ||
502 | if (etr_port0_online || etr_port1_online) { | ||
503 | printk(KERN_WARNING "Running on non ETR capable " | ||
504 | "machine, only local mode available.\n"); | ||
505 | etr_port0_online = etr_port1_online = 0; | ||
506 | } | ||
507 | } | ||
508 | } | ||
509 | |||
510 | static void etr_init(void) | ||
511 | { | ||
512 | struct etr_aib aib; | ||
513 | |||
514 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
515 | return; | ||
516 | /* Check if this machine has the steai instruction. */ | ||
517 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | ||
518 | set_bit(ETR_FLAG_STEAI, &etr_flags); | ||
519 | setup_timer(&etr_timer, etr_timeout, 0UL); | ||
520 | tasklet_init(&etr_tasklet, etr_tasklet_fn, 0); | ||
521 | if (!etr_port0_online && !etr_port1_online) | ||
522 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
523 | if (etr_port0_online) { | ||
524 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | ||
525 | tasklet_hi_schedule(&etr_tasklet); | ||
526 | } | ||
527 | if (etr_port1_online) { | ||
528 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | ||
529 | tasklet_hi_schedule(&etr_tasklet); | ||
530 | } | ||
531 | } | ||
532 | |||
533 | /* | ||
534 | * Two sorts of ETR machine checks. The architecture reads: | ||
535 | * "When a machine-check niterruption occurs and if a switch-to-local or | ||
536 | * ETR-sync-check interrupt request is pending but disabled, this pending | ||
537 | * disabled interruption request is indicated and is cleared". | ||
538 | * Which means that we can get etr_switch_to_local events from the machine | ||
539 | * check handler although the interruption condition is disabled. Lovely.. | ||
540 | */ | ||
541 | |||
542 | /* | ||
543 | * Switch to local machine check. This is called when the last usable | ||
544 | * ETR port goes inactive. After switch to local the clock is not in sync. | ||
545 | */ | ||
546 | void etr_switch_to_local(void) | ||
547 | { | ||
548 | if (!etr_eacr.sl) | ||
549 | return; | ||
550 | etr_disable_sync_clock(NULL); | ||
551 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); | ||
552 | tasklet_hi_schedule(&etr_tasklet); | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * ETR sync check machine check. This is called when the ETR OTE and the | ||
557 | * local clock OTE are farther apart than the ETR sync check tolerance. | ||
558 | * After a ETR sync check the clock is not in sync. The machine check | ||
559 | * is broadcasted to all cpus at the same time. | ||
560 | */ | ||
561 | void etr_sync_check(void) | ||
562 | { | ||
563 | if (!etr_eacr.es) | ||
564 | return; | ||
565 | etr_disable_sync_clock(NULL); | ||
566 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); | ||
567 | tasklet_hi_schedule(&etr_tasklet); | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * ETR external interrupt. There are two causes: | ||
572 | * 1) port state change, check the usability of the port | ||
573 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the | ||
574 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) | ||
575 | * or ETR-data word 4 (edf4) has changed. | ||
576 | */ | ||
577 | static void etr_ext_handler(__u16 code) | ||
578 | { | ||
579 | struct etr_interruption_parameter *intparm = | ||
580 | (struct etr_interruption_parameter *) &S390_lowcore.ext_params; | ||
581 | |||
582 | if (intparm->pc0) | ||
583 | /* ETR port 0 state change. */ | ||
584 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | ||
585 | if (intparm->pc1) | ||
586 | /* ETR port 1 state change. */ | ||
587 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | ||
588 | if (intparm->eai) | ||
589 | /* | ||
590 | * ETR port alert on either port 0, 1 or both. | ||
591 | * Both ports are not up-to-date now. | ||
592 | */ | ||
593 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); | ||
594 | tasklet_hi_schedule(&etr_tasklet); | ||
595 | } | ||
596 | |||
597 | static void etr_timeout(unsigned long dummy) | ||
598 | { | ||
599 | set_bit(ETR_EVENT_UPDATE, &etr_events); | ||
600 | tasklet_hi_schedule(&etr_tasklet); | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * Check if the etr mode is pps. |||
605 | */ | ||
606 | static inline int etr_mode_is_pps(struct etr_eacr eacr) | ||
607 | { | ||
608 | return eacr.es && !eacr.sl; | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Check if the etr mode is etr. | ||
613 | */ | ||
614 | static inline int etr_mode_is_etr(struct etr_eacr eacr) | ||
615 | { | ||
616 | return eacr.es && eacr.sl; | ||
617 | } | ||
618 | |||
619 | /* | ||
620 | * Check if the port can be used for TOD synchronization. | ||
621 | * For PPS mode the port has to receive OTEs. For ETR mode | ||
622 | * the port has to receive OTEs, the ETR stepping bit has to | ||
623 | * be zero and the validity bits for data frame 1, 2, and 3 | ||
624 | * have to be 1. | ||
625 | */ | ||
626 | static int etr_port_valid(struct etr_aib *aib, int port) | ||
627 | { | ||
628 | unsigned int psc; | ||
629 | |||
630 | /* Check that this port is receiving OTEs. */ | ||
631 | if (aib->tsp == 0) | ||
632 | return 0; | ||
633 | |||
634 | psc = port ? aib->esw.psc1 : aib->esw.psc0; | ||
635 | if (psc == etr_lpsc_pps_mode) | ||
636 | return 1; | ||
637 | if (psc == etr_lpsc_operational_step) | ||
638 | return !aib->esw.y && aib->slsw.v1 && | ||
639 | aib->slsw.v2 && aib->slsw.v3; | ||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | /* | ||
644 | * Check if two ports are on the same network. | ||
645 | */ | ||
646 | static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2) | ||
647 | { | ||
648 | // FIXME: any other fields we have to compare? | ||
649 | return aib1->edf1.net_id == aib2->edf1.net_id; | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * Wrapper for etr_steai that converts physical port states |||
654 | * to logical port states to be consistent with the output | ||
655 | * of stetr (see etr_psc vs. etr_lpsc). | ||
656 | */ | ||
657 | static void etr_steai_cv(struct etr_aib *aib, unsigned int func) | ||
658 | { | ||
659 | BUG_ON(etr_steai(aib, func) != 0); | ||
660 | /* Convert port state to logical port state. */ | ||
661 | if (aib->esw.psc0 == 1) | ||
662 | aib->esw.psc0 = 2; | ||
663 | else if (aib->esw.psc0 == 0 && aib->esw.p == 0) | ||
664 | aib->esw.psc0 = 1; | ||
665 | if (aib->esw.psc1 == 1) | ||
666 | aib->esw.psc1 = 2; | ||
667 | else if (aib->esw.psc1 == 0 && aib->esw.p == 1) | ||
668 | aib->esw.psc1 = 1; | ||
669 | } | ||
670 | |||
671 | /* | ||
672 | * Check if the aib a2 is still connected to the same attachment as | ||
673 | * aib a1, the etv values differ by one and a2 is valid. | ||
674 | */ | ||
675 | static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | ||
676 | { | ||
677 | int state_a1, state_a2; | ||
678 | |||
679 | /* Paranoia check: e0/e1 should better be the same. */ | ||
680 | if (a1->esw.eacr.e0 != a2->esw.eacr.e0 || | ||
681 | a1->esw.eacr.e1 != a2->esw.eacr.e1) | ||
682 | return 0; | ||
683 | |||
684 | /* Still connected to the same etr ? */ | ||
685 | state_a1 = p ? a1->esw.psc1 : a1->esw.psc0; | ||
686 | state_a2 = p ? a2->esw.psc1 : a2->esw.psc0; | ||
687 | if (state_a1 == etr_lpsc_operational_step) { | ||
688 | if (state_a2 != etr_lpsc_operational_step || | ||
689 | a1->edf1.net_id != a2->edf1.net_id || | ||
690 | a1->edf1.etr_id != a2->edf1.etr_id || | ||
691 | a1->edf1.etr_pn != a2->edf1.etr_pn) | ||
692 | return 0; | ||
693 | } else if (state_a2 != etr_lpsc_pps_mode) | ||
694 | return 0; | ||
695 | |||
696 | /* The ETV value of a2 needs to be ETV of a1 + 1. */ | ||
697 | if (a1->edf2.etv + 1 != a2->edf2.etv) | ||
698 | return 0; | ||
699 | |||
700 | if (!etr_port_valid(a2, p)) | ||
701 | return 0; | ||
702 | |||
703 | return 1; | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * The time is "clock". xtime is what we think the time is. | ||
708 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | ||
709 | * "delay" is an approximation how long the synchronization took. If | ||
710 | * the time correction is positive, then "delay" is subtracted from | ||
711 | * the time difference and only the remaining part is passed to ntp. | ||
712 | */ | ||
713 | static void etr_adjust_time(unsigned long long clock, unsigned long long delay) | ||
714 | { | ||
715 | unsigned long long delta, ticks; | ||
716 | struct timex adjust; | ||
717 | |||
718 | /* | ||
719 | * We don't have to take the xtime lock because the cpu | ||
720 | * executing etr_adjust_time is running disabled in | ||
721 | * tasklet context and all other cpus are looping in | ||
722 | * etr_sync_cpu_start. | ||
723 | */ | ||
724 | if (clock > xtime_cc) { | ||
725 | /* It is later than we thought. */ | ||
726 | delta = ticks = clock - xtime_cc; | ||
727 | delta = ticks = (delta < delay) ? 0 : delta - delay; | ||
728 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
729 | init_timer_cc = init_timer_cc + delta; | ||
730 | jiffies_timer_cc = jiffies_timer_cc + delta; | ||
731 | xtime_cc = xtime_cc + delta; | ||
732 | adjust.offset = ticks * (1000000 / HZ); | ||
733 | } else { | ||
734 | /* It is earlier than we thought. */ | ||
735 | delta = ticks = xtime_cc - clock; | ||
736 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
737 | init_timer_cc = init_timer_cc - delta; | ||
738 | jiffies_timer_cc = jiffies_timer_cc - delta; | ||
739 | xtime_cc = xtime_cc - delta; | ||
740 | adjust.offset = -ticks * (1000000 / HZ); | ||
741 | } | ||
742 | if (adjust.offset != 0) { | ||
743 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | ||
744 | adjust.offset); | ||
745 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | ||
746 | do_adjtimex(&adjust); | ||
747 | } | ||
748 | } | ||
749 | |||
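etr_adjust_time() always moves the internal clock bases (init_timer_cc, jiffies_timer_cc, xtime_cc) by a whole number of jiffies and hands only that whole-jiffy part, converted to microseconds, to do_adjtimex(). A user-space sketch of that split, with do_div() replaced by plain 64-bit division and an assumed HZ of 100 and example offset:

#include <stdio.h>

#define HZ 100
#define CLK_TICKS_PER_JIFFY ((unsigned long long)(1000000 / HZ) << 12)

int main(void)
{
	/* Pretend the ETR time is 2.5 jiffies ahead of xtime_cc. */
	unsigned long long raw   = 5 * CLK_TICKS_PER_JIFFY / 2;	/* clock - xtime_cc */
	unsigned long long ticks = raw / CLK_TICKS_PER_JIFFY;	/* whole jiffies */
	unsigned long long delta = raw - raw % CLK_TICKS_PER_JIFFY; /* same, in TOD units */

	printf("shift init_timer_cc/jiffies_timer_cc/xtime_cc by %llu TOD units\n", delta);
	printf("pass %llu microseconds to do_adjtimex()\n", ticks * (1000000 / HZ));
	return 0;
}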
750 | static void etr_sync_cpu_start(void *dummy) | ||
751 | { | ||
752 | int *in_sync = dummy; | ||
753 | |||
754 | etr_enable_sync_clock(); | ||
755 | /* | ||
756 | * This looks like a busy wait loop but it isn't. etr_sync_cpu_start |||
757 | * is called on all other cpus while the TOD clock is stopped. |||
758 | * __udelay will stop the cpu on an enabled wait psw until the | ||
759 | * TOD is running again. | ||
760 | */ | ||
761 | while (*in_sync == 0) | ||
762 | __udelay(1); | ||
763 | if (*in_sync != 1) | ||
764 | /* Didn't work. Clear per-cpu in sync bit again. */ | ||
765 | etr_disable_sync_clock(NULL); | ||
766 | /* | ||
767 | * This round of TOD syncing is done. Set the clock comparator | ||
768 | * to the next tick and let the processor continue. | ||
769 | */ | ||
770 | setup_jiffy_timer(); | ||
771 | } | ||
772 | |||
773 | static void etr_sync_cpu_end(void *dummy) | ||
774 | { | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * Sync the TOD clock using the port referred to by aib. This port |||
779 | * has to be enabled and the other port has to be disabled. The | ||
780 | * last eacr update has to be more than 1.6 seconds in the past. | ||
781 | */ | ||
782 | static int etr_sync_clock(struct etr_aib *aib, int port) | ||
783 | { | ||
784 | struct etr_aib *sync_port; | ||
785 | unsigned long long clock, delay; | ||
786 | int in_sync, follows; | ||
787 | int rc; | ||
788 | |||
789 | /* Check if the current aib is adjacent to the sync port aib. */ | ||
790 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
791 | follows = etr_aib_follows(sync_port, aib, port); | ||
792 | memcpy(sync_port, aib, sizeof(*aib)); | ||
793 | if (!follows) | ||
794 | return -EAGAIN; | ||
795 | |||
796 | /* | ||
797 | * Catch all other cpus and make them wait until we have | ||
798 | * successfully synced the clock. smp_call_function will | ||
799 | * return after all other cpus are in etr_sync_cpu_start. | ||
800 | */ | ||
801 | in_sync = 0; | ||
802 | preempt_disable(); | ||
803 | smp_call_function(etr_sync_cpu_start,&in_sync,0,0); | ||
804 | local_irq_disable(); | ||
805 | etr_enable_sync_clock(); | ||
806 | |||
807 | /* Set clock to next OTE. */ | ||
808 | __ctl_set_bit(14, 21); | ||
809 | __ctl_set_bit(0, 29); | ||
810 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; | ||
811 | if (set_clock(clock) == 0) { | ||
812 | __udelay(1); /* Wait for the clock to start. */ | ||
813 | __ctl_clear_bit(0, 29); | ||
814 | __ctl_clear_bit(14, 21); | ||
815 | etr_stetr(aib); | ||
816 | /* Adjust Linux timing variables. */ | ||
817 | delay = (unsigned long long) | ||
818 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | ||
819 | etr_adjust_time(clock, delay); | ||
820 | setup_jiffy_timer(); | ||
821 | /* Verify that the clock is properly set. */ | ||
822 | if (!etr_aib_follows(sync_port, aib, port)) { | ||
823 | /* Didn't work. */ | ||
824 | etr_disable_sync_clock(NULL); | ||
825 | in_sync = -EAGAIN; | ||
826 | rc = -EAGAIN; | ||
827 | } else { | ||
828 | in_sync = 1; | ||
829 | rc = 0; | ||
830 | } | ||
831 | } else { | ||
832 | /* Could not set the clock ?!? */ | ||
833 | __ctl_clear_bit(0, 29); | ||
834 | __ctl_clear_bit(14, 21); | ||
835 | etr_disable_sync_clock(NULL); | ||
836 | in_sync = -EAGAIN; | ||
837 | rc = -EAGAIN; | ||
838 | } | ||
839 | local_irq_enable(); | ||
840 | smp_call_function(etr_sync_cpu_end,NULL,0,0); | ||
841 | preempt_enable(); | ||
842 | return rc; | ||
843 | } | ||
844 | |||
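The sync path above sets the clock to the next on-time event by shifting the ETR time value left by 32 bits; combined with the bit-51-equals-one-microsecond scaling of the TOD clock, one ETV step therefore comes out at 2^20 microseconds. A one-line check of that scaling:

#include <stdio.h>

int main(void)
{
	/* etr_sync_clock() sets the TOD clock to (etv + 1) << 32, so one ETV
	 * step is 2^32 TOD units; with bit 51 = 1 microsecond that is 2^20 us. */
	unsigned long long step_us = (1ULL << 32) >> 12;

	printf("one ETV step = %llu us\n", step_us);	/* 1048576 */
	return 0;
}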
845 | /* | ||
846 | * Handle the immediate effects of the different events. | ||
847 | * The port change event is used for online/offline changes. | ||
848 | */ | ||
849 | static struct etr_eacr etr_handle_events(struct etr_eacr eacr) | ||
850 | { | ||
851 | if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) | ||
852 | eacr.es = 0; | ||
853 | if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) | ||
854 | eacr.es = eacr.sl = 0; | ||
855 | if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events)) | ||
856 | etr_port0_uptodate = etr_port1_uptodate = 0; | ||
857 | |||
858 | if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) { | ||
859 | if (eacr.e0) | ||
860 | /* | ||
861 | * Port change of an enabled port. We have to | ||
862 | * assume that this may have caused a stepping |||
863 | * port switch. | ||
864 | */ | ||
865 | etr_tolec = get_clock(); | ||
866 | eacr.p0 = etr_port0_online; | ||
867 | if (!eacr.p0) | ||
868 | eacr.e0 = 0; | ||
869 | etr_port0_uptodate = 0; | ||
870 | } | ||
871 | if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) { | ||
872 | if (eacr.e1) | ||
873 | /* | ||
874 | * Port change of an enabled port. We have to | ||
875 | * assume that this may have caused a stepping |||
876 | * port switch. | ||
877 | */ | ||
878 | etr_tolec = get_clock(); | ||
879 | eacr.p1 = etr_port1_online; | ||
880 | if (!eacr.p1) | ||
881 | eacr.e1 = 0; | ||
882 | etr_port1_uptodate = 0; | ||
883 | } | ||
884 | clear_bit(ETR_EVENT_UPDATE, &etr_events); | ||
885 | return eacr; | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * Set up a timer that expires at etr_tolec + 1.6 seconds if |||
890 | * one of the ports needs an update. | ||
891 | */ | ||
892 | static void etr_set_tolec_timeout(unsigned long long now) | ||
893 | { | ||
894 | unsigned long micros; | ||
895 | |||
896 | if ((!etr_eacr.p0 || etr_port0_uptodate) && | ||
897 | (!etr_eacr.p1 || etr_port1_uptodate)) | ||
898 | return; | ||
899 | micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0; | ||
900 | micros = (micros > 1600000) ? 0 : 1600000 - micros; | ||
901 | mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1); | ||
902 | } | ||
903 | |||
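Both timeouts work in TOD units: shifting a TOD difference right by 12 yields microseconds (bit 51 = 1 microsecond), so the 1600000 << 12 threshold used by the tasklet below is exactly 1.6 seconds. A small sketch of the remaining-time calculation in etr_set_tolec_timeout(), with the elapsed time an assumed example value:

#include <stdio.h>

#define HZ 100

int main(void)
{
	/* Assume the last eacr update (etr_tolec) was 0.4 s ago. */
	unsigned long long now_minus_tolec = 400000ULL << 12;	/* TOD units */
	unsigned long micros, jiffies_left;

	micros = now_minus_tolec >> 12;				/* back to microseconds */
	micros = micros > 1600000 ? 0 : 1600000 - micros;	/* wait out the 1.6 s window */
	jiffies_left = (micros * (unsigned long long) HZ) / 1000000 + 1;

	printf("re-check ports in %lu us (~%lu jiffies)\n", micros, jiffies_left);
	return 0;
}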
904 | /* | ||
905 | * Set up a timer that expires after 1/2 second. |||
906 | */ | ||
907 | static void etr_set_sync_timeout(void) | ||
908 | { | ||
909 | mod_timer(&etr_timer, jiffies + HZ/2); | ||
910 | } | ||
911 | |||
912 | /* | ||
913 | * Update the aib information for one or both ports. | ||
914 | */ | ||
915 | static struct etr_eacr etr_handle_update(struct etr_aib *aib, | ||
916 | struct etr_eacr eacr) | ||
917 | { | ||
918 | /* With both ports disabled the aib information is useless. */ | ||
919 | if (!eacr.e0 && !eacr.e1) | ||
920 | return eacr; | ||
921 | |||
922 | /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ | ||
923 | if (aib->esw.q == 0) { | ||
924 | /* Information for port 0 stored. */ | ||
925 | if (eacr.p0 && !etr_port0_uptodate) { | ||
926 | etr_port0 = *aib; | ||
927 | if (etr_port0_online) | ||
928 | etr_port0_uptodate = 1; | ||
929 | } | ||
930 | } else { | ||
931 | /* Information for port 1 stored. */ | ||
932 | if (eacr.p1 && !etr_port1_uptodate) { | ||
933 | etr_port1 = *aib; | ||
934 | if (etr_port1_online) |||
935 | etr_port1_uptodate = 1; | ||
936 | } | ||
937 | } | ||
938 | |||
939 | /* | ||
940 | * Do not try to get the alternate port aib if the clock | ||
941 | * is not in sync yet. | ||
942 | */ | ||
943 | if (!eacr.es) | ||
944 | return eacr; | ||
945 | |||
946 | /* | ||
947 | * If steai is available we can get the information about | ||
948 | * the other port immediately. If only stetr is available the | ||
949 | * data-port bit toggle has to be used. | ||
950 | */ | ||
951 | if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { | ||
952 | if (eacr.p0 && !etr_port0_uptodate) { | ||
953 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); | ||
954 | etr_port0_uptodate = 1; | ||
955 | } | ||
956 | if (eacr.p1 && !etr_port1_uptodate) { | ||
957 | etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1); | ||
958 | etr_port1_uptodate = 1; | ||
959 | } | ||
960 | } else { | ||
961 | /* | ||
962 | * One port was updated above; if the other |||
963 | * port is not uptodate toggle dp bit. | ||
964 | */ | ||
965 | if ((eacr.p0 && !etr_port0_uptodate) || | ||
966 | (eacr.p1 && !etr_port1_uptodate)) | ||
967 | eacr.dp ^= 1; | ||
968 | else | ||
969 | eacr.dp = 0; | ||
970 | } | ||
971 | return eacr; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * Write new etr control register if it differs from the current one. | ||
976 | * Refresh etr_tolec if the update may have changed the data port. |||
977 | */ | ||
978 | static void etr_update_eacr(struct etr_eacr eacr) | ||
979 | { | ||
980 | int dp_changed; | ||
981 | |||
982 | if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0) | ||
983 | /* No change, return. */ | ||
984 | return; | ||
985 | /* | ||
986 | * The disable of an active port or the change of the data port |||
987 | * bit can/will cause a change in the data port. | ||
988 | */ | ||
989 | dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 || | ||
990 | (etr_eacr.dp ^ eacr.dp) != 0; | ||
991 | etr_eacr = eacr; | ||
992 | etr_setr(&etr_eacr); | ||
993 | if (dp_changed) | ||
994 | etr_tolec = get_clock(); | ||
995 | } | ||
996 | |||
997 | /* | ||
998 | * ETR tasklet. In this function you'll find the main logic. In | ||
999 | * particular this is the only function that calls etr_update_eacr(); |||
1000 | * it "controls" the etr control register. | ||
1001 | */ | ||
1002 | static void etr_tasklet_fn(unsigned long dummy) | ||
1003 | { | ||
1004 | unsigned long long now; | ||
1005 | struct etr_eacr eacr; | ||
1006 | struct etr_aib aib; | ||
1007 | int sync_port; | ||
1008 | |||
1009 | /* Create working copy of etr_eacr. */ | ||
1010 | eacr = etr_eacr; | ||
1011 | |||
1012 | /* Check for the different events and their immediate effects. */ | ||
1013 | eacr = etr_handle_events(eacr); | ||
1014 | |||
1015 | /* Check if ETR is supposed to be active. */ | ||
1016 | eacr.ea = eacr.p0 || eacr.p1; | ||
1017 | if (!eacr.ea) { | ||
1018 | /* Both ports offline. Reset everything. */ | ||
1019 | eacr.dp = eacr.es = eacr.sl = 0; | ||
1020 | on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); | ||
1021 | del_timer_sync(&etr_timer); | ||
1022 | etr_update_eacr(eacr); | ||
1023 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1024 | return; | ||
1025 | } | ||
1026 | |||
1027 | /* Store aib to get the current ETR status word. */ | ||
1028 | BUG_ON(etr_stetr(&aib) != 0); | ||
1029 | etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ | ||
1030 | now = get_clock(); | ||
1031 | |||
1032 | /* | ||
1033 | * Update the port information if the last stepping port change | ||
1034 | * or data port change is older than 1.6 seconds. | ||
1035 | */ | ||
1036 | if (now >= etr_tolec + (1600000 << 12)) | ||
1037 | eacr = etr_handle_update(&aib, eacr); | ||
1038 | |||
1039 | /* | ||
1040 | * Select ports to enable. The preferred synchronization mode is PPS. |||
1041 | * Whether a port can be enabled depends on a number of things: |||
1042 | * 1) The port needs to be online and uptodate. A port is not | ||
1043 | * disabled just because it is not uptodate, but it is only | ||
1044 | * enabled if it is uptodate. | ||
1045 | * 2) The port needs to have the same mode (pps / etr). | ||
1046 | * 3) The port needs to be usable -> etr_port_valid() == 1 | ||
1047 | * 4) To enable the second port the clock needs to be in sync. | ||
1048 | * 5) If both ports are usable and are ETR ports, the network id |||
1049 | * has to be the same. | ||
1050 | * The eacr.sl bit is used to indicate etr mode vs. pps mode. | ||
1051 | */ | ||
1052 | if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) { | ||
1053 | eacr.sl = 0; | ||
1054 | eacr.e0 = 1; | ||
1055 | if (!etr_mode_is_pps(etr_eacr)) | ||
1056 | eacr.es = 0; | ||
1057 | if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode) | ||
1058 | eacr.e1 = 0; | ||
1059 | // FIXME: uptodate checks ? | ||
1060 | else if (etr_port0_uptodate && etr_port1_uptodate) | ||
1061 | eacr.e1 = 1; | ||
1062 | sync_port = (etr_port0_uptodate && | ||
1063 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | ||
1064 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1065 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { | ||
1066 | eacr.sl = 0; | ||
1067 | eacr.e0 = 0; | ||
1068 | eacr.e1 = 1; | ||
1069 | if (!etr_mode_is_pps(etr_eacr)) | ||
1070 | eacr.es = 0; | ||
1071 | sync_port = (etr_port1_uptodate && | ||
1072 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | ||
1073 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1074 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { | ||
1075 | eacr.sl = 1; | ||
1076 | eacr.e0 = 1; | ||
1077 | if (!etr_mode_is_etr(etr_eacr)) | ||
1078 | eacr.es = 0; | ||
1079 | if (!eacr.es || !eacr.p1 || | ||
1080 | aib.esw.psc1 != etr_lpsc_operational_alt) | ||
1081 | eacr.e1 = 0; | ||
1082 | else if (etr_port0_uptodate && etr_port1_uptodate && | ||
1083 | etr_compare_network(&etr_port0, &etr_port1)) | ||
1084 | eacr.e1 = 1; | ||
1085 | sync_port = (etr_port0_uptodate && | ||
1086 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | ||
1087 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1088 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { | ||
1089 | eacr.sl = 1; | ||
1090 | eacr.e0 = 0; | ||
1091 | eacr.e1 = 1; | ||
1092 | if (!etr_mode_is_etr(etr_eacr)) | ||
1093 | eacr.es = 0; | ||
1094 | sync_port = (etr_port1_uptodate && | ||
1095 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | ||
1096 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1097 | } else { | ||
1098 | /* Both ports not usable. */ | ||
1099 | eacr.es = eacr.sl = 0; | ||
1100 | sync_port = -1; | ||
1101 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1102 | } | ||
1103 | |||
1104 | /* | ||
1105 | * If the clock is in sync just update the eacr and return. | ||
1106 | * If there is no valid sync port wait for a port update. | ||
1107 | */ | ||
1108 | if (eacr.es || sync_port < 0) { | ||
1109 | etr_update_eacr(eacr); | ||
1110 | etr_set_tolec_timeout(now); | ||
1111 | return; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * Prepare control register for clock syncing | ||
1116 | * (reset data port bit, set sync check control). |||
1117 | */ | ||
1118 | eacr.dp = 0; | ||
1119 | eacr.es = 1; | ||
1120 | |||
1121 | /* | ||
1122 | * Update eacr and try to synchronize the clock. If the update | ||
1123 | * of eacr caused a stepping port switch (or if we have to | ||
1124 | * assume that a stepping port switch has occurred) or the |||
1125 | * clock syncing failed, reset the sync check control bit | ||
1126 | * and set up a timer to try again after 0.5 seconds | ||
1127 | */ | ||
1128 | etr_update_eacr(eacr); | ||
1129 | if (now < etr_tolec + (1600000 << 12) || | ||
1130 | etr_sync_clock(&aib, sync_port) != 0) { | ||
1131 | /* Sync failed. Try again in 1/2 second. */ | ||
1132 | eacr.es = 0; | ||
1133 | etr_update_eacr(eacr); | ||
1134 | etr_set_sync_timeout(); | ||
1135 | } else | ||
1136 | etr_set_tolec_timeout(now); | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * Sysfs interface functions | ||
1141 | */ | ||
1142 | static struct sysdev_class etr_sysclass = { | ||
1143 | set_kset_name("etr") | ||
1144 | }; | ||
1145 | |||
1146 | static struct sys_device etr_port0_dev = { | ||
1147 | .id = 0, | ||
1148 | .cls = &etr_sysclass, | ||
1149 | }; | ||
1150 | |||
1151 | static struct sys_device etr_port1_dev = { | ||
1152 | .id = 1, | ||
1153 | .cls = &etr_sysclass, | ||
1154 | }; | ||
1155 | |||
1156 | /* | ||
1157 | * ETR class attributes | ||
1158 | */ | ||
1159 | static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf) | ||
1160 | { | ||
1161 | return sprintf(buf, "%i\n", etr_port0.esw.p); | ||
1162 | } | ||
1163 | |||
1164 | static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); | ||
1165 | |||
1166 | static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf) | ||
1167 | { | ||
1168 | char *mode_str; | ||
1169 | |||
1170 | if (etr_mode_is_pps(etr_eacr)) | ||
1171 | mode_str = "pps"; | ||
1172 | else if (etr_mode_is_etr(etr_eacr)) | ||
1173 | mode_str = "etr"; | ||
1174 | else | ||
1175 | mode_str = "local"; | ||
1176 | return sprintf(buf, "%s\n", mode_str); | ||
1177 | } | ||
1178 | |||
1179 | static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL); | ||
1180 | |||
1181 | /* | ||
1182 | * ETR port attributes | ||
1183 | */ | ||
1184 | static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev) | ||
1185 | { | ||
1186 | if (dev == &etr_port0_dev) | ||
1187 | return etr_port0_online ? &etr_port0 : NULL; | ||
1188 | else | ||
1189 | return etr_port1_online ? &etr_port1 : NULL; | ||
1190 | } | ||
1191 | |||
1192 | static ssize_t etr_online_show(struct sys_device *dev, char *buf) | ||
1193 | { | ||
1194 | unsigned int online; | ||
1195 | |||
1196 | online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online; | ||
1197 | return sprintf(buf, "%i\n", online); | ||
1198 | } | ||
1199 | |||
1200 | static ssize_t etr_online_store(struct sys_device *dev, | ||
1201 | const char *buf, size_t count) | ||
1202 | { | ||
1203 | unsigned int value; | ||
1204 | |||
1205 | value = simple_strtoul(buf, NULL, 0); | ||
1206 | if (value != 0 && value != 1) | ||
1207 | return -EINVAL; | ||
1208 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
1209 | return -ENOSYS; | ||
1210 | if (dev == &etr_port0_dev) { | ||
1211 | if (etr_port0_online == value) | ||
1212 | return count; /* Nothing to do. */ | ||
1213 | etr_port0_online = value; | ||
1214 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | ||
1215 | tasklet_hi_schedule(&etr_tasklet); | ||
1216 | } else { | ||
1217 | if (etr_port1_online == value) | ||
1218 | return count; /* Nothing to do. */ | ||
1219 | etr_port1_online = value; | ||
1220 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | ||
1221 | tasklet_hi_schedule(&etr_tasklet); | ||
1222 | } | ||
1223 | return count; | ||
1224 | } | ||
1225 | |||
1226 | static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store); | ||
1227 | |||
1228 | static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf) | ||
1229 | { | ||
1230 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? | ||
1231 | etr_eacr.e0 : etr_eacr.e1); | ||
1232 | } | ||
1233 | |||
1234 | static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); | ||
1235 | |||
1236 | static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf) | ||
1237 | { | ||
1238 | if (!etr_port0_online && !etr_port1_online) | ||
1239 | /* Status word is not uptodate if both ports are offline. */ | ||
1240 | return -ENODATA; | ||
1241 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? | ||
1242 | etr_port0.esw.psc0 : etr_port0.esw.psc1); | ||
1243 | } | ||
1244 | |||
1245 | static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL); | ||
1246 | |||
1247 | static ssize_t etr_untuned_show(struct sys_device *dev, char *buf) | ||
1248 | { | ||
1249 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1250 | |||
1251 | if (!aib || !aib->slsw.v1) | ||
1252 | return -ENODATA; | ||
1253 | return sprintf(buf, "%i\n", aib->edf1.u); | ||
1254 | } | ||
1255 | |||
1256 | static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL); | ||
1257 | |||
1258 | static ssize_t etr_network_id_show(struct sys_device *dev, char *buf) | ||
1259 | { | ||
1260 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1261 | |||
1262 | if (!aib || !aib->slsw.v1) | ||
1263 | return -ENODATA; | ||
1264 | return sprintf(buf, "%i\n", aib->edf1.net_id); | ||
1265 | } | ||
1266 | |||
1267 | static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL); | ||
1268 | |||
1269 | static ssize_t etr_id_show(struct sys_device *dev, char *buf) | ||
1270 | { | ||
1271 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1272 | |||
1273 | if (!aib || !aib->slsw.v1) | ||
1274 | return -ENODATA; | ||
1275 | return sprintf(buf, "%i\n", aib->edf1.etr_id); | ||
1276 | } | ||
1277 | |||
1278 | static SYSDEV_ATTR(id, 0400, etr_id_show, NULL); | ||
1279 | |||
1280 | static ssize_t etr_port_number_show(struct sys_device *dev, char *buf) | ||
1281 | { | ||
1282 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1283 | |||
1284 | if (!aib || !aib->slsw.v1) | ||
1285 | return -ENODATA; | ||
1286 | return sprintf(buf, "%i\n", aib->edf1.etr_pn); | ||
1287 | } | ||
1288 | |||
1289 | static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL); | ||
1290 | |||
1291 | static ssize_t etr_coupled_show(struct sys_device *dev, char *buf) | ||
1292 | { | ||
1293 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1294 | |||
1295 | if (!aib || !aib->slsw.v3) | ||
1296 | return -ENODATA; | ||
1297 | return sprintf(buf, "%i\n", aib->edf3.c); | ||
1298 | } | ||
1299 | |||
1300 | static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL); | ||
1301 | |||
1302 | static ssize_t etr_local_time_show(struct sys_device *dev, char *buf) | ||
1303 | { | ||
1304 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1305 | |||
1306 | if (!aib || !aib->slsw.v3) | ||
1307 | return -ENODATA; | ||
1308 | return sprintf(buf, "%i\n", aib->edf3.blto); | ||
1309 | } | ||
1310 | |||
1311 | static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL); | ||
1312 | |||
1313 | static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf) | ||
1314 | { | ||
1315 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1316 | |||
1317 | if (!aib || !aib->slsw.v3) | ||
1318 | return -ENODATA; | ||
1319 | return sprintf(buf, "%i\n", aib->edf3.buo); | ||
1320 | } | ||
1321 | |||
1322 | static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL); | ||
1323 | |||
1324 | static struct sysdev_attribute *etr_port_attributes[] = { | ||
1325 | &attr_online, | ||
1326 | &attr_stepping_control, | ||
1327 | &attr_state_code, | ||
1328 | &attr_untuned, | ||
1329 | &attr_network, | ||
1330 | &attr_id, | ||
1331 | &attr_port, | ||
1332 | &attr_coupled, | ||
1333 | &attr_local_time, | ||
1334 | &attr_utc_offset, | ||
1335 | NULL | ||
1336 | }; | ||
1337 | |||
1338 | static int __init etr_register_port(struct sys_device *dev) | ||
1339 | { | ||
1340 | struct sysdev_attribute **attr; | ||
1341 | int rc; | ||
1342 | |||
1343 | rc = sysdev_register(dev); | ||
1344 | if (rc) | ||
1345 | goto out; | ||
1346 | for (attr = etr_port_attributes; *attr; attr++) { | ||
1347 | rc = sysdev_create_file(dev, *attr); | ||
1348 | if (rc) | ||
1349 | goto out_unreg; | ||
1350 | } | ||
1351 | return 0; | ||
1352 | out_unreg: | ||
1353 | for (; attr >= etr_port_attributes; attr--) | ||
1354 | sysdev_remove_file(dev, *attr); | ||
1355 | sysdev_unregister(dev); | ||
1356 | out: | ||
1357 | return rc; | ||
1358 | } | ||
1359 | |||
1360 | static void __init etr_unregister_port(struct sys_device *dev) | ||
1361 | { | ||
1362 | struct sysdev_attribute **attr; | ||
1363 | |||
1364 | for (attr = etr_port_attributes; *attr; attr++) | ||
1365 | sysdev_remove_file(dev, *attr); | ||
1366 | sysdev_unregister(dev); | ||
1367 | } | ||
1368 | |||
1369 | static int __init etr_init_sysfs(void) | ||
1370 | { | ||
1371 | int rc; | ||
1372 | |||
1373 | rc = sysdev_class_register(&etr_sysclass); | ||
1374 | if (rc) | ||
1375 | goto out; | ||
1376 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port); | ||
1377 | if (rc) | ||
1378 | goto out_unreg_class; | ||
1379 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode); | ||
1380 | if (rc) | ||
1381 | goto out_remove_stepping_port; | ||
1382 | rc = etr_register_port(&etr_port0_dev); | ||
1383 | if (rc) | ||
1384 | goto out_remove_stepping_mode; | ||
1385 | rc = etr_register_port(&etr_port1_dev); | ||
1386 | if (rc) | ||
1387 | goto out_remove_port0; | ||
1388 | return 0; | ||
1389 | |||
1390 | out_remove_port0: | ||
1391 | etr_unregister_port(&etr_port0_dev); | ||
1392 | out_remove_stepping_mode: | ||
1393 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode); | ||
1394 | out_remove_stepping_port: | ||
1395 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port); | ||
1396 | out_unreg_class: | ||
1397 | sysdev_class_unregister(&etr_sysclass); | ||
1398 | out: | ||
1399 | return rc; | ||
348 | } | 1400 | } |
349 | 1401 | ||
1402 | device_initcall(etr_init_sysfs); | ||
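The sysfs attributes registered above become readable and writable files under the ETR sysdev class. A minimal userspace sketch of putting a port online through the `online` attribute follows; the class/device names, and therefore the exact path, are assumed here rather than taken from this hunk:

/* Hypothetical helper: writing "1" to the online attribute ends up in
 * etr_online_store(), which schedules the ETR tasklet for that port. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/devices/system/etr/etr0/online"; /* assumed path */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1", 1) != 1)
                perror("write");
        close(fd);
        return 0;
}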
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 3cbb0dcf1f1d..f0e5a320e2ec 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer) | |||
283 | return buffer; | 283 | return buffer; |
284 | } | 284 | } |
285 | 285 | ||
286 | DEFINE_SPINLOCK(die_lock); | 286 | static DEFINE_SPINLOCK(die_lock); |
287 | 287 | ||
288 | void die(const char * str, struct pt_regs * regs, long err) | 288 | void die(const char * str, struct pt_regs * regs, long err) |
289 | { | 289 | { |
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs) | |||
364 | force_sig(SIGTRAP, current); | 364 | force_sig(SIGTRAP, current); |
365 | } | 365 | } |
366 | 366 | ||
367 | asmlinkage void | 367 | static void default_trap_handler(struct pt_regs * regs, long interruption_code) |
368 | default_trap_handler(struct pt_regs * regs, long interruption_code) | ||
369 | { | 368 | { |
370 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 369 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
371 | local_irq_enable(); | 370 | local_irq_enable(); |
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code) | |||
376 | } | 375 | } |
377 | 376 | ||
378 | #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ | 377 | #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ |
379 | asmlinkage void name(struct pt_regs * regs, long interruption_code) \ | 378 | static void name(struct pt_regs * regs, long interruption_code) \ |
380 | { \ | 379 | { \ |
381 | siginfo_t info; \ | 380 | siginfo_t info; \ |
382 | info.si_signo = signr; \ | 381 | info.si_signo = signr; \ |
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location, | |||
442 | "floating point exception", regs, &si); | 441 | "floating point exception", regs, &si); |
443 | } | 442 | } |
444 | 443 | ||
445 | asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | 444 | static void illegal_op(struct pt_regs * regs, long interruption_code) |
446 | { | 445 | { |
447 | siginfo_t info; | 446 | siginfo_t info; |
448 | __u8 opcode[6]; | 447 | __u8 opcode[6]; |
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | |||
491 | #endif | 490 | #endif |
492 | } else | 491 | } else |
493 | signal = SIGILL; | 492 | signal = SIGILL; |
494 | } else | 493 | } else { |
495 | signal = SIGILL; | 494 | /* |
495 | * If we get an illegal op in kernel mode, send it through the | ||
496 | * kprobes notifier. If kprobes doesn't pick it up, SIGILL | ||
497 | */ | ||
498 | if (notify_die(DIE_BPT, "bpt", regs, interruption_code, | ||
499 | 3, SIGTRAP) != NOTIFY_STOP) | ||
500 | signal = SIGILL; | ||
501 | } | ||
496 | 502 | ||
497 | #ifdef CONFIG_MATHEMU | 503 | #ifdef CONFIG_MATHEMU |
498 | if (signal == SIGFPE) | 504 | if (signal == SIGFPE) |
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, | |||
585 | ILL_ILLOPN, get_check_address(regs)); | 591 | ILL_ILLOPN, get_check_address(regs)); |
586 | #endif | 592 | #endif |
587 | 593 | ||
588 | asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) | 594 | static void data_exception(struct pt_regs * regs, long interruption_code) |
589 | { | 595 | { |
590 | __u16 __user *location; | 596 | __u16 __user *location; |
591 | int signal = 0; | 597 | int signal = 0; |
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) | |||
675 | } | 681 | } |
676 | } | 682 | } |
677 | 683 | ||
678 | asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) | 684 | static void space_switch_exception(struct pt_regs * regs, long int_code) |
679 | { | 685 | { |
680 | siginfo_t info; | 686 | siginfo_t info; |
681 | 687 | ||
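For context on the new DIE_BPT path in illegal_op(): a consumer such as kprobes registers on the die notifier chain and claims the event by returning NOTIFY_STOP, which is what suppresses the SIGILL in the hunk above. A rough sketch of such a consumer, not part of this patch, with header locations assumed for this kernel era:

#include <linux/notifier.h>
#include <asm/kdebug.h>         /* DIE_BPT, struct die_args, register_die_notifier() */

static int bpt_notify_sketch(struct notifier_block *self, unsigned long val,
                             void *data)
{
        struct die_args *args = data;

        /* illegal_op() passes trapnr 3 and SIGTRAP for kernel-mode breakpoints */
        if (val == DIE_BPT && args->trapnr == 3)
                return NOTIFY_STOP;     /* handled; no SIGILL is raised */
        return NOTIFY_DONE;
}

static struct notifier_block bpt_nb_sketch = {
        .notifier_call = bpt_notify_sketch,
};

static int __init bpt_sketch_init(void)
{
        return register_die_notifier(&bpt_nb_sketch);
}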
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index fe0f2e97ba7b..a48907392522 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -31,18 +31,19 @@ SECTIONS | |||
31 | 31 | ||
32 | _etext = .; /* End of text section */ | 32 | _etext = .; /* End of text section */ |
33 | 33 | ||
34 | . = ALIGN(16); /* Exception table */ | ||
35 | __start___ex_table = .; | ||
36 | __ex_table : { *(__ex_table) } | ||
37 | __stop___ex_table = .; | ||
38 | |||
39 | RODATA | 34 | RODATA |
40 | 35 | ||
41 | #ifdef CONFIG_SHARED_KERNEL | 36 | #ifdef CONFIG_SHARED_KERNEL |
42 | . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ | 37 | . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ |
38 | #endif | ||
43 | 39 | ||
40 | . = ALIGN(4096); | ||
44 | _eshared = .; /* End of shareable data */ | 41 | _eshared = .; /* End of shareable data */ |
45 | #endif | 42 | |
43 | . = ALIGN(16); /* Exception table */ | ||
44 | __start___ex_table = .; | ||
45 | __ex_table : { *(__ex_table) } | ||
46 | __stop___ex_table = .; | ||
46 | 47 | ||
47 | .data : { /* Data */ | 48 | .data : { /* Data */ |
48 | *(.data) | 49 | *(.data) |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 21baaf5496d6..9d5b02801b46 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/irq_regs.h> | 25 | #include <asm/irq_regs.h> |
26 | 26 | ||
27 | static ext_int_info_t ext_int_info_timer; | 27 | static ext_int_info_t ext_int_info_timer; |
28 | DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | 28 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); |
29 | 29 | ||
30 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 30 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
31 | /* | 31 | /* |
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer); | |||
524 | void init_cpu_vtimer(void) | 524 | void init_cpu_vtimer(void) |
525 | { | 525 | { |
526 | struct vtimer_queue *vt_list; | 526 | struct vtimer_queue *vt_list; |
527 | unsigned long cr0; | ||
528 | 527 | ||
529 | /* kick the virtual timer */ | 528 | /* kick the virtual timer */ |
530 | S390_lowcore.exit_timer = VTIMER_MAX_SLICE; | 529 | S390_lowcore.exit_timer = VTIMER_MAX_SLICE; |
531 | S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; | 530 | S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; |
532 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); | 531 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); |
533 | asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); | 532 | asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); |
534 | __ctl_store(cr0, 0, 0); | 533 | |
535 | cr0 |= 0x400; | 534 | /* enable cpu timer interrupts */ |
536 | __ctl_load(cr0, 0, 0); | 535 | __ctl_set_bit(0,10); |
537 | 536 | ||
538 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | 537 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); |
539 | INIT_LIST_HEAD(&vt_list->list); | 538 | INIT_LIST_HEAD(&vt_list->list); |
@@ -572,6 +571,7 @@ void __init vtime_init(void) | |||
572 | if (register_idle_notifier(&vtimer_idle_nb)) | 571 | if (register_idle_notifier(&vtimer_idle_nb)) |
573 | panic("Couldn't register idle notifier"); | 572 | panic("Couldn't register idle notifier"); |
574 | 573 | ||
574 | /* Enable cpu timer interrupts on the boot cpu. */ | ||
575 | init_cpu_vtimer(); | 575 | init_cpu_vtimer(); |
576 | } | 576 | } |
577 | 577 | ||
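The __ctl_set_bit(0,10) call that replaces the deleted open-coded sequence sets the 0x400 bit of control register 0, which is the CPU-timer external-interruption subclass mask. Roughly, it performs the same store/modify/load pattern the old code used; a sketch with the same primitives (declared in <asm/system.h> in this era):

static inline void enable_cpu_timer_irq_sketch(void)
{
        unsigned long cr0;

        __ctl_store(cr0, 0, 0);         /* fetch CR0 */
        cr0 |= 1UL << 10;               /* 0x400: CPU-timer subclass mask */
        __ctl_load(cr0, 0, 0);          /* write it back */
}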
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index b5f94cf3bde8..7a44fed21b35 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o | 7 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o |
8 | lib-$(CONFIG_32BIT) += div64.o | 8 | lib-$(CONFIG_32BIT) += div64.o |
9 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o | 9 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o |
10 | lib-$(CONFIG_SMP) += spinlock.o | 10 | lib-$(CONFIG_SMP) += spinlock.o |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 027c4742a001..02854449b74b 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/delay.c | 2 | * arch/s390/lib/delay.c |
3 | * Precise Delay Loops for S390 | 3 | * Precise Delay Loops for S390 |
4 | * | 4 | * |
5 | * S390 version | 5 | * S390 version |
@@ -13,10 +13,8 @@ | |||
13 | 13 | ||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | 16 | #include <linux/timex.h> | |
17 | #ifdef CONFIG_SMP | 17 | #include <linux/irqflags.h> |
18 | #include <asm/smp.h> | ||
19 | #endif | ||
20 | 18 | ||
21 | void __delay(unsigned long loops) | 19 | void __delay(unsigned long loops) |
22 | { | 20 | { |
@@ -31,17 +29,39 @@ void __delay(unsigned long loops) | |||
31 | } | 29 | } |
32 | 30 | ||
33 | /* | 31 | /* |
34 | * Waits for 'usecs' microseconds using the tod clock, giving up the time slice | 32 | * Waits for 'usecs' microseconds using the TOD clock comparator. |
35 | * of the virtual PU inbetween to avoid congestion. | ||
36 | */ | 33 | */ |
37 | void __udelay(unsigned long usecs) | 34 | void __udelay(unsigned long usecs) |
38 | { | 35 | { |
39 | uint64_t start_cc; | 36 | u64 end, time, jiffy_timer = 0; |
37 | unsigned long flags, cr0, mask, dummy; | ||
38 | |||
39 | local_irq_save(flags); | ||
40 | if (raw_irqs_disabled_flags(flags)) { | ||
41 | jiffy_timer = S390_lowcore.jiffy_timer; | ||
42 | S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); | ||
43 | __ctl_store(cr0, 0, 0); | ||
44 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | ||
45 | __ctl_load(dummy , 0, 0); | ||
46 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | ||
47 | } else | ||
48 | mask = psw_kernel_bits | PSW_MASK_WAIT | | ||
49 | PSW_MASK_EXT | PSW_MASK_IO; | ||
50 | |||
51 | end = get_clock() + ((u64) usecs << 12); | ||
52 | do { | ||
53 | time = end < S390_lowcore.jiffy_timer ? | ||
54 | end : S390_lowcore.jiffy_timer; | ||
55 | set_clock_comparator(time); | ||
56 | trace_hardirqs_on(); | ||
57 | __load_psw_mask(mask); | ||
58 | local_irq_disable(); | ||
59 | } while (get_clock() < end); | ||
40 | 60 | ||
41 | if (usecs == 0) | 61 | if (raw_irqs_disabled_flags(flags)) { |
42 | return; | 62 | __ctl_load(cr0, 0, 0); |
43 | start_cc = get_clock(); | 63 | S390_lowcore.jiffy_timer = jiffy_timer; |
44 | do { | 64 | } |
45 | cpu_relax(); | 65 | set_clock_comparator(S390_lowcore.jiffy_timer); |
46 | } while (((get_clock() - start_cc)/4096) < usecs); | 66 | local_irq_restore(flags); |
47 | } | 67 | } |
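The `(u64) usecs << 12` conversion in the new __udelay() relies on the TOD clock format: bit 51 of the clock advances once per microsecond, so one microsecond corresponds to 2^12 = 4096 TOD units. A standalone illustration of that arithmetic, not part of the patch:

/* Convert a microsecond count into s390 TOD clock units. */
static inline unsigned long long usecs_to_tod_units_sketch(unsigned long usecs)
{
        return (unsigned long long) usecs << 12;        /* usecs * 4096 */
}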
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S new file mode 100644 index 000000000000..eb1df632e749 --- /dev/null +++ b/arch/s390/lib/qrnnd.S | |||
@@ -0,0 +1,77 @@ | |||
1 | # S/390 __udiv_qrnnd | ||
2 | |||
3 | # r2 : &__r | ||
4 | # r3 : upper half of 64 bit word n | ||
5 | # r4 : lower half of 64 bit word n | ||
6 | # r5 : divisor d | ||
7 | # the remainder r of the division is to be stored to &__r and | ||
8 | # the quotient q is to be returned | ||
9 | |||
10 | .text | ||
11 | .globl __udiv_qrnnd | ||
12 | __udiv_qrnnd: | ||
13 | st %r2,24(%r15) # store pointer to remainder for later | ||
14 | lr %r0,%r3 # reload n | ||
15 | lr %r1,%r4 | ||
16 | ltr %r2,%r5 # reload and test divisor | ||
17 | jp 5f | ||
18 | # divisor >= 0x80000000 | ||
19 | srdl %r0,2 # n/4 | ||
20 | srl %r2,1 # d/2 | ||
21 | slr %r1,%r2 # special case if last bit of d is set | ||
22 | brc 3,0f # (n/4) div (n/2) can overflow by 1 | ||
23 | ahi %r0,-1 # trick: subtract n/2, then divide | ||
24 | 0: dr %r0,%r2 # signed division | ||
25 | ahi %r1,1 # trick part 2: add 1 to the quotient | ||
26 | # now (n >> 2) = (d >> 1) * %r1 + %r0 | ||
27 | lhi %r3,1 | ||
28 | nr %r3,%r1 # test last bit of q | ||
29 | jz 1f | ||
30 | alr %r0,%r2 # add (d>>1) to r | ||
31 | 1: srl %r1,1 # q >>= 1 | ||
32 | # now (n >> 2) = (d&-2) * %r1 + %r0 | ||
33 | lhi %r3,1 | ||
34 | nr %r3,%r5 # test last bit of d | ||
35 | jz 2f | ||
36 | slr %r0,%r1 # r -= q | ||
37 | brc 3,2f # borrow ? | ||
38 | alr %r0,%r5 # r += d | ||
39 | ahi %r1,-1 | ||
40 | 2: # now (n >> 2) = d * %r1 + %r0 | ||
41 | alr %r1,%r1 # q <<= 1 | ||
42 | alr %r0,%r0 # r <<= 1 | ||
43 | brc 12,3f # overflow on r ? | ||
44 | slr %r0,%r5 # r -= d | ||
45 | ahi %r1,1 # q += 1 | ||
46 | 3: lhi %r3,2 | ||
47 | nr %r3,%r4 # test next to last bit of n | ||
48 | jz 4f | ||
49 | ahi %r0,1 # r += 1 | ||
50 | 4: clr %r0,%r5 # r >= d ? | ||
51 | jl 6f | ||
52 | slr %r0,%r5 # r -= d | ||
53 | ahi %r1,1 # q += 1 | ||
54 | # now (n >> 1) = d * %r1 + %r0 | ||
55 | j 6f | ||
56 | 5: # divisor < 0x80000000 | ||
57 | srdl %r0,1 | ||
58 | dr %r0,%r2 # signed division | ||
59 | # now (n >> 1) = d * %r1 + %r0 | ||
60 | 6: alr %r1,%r1 # q <<= 1 | ||
61 | alr %r0,%r0 # r <<= 1 | ||
62 | brc 12,7f # overflow on r ? | ||
63 | slr %r0,%r5 # r -= d | ||
64 | ahi %r1,1 # q += 1 | ||
65 | 7: lhi %r3,1 | ||
66 | nr %r3,%r4 # isolate last bit of n | ||
67 | alr %r0,%r3 # r += (n & 1) | ||
68 | clr %r0,%r5 # r >= d ? | ||
69 | jl 8f | ||
70 | slr %r0,%r5 # r -= d | ||
71 | ahi %r1,1 # q += 1 | ||
72 | 8: # now n = d * %r1 + %r0 | ||
73 | l %r2,24(%r15) | ||
74 | st %r0,0(%r2) | ||
75 | lr %r2,%r1 | ||
76 | br %r14 | ||
77 | .end __udiv_qrnnd | ||
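For readers not fluent in the assembler above: __udiv_qrnnd divides the 64-bit value formed by (upper:lower) by a 32-bit divisor, returns the quotient in %r2 and stores the remainder through the pointer passed in %r2 on entry. A plain C sketch of those semantics, assuming the usual precondition that the upper half is smaller than the divisor so the quotient fits in 32 bits:

static unsigned int udiv_qrnnd_sketch(unsigned int *rem, unsigned int n_hi,
                                      unsigned int n_lo, unsigned int d)
{
        unsigned long long n = ((unsigned long long) n_hi << 32) | n_lo;

        *rem = (unsigned int) (n % d);          /* remainder to *rem */
        return (unsigned int) (n / d);          /* 32-bit quotient */
}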
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h new file mode 100644 index 000000000000..126011df14f1 --- /dev/null +++ b/arch/s390/lib/uaccess.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * arch/s390/lib/uaccess.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef __ARCH_S390_LIB_UACCESS_H | ||
9 | #define __ARCH_S390_LIB_UACCESS_H | ||
10 | |||
11 | extern size_t copy_from_user_std(size_t, const void __user *, void *); | ||
12 | extern size_t copy_to_user_std(size_t, void __user *, const void *); | ||
13 | extern size_t strnlen_user_std(size_t, const char __user *); | ||
14 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); | ||
15 | extern int futex_atomic_cmpxchg_std(int __user *, int, int); | ||
16 | extern int futex_atomic_op_std(int, int __user *, int, int *); | ||
17 | |||
18 | extern size_t copy_from_user_pt(size_t, const void __user *, void *); | ||
19 | extern size_t copy_to_user_pt(size_t, void __user *, const void *); | ||
20 | extern int futex_atomic_op_pt(int, int __user *, int, int *); | ||
21 | extern int futex_atomic_cmpxchg_pt(int __user *, int, int); | ||
22 | |||
23 | #endif /* __ARCH_S390_LIB_UACCESS_H */ | ||
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index f9a23d57eb79..6d8772339d76 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | #include <asm/futex.h> | 14 | #include <asm/futex.h> |
15 | #include "uaccess.h" | ||
15 | 16 | ||
16 | #ifndef __s390x__ | 17 | #ifndef __s390x__ |
17 | #define AHI "ahi" | 18 | #define AHI "ahi" |
@@ -27,10 +28,7 @@ | |||
27 | #define SLR "slgr" | 28 | #define SLR "slgr" |
28 | #endif | 29 | #endif |
29 | 30 | ||
30 | extern size_t copy_from_user_std(size_t, const void __user *, void *); | 31 | static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) |
31 | extern size_t copy_to_user_std(size_t, void __user *, const void *); | ||
32 | |||
33 | size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) | ||
34 | { | 32 | { |
35 | register unsigned long reg0 asm("0") = 0x81UL; | 33 | register unsigned long reg0 asm("0") = 0x81UL; |
36 | unsigned long tmp1, tmp2; | 34 | unsigned long tmp1, tmp2; |
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) | |||
69 | return size; | 67 | return size; |
70 | } | 68 | } |
71 | 69 | ||
72 | size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) | 70 | static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) |
73 | { | 71 | { |
74 | if (size <= 256) | 72 | if (size <= 256) |
75 | return copy_from_user_std(size, ptr, x); | 73 | return copy_from_user_std(size, ptr, x); |
76 | return copy_from_user_mvcos(size, ptr, x); | 74 | return copy_from_user_mvcos(size, ptr, x); |
77 | } | 75 | } |
78 | 76 | ||
79 | size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) | 77 | static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) |
80 | { | 78 | { |
81 | register unsigned long reg0 asm("0") = 0x810000UL; | 79 | register unsigned long reg0 asm("0") = 0x810000UL; |
82 | unsigned long tmp1, tmp2; | 80 | unsigned long tmp1, tmp2; |
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) | |||
105 | return size; | 103 | return size; |
106 | } | 104 | } |
107 | 105 | ||
108 | size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) | 106 | static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, |
107 | const void *x) | ||
109 | { | 108 | { |
110 | if (size <= 256) | 109 | if (size <= 256) |
111 | return copy_to_user_std(size, ptr, x); | 110 | return copy_to_user_std(size, ptr, x); |
112 | return copy_to_user_mvcos(size, ptr, x); | 111 | return copy_to_user_mvcos(size, ptr, x); |
113 | } | 112 | } |
114 | 113 | ||
115 | size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) | 114 | static size_t copy_in_user_mvcos(size_t size, void __user *to, |
115 | const void __user *from) | ||
116 | { | 116 | { |
117 | register unsigned long reg0 asm("0") = 0x810081UL; | 117 | register unsigned long reg0 asm("0") = 0x810081UL; |
118 | unsigned long tmp1, tmp2; | 118 | unsigned long tmp1, tmp2; |
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) | |||
134 | return size; | 134 | return size; |
135 | } | 135 | } |
136 | 136 | ||
137 | size_t clear_user_mvcos(size_t size, void __user *to) | 137 | static size_t clear_user_mvcos(size_t size, void __user *to) |
138 | { | 138 | { |
139 | register unsigned long reg0 asm("0") = 0x810000UL; | 139 | register unsigned long reg0 asm("0") = 0x810000UL; |
140 | unsigned long tmp1, tmp2; | 140 | unsigned long tmp1, tmp2; |
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to) | |||
162 | return size; | 162 | return size; |
163 | } | 163 | } |
164 | 164 | ||
165 | extern size_t strnlen_user_std(size_t, const char __user *); | 165 | static size_t strnlen_user_mvcos(size_t count, const char __user *src) |
166 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); | 166 | { |
167 | extern int futex_atomic_op(int, int __user *, int, int *); | 167 | char buf[256]; |
168 | extern int futex_atomic_cmpxchg(int __user *, int, int); | 168 | int rc; |
169 | size_t done, len, len_str; | ||
170 | |||
171 | done = 0; | ||
172 | do { | ||
173 | len = min(count - done, (size_t) 256); | ||
174 | rc = uaccess.copy_from_user(len, src + done, buf); | ||
175 | if (unlikely(rc == len)) | ||
176 | return 0; | ||
177 | len -= rc; | ||
178 | len_str = strnlen(buf, len); | ||
179 | done += len_str; | ||
180 | } while ((len_str == len) && (done < count)); | ||
181 | return done + 1; | ||
182 | } | ||
183 | |||
184 | static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, | ||
185 | char *dst) | ||
186 | { | ||
187 | int rc; | ||
188 | size_t done, len, len_str; | ||
189 | |||
190 | done = 0; | ||
191 | do { | ||
192 | len = min(count - done, (size_t) 4096); | ||
193 | 		rc = uaccess.copy_from_user(len, src + done, dst + done); | ||
194 | if (unlikely(rc == len)) | ||
195 | return -EFAULT; | ||
196 | len -= rc; | ||
197 | len_str = strnlen(dst, len); | ||
198 | done += len_str; | ||
199 | } while ((len_str == len) && (done < count)); | ||
200 | return done; | ||
201 | } | ||
169 | 202 | ||
170 | struct uaccess_ops uaccess_mvcos = { | 203 | struct uaccess_ops uaccess_mvcos = { |
171 | .copy_from_user = copy_from_user_mvcos_check, | 204 | .copy_from_user = copy_from_user_mvcos_check, |
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = { | |||
176 | .clear_user = clear_user_mvcos, | 209 | .clear_user = clear_user_mvcos, |
177 | .strnlen_user = strnlen_user_std, | 210 | .strnlen_user = strnlen_user_std, |
178 | .strncpy_from_user = strncpy_from_user_std, | 211 | .strncpy_from_user = strncpy_from_user_std, |
179 | .futex_atomic_op = futex_atomic_op, | 212 | .futex_atomic_op = futex_atomic_op_std, |
180 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg, | 213 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, |
214 | }; | ||
215 | |||
216 | #ifdef CONFIG_S390_SWITCH_AMODE | ||
217 | struct uaccess_ops uaccess_mvcos_switch = { | ||
218 | .copy_from_user = copy_from_user_mvcos, | ||
219 | .copy_from_user_small = copy_from_user_mvcos, | ||
220 | .copy_to_user = copy_to_user_mvcos, | ||
221 | .copy_to_user_small = copy_to_user_mvcos, | ||
222 | .copy_in_user = copy_in_user_mvcos, | ||
223 | .clear_user = clear_user_mvcos, | ||
224 | .strnlen_user = strnlen_user_mvcos, | ||
225 | .strncpy_from_user = strncpy_from_user_mvcos, | ||
226 | .futex_atomic_op = futex_atomic_op_pt, | ||
227 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, | ||
181 | }; | 228 | }; |
229 | #endif | ||
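The routines in this file are reached only through the `struct uaccess_ops` tables at the bottom; the arch's uaccess wrappers dispatch through a global `uaccess` instance that boot code points at uaccess_mvcos, uaccess_mvcos_switch, uaccess_pt or uaccess_std. A rough, simplified sketch of that dispatch (the argument order and the 256-byte split match this file, the surrounding wrapper is paraphrased):

extern struct uaccess_ops uaccess;      /* selected during setup */

static inline unsigned long copy_from_user_dispatch_sketch(void *to,
                const void __user *from, unsigned long n)
{
        /* short, constant-sized copies take the *_small entry point */
        if (__builtin_constant_p(n) && n <= 256)
                return uaccess.copy_from_user_small(n, from, to);
        return uaccess.copy_from_user(n, from, to);
}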
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 49c3e46b4065..63181671e3e3 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/lib/uaccess_pt.c | 2 | * arch/s390/lib/uaccess_pt.c |
3 | * | 3 | * |
4 | * User access functions based on page table walks. | 4 | * User access functions based on page table walks for enhanced |
5 | * system layout without hardware support. | ||
5 | * | 6 | * |
6 | * Copyright IBM Corp. 2006 | 7 | * Copyright IBM Corp. 2006 |
7 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) | 8 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) |
@@ -12,9 +13,10 @@ | |||
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
13 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
14 | #include <asm/futex.h> | 15 | #include <asm/futex.h> |
16 | #include "uaccess.h" | ||
15 | 17 | ||
16 | static inline int __handle_fault(struct mm_struct *mm, unsigned long address, | 18 | static int __handle_fault(struct mm_struct *mm, unsigned long address, |
17 | int write_access) | 19 | int write_access) |
18 | { | 20 | { |
19 | struct vm_area_struct *vma; | 21 | struct vm_area_struct *vma; |
20 | int ret = -EFAULT; | 22 | int ret = -EFAULT; |
@@ -79,8 +81,8 @@ out_sigbus: | |||
79 | return ret; | 81 | return ret; |
80 | } | 82 | } |
81 | 83 | ||
82 | static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, | 84 | static size_t __user_copy_pt(unsigned long uaddr, void *kptr, |
83 | size_t n, int write_user) | 85 | size_t n, int write_user) |
84 | { | 86 | { |
85 | struct mm_struct *mm = current->mm; | 87 | struct mm_struct *mm = current->mm; |
86 | unsigned long offset, pfn, done, size; | 88 | unsigned long offset, pfn, done, size; |
@@ -133,6 +135,49 @@ fault: | |||
133 | goto retry; | 135 | goto retry; |
134 | } | 136 | } |
135 | 137 | ||
138 | /* | ||
139 | * Do DAT for user address by page table walk, return kernel address. | ||
140 | * This function needs to be called with current->mm->page_table_lock held. | ||
141 | */ | ||
142 | static unsigned long __dat_user_addr(unsigned long uaddr) | ||
143 | { | ||
144 | struct mm_struct *mm = current->mm; | ||
145 | unsigned long pfn, ret; | ||
146 | pgd_t *pgd; | ||
147 | pmd_t *pmd; | ||
148 | pte_t *pte; | ||
149 | int rc; | ||
150 | |||
151 | ret = 0; | ||
152 | retry: | ||
153 | pgd = pgd_offset(mm, uaddr); | ||
154 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
155 | goto fault; | ||
156 | |||
157 | pmd = pmd_offset(pgd, uaddr); | ||
158 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
159 | goto fault; | ||
160 | |||
161 | pte = pte_offset_map(pmd, uaddr); | ||
162 | if (!pte || !pte_present(*pte)) | ||
163 | goto fault; | ||
164 | |||
165 | pfn = pte_pfn(*pte); | ||
166 | if (!pfn_valid(pfn)) | ||
167 | goto out; | ||
168 | |||
169 | ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); | ||
170 | out: | ||
171 | return ret; | ||
172 | fault: | ||
173 | spin_unlock(&mm->page_table_lock); | ||
174 | rc = __handle_fault(mm, uaddr, 0); | ||
175 | spin_lock(&mm->page_table_lock); | ||
176 | if (rc) | ||
177 | goto out; | ||
178 | goto retry; | ||
179 | } | ||
180 | |||
136 | size_t copy_from_user_pt(size_t n, const void __user *from, void *to) | 181 | size_t copy_from_user_pt(size_t n, const void __user *from, void *to) |
137 | { | 182 | { |
138 | size_t rc; | 183 | size_t rc; |
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) | |||
155 | } | 200 | } |
156 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); | 201 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); |
157 | } | 202 | } |
203 | |||
204 | static size_t clear_user_pt(size_t n, void __user *to) | ||
205 | { | ||
206 | long done, size, ret; | ||
207 | |||
208 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
209 | memset((void __kernel __force *) to, 0, n); | ||
210 | return 0; | ||
211 | } | ||
212 | done = 0; | ||
213 | do { | ||
214 | if (n - done > PAGE_SIZE) | ||
215 | size = PAGE_SIZE; | ||
216 | else | ||
217 | size = n - done; | ||
218 | ret = __user_copy_pt((unsigned long) to + done, | ||
219 | &empty_zero_page, size, 1); | ||
220 | done += size; | ||
221 | if (ret) | ||
222 | return ret + n - done; | ||
223 | } while (done < n); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static size_t strnlen_user_pt(size_t count, const char __user *src) | ||
228 | { | ||
229 | char *addr; | ||
230 | unsigned long uaddr = (unsigned long) src; | ||
231 | struct mm_struct *mm = current->mm; | ||
232 | unsigned long offset, pfn, done, len; | ||
233 | pgd_t *pgd; | ||
234 | pmd_t *pmd; | ||
235 | pte_t *pte; | ||
236 | size_t len_str; | ||
237 | |||
238 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
239 | return strnlen((const char __kernel __force *) src, count) + 1; | ||
240 | done = 0; | ||
241 | retry: | ||
242 | spin_lock(&mm->page_table_lock); | ||
243 | do { | ||
244 | pgd = pgd_offset(mm, uaddr); | ||
245 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
246 | goto fault; | ||
247 | |||
248 | pmd = pmd_offset(pgd, uaddr); | ||
249 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
250 | goto fault; | ||
251 | |||
252 | pte = pte_offset_map(pmd, uaddr); | ||
253 | if (!pte || !pte_present(*pte)) | ||
254 | goto fault; | ||
255 | |||
256 | pfn = pte_pfn(*pte); | ||
257 | if (!pfn_valid(pfn)) { | ||
258 | done = -1; | ||
259 | goto out; | ||
260 | } | ||
261 | |||
262 | offset = uaddr & (PAGE_SIZE-1); | ||
263 | addr = (char *)(pfn << PAGE_SHIFT) + offset; | ||
264 | len = min(count - done, PAGE_SIZE - offset); | ||
265 | len_str = strnlen(addr, len); | ||
266 | done += len_str; | ||
267 | uaddr += len_str; | ||
268 | } while ((len_str == len) && (done < count)); | ||
269 | out: | ||
270 | spin_unlock(&mm->page_table_lock); | ||
271 | return done + 1; | ||
272 | fault: | ||
273 | spin_unlock(&mm->page_table_lock); | ||
274 | if (__handle_fault(mm, uaddr, 0)) { | ||
275 | return 0; | ||
276 | } | ||
277 | goto retry; | ||
278 | } | ||
279 | |||
280 | static size_t strncpy_from_user_pt(size_t count, const char __user *src, | ||
281 | char *dst) | ||
282 | { | ||
283 | size_t n = strnlen_user_pt(count, src); | ||
284 | |||
285 | if (!n) | ||
286 | return -EFAULT; | ||
287 | if (n > count) | ||
288 | n = count; | ||
289 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
290 | memcpy(dst, (const char __kernel __force *) src, n); | ||
291 | if (dst[n-1] == '\0') | ||
292 | return n-1; | ||
293 | else | ||
294 | return n; | ||
295 | } | ||
296 | if (__user_copy_pt((unsigned long) src, dst, n, 0)) | ||
297 | return -EFAULT; | ||
298 | if (dst[n-1] == '\0') | ||
299 | return n-1; | ||
300 | else | ||
301 | return n; | ||
302 | } | ||
303 | |||
304 | static size_t copy_in_user_pt(size_t n, void __user *to, | ||
305 | const void __user *from) | ||
306 | { | ||
307 | struct mm_struct *mm = current->mm; | ||
308 | unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to, | ||
309 | uaddr, done, size; | ||
310 | unsigned long uaddr_from = (unsigned long) from; | ||
311 | unsigned long uaddr_to = (unsigned long) to; | ||
312 | pgd_t *pgd_from, *pgd_to; | ||
313 | pmd_t *pmd_from, *pmd_to; | ||
314 | pte_t *pte_from, *pte_to; | ||
315 | int write_user; | ||
316 | |||
317 | done = 0; | ||
318 | retry: | ||
319 | spin_lock(&mm->page_table_lock); | ||
320 | do { | ||
321 | pgd_from = pgd_offset(mm, uaddr_from); | ||
322 | if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) { | ||
323 | uaddr = uaddr_from; | ||
324 | write_user = 0; | ||
325 | goto fault; | ||
326 | } | ||
327 | pgd_to = pgd_offset(mm, uaddr_to); | ||
328 | if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) { | ||
329 | uaddr = uaddr_to; | ||
330 | write_user = 1; | ||
331 | goto fault; | ||
332 | } | ||
333 | |||
334 | pmd_from = pmd_offset(pgd_from, uaddr_from); | ||
335 | if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) { | ||
336 | uaddr = uaddr_from; | ||
337 | write_user = 0; | ||
338 | goto fault; | ||
339 | } | ||
340 | pmd_to = pmd_offset(pgd_to, uaddr_to); | ||
341 | if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) { | ||
342 | uaddr = uaddr_to; | ||
343 | write_user = 1; | ||
344 | goto fault; | ||
345 | } | ||
346 | |||
347 | pte_from = pte_offset_map(pmd_from, uaddr_from); | ||
348 | if (!pte_from || !pte_present(*pte_from)) { | ||
349 | uaddr = uaddr_from; | ||
350 | write_user = 0; | ||
351 | goto fault; | ||
352 | } | ||
353 | pte_to = pte_offset_map(pmd_to, uaddr_to); | ||
354 | if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) { | ||
355 | uaddr = uaddr_to; | ||
356 | write_user = 1; | ||
357 | goto fault; | ||
358 | } | ||
359 | |||
360 | pfn_from = pte_pfn(*pte_from); | ||
361 | if (!pfn_valid(pfn_from)) | ||
362 | goto out; | ||
363 | pfn_to = pte_pfn(*pte_to); | ||
364 | if (!pfn_valid(pfn_to)) | ||
365 | goto out; | ||
366 | |||
367 | offset_from = uaddr_from & (PAGE_SIZE-1); | ||
368 | 		offset_to = uaddr_to & (PAGE_SIZE-1); | ||
369 | offset_max = max(offset_from, offset_to); | ||
370 | size = min(n - done, PAGE_SIZE - offset_max); | ||
371 | |||
372 | memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to, | ||
373 | (void *)(pfn_from << PAGE_SHIFT) + offset_from, size); | ||
374 | done += size; | ||
375 | uaddr_from += size; | ||
376 | uaddr_to += size; | ||
377 | } while (done < n); | ||
378 | out: | ||
379 | spin_unlock(&mm->page_table_lock); | ||
380 | return n - done; | ||
381 | fault: | ||
382 | spin_unlock(&mm->page_table_lock); | ||
383 | if (__handle_fault(mm, uaddr, write_user)) | ||
384 | return n - done; | ||
385 | goto retry; | ||
386 | } | ||
387 | |||
388 | #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ | ||
389 | asm volatile("0: l %1,0(%6)\n" \ | ||
390 | "1: " insn \ | ||
391 | "2: cs %1,%2,0(%6)\n" \ | ||
392 | "3: jl 1b\n" \ | ||
393 | " lhi %0,0\n" \ | ||
394 | "4:\n" \ | ||
395 | EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ | ||
396 | : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ | ||
397 | "=m" (*uaddr) \ | ||
398 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | ||
399 | "m" (*uaddr) : "cc" ); | ||
400 | |||
401 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | ||
402 | { | ||
403 | int oldval = 0, newval, ret; | ||
404 | |||
405 | spin_lock(¤t->mm->page_table_lock); | ||
406 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
407 | if (!uaddr) { | ||
408 | spin_unlock(¤t->mm->page_table_lock); | ||
409 | return -EFAULT; | ||
410 | } | ||
411 | get_page(virt_to_page(uaddr)); | ||
412 | spin_unlock(¤t->mm->page_table_lock); | ||
413 | switch (op) { | ||
414 | case FUTEX_OP_SET: | ||
415 | __futex_atomic_op("lr %2,%5\n", | ||
416 | ret, oldval, newval, uaddr, oparg); | ||
417 | break; | ||
418 | case FUTEX_OP_ADD: | ||
419 | __futex_atomic_op("lr %2,%1\nar %2,%5\n", | ||
420 | ret, oldval, newval, uaddr, oparg); | ||
421 | break; | ||
422 | case FUTEX_OP_OR: | ||
423 | __futex_atomic_op("lr %2,%1\nor %2,%5\n", | ||
424 | ret, oldval, newval, uaddr, oparg); | ||
425 | break; | ||
426 | case FUTEX_OP_ANDN: | ||
427 | __futex_atomic_op("lr %2,%1\nnr %2,%5\n", | ||
428 | ret, oldval, newval, uaddr, oparg); | ||
429 | break; | ||
430 | case FUTEX_OP_XOR: | ||
431 | __futex_atomic_op("lr %2,%1\nxr %2,%5\n", | ||
432 | ret, oldval, newval, uaddr, oparg); | ||
433 | break; | ||
434 | default: | ||
435 | ret = -ENOSYS; | ||
436 | } | ||
437 | put_page(virt_to_page(uaddr)); | ||
438 | *old = oldval; | ||
439 | return ret; | ||
440 | } | ||
441 | |||
442 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | ||
443 | { | ||
444 | int ret; | ||
445 | |||
446 | spin_lock(¤t->mm->page_table_lock); | ||
447 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
448 | if (!uaddr) { | ||
449 | spin_unlock(¤t->mm->page_table_lock); | ||
450 | return -EFAULT; | ||
451 | } | ||
452 | get_page(virt_to_page(uaddr)); | ||
453 | spin_unlock(¤t->mm->page_table_lock); | ||
454 | asm volatile(" cs %1,%4,0(%5)\n" | ||
455 | "0: lr %0,%1\n" | ||
456 | "1:\n" | ||
457 | EX_TABLE(0b,1b) | ||
458 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | ||
459 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | ||
460 | : "cc", "memory" ); | ||
461 | put_page(virt_to_page(uaddr)); | ||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | struct uaccess_ops uaccess_pt = { | ||
466 | .copy_from_user = copy_from_user_pt, | ||
467 | .copy_from_user_small = copy_from_user_pt, | ||
468 | .copy_to_user = copy_to_user_pt, | ||
469 | .copy_to_user_small = copy_to_user_pt, | ||
470 | .copy_in_user = copy_in_user_pt, | ||
471 | .clear_user = clear_user_pt, | ||
472 | .strnlen_user = strnlen_user_pt, | ||
473 | .strncpy_from_user = strncpy_from_user_pt, | ||
474 | .futex_atomic_op = futex_atomic_op_pt, | ||
475 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, | ||
476 | }; | ||
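The __futex_atomic_op() macro above wraps the requested operation in a compare-and-swap retry loop. Ignoring the exception-table fixups, its effect for FUTEX_OP_ADD is roughly the following; a sketch only, since the real code operates on the kernel mapping returned by __dat_user_addr():

static int futex_add_sketch(int *kaddr, int oparg, int *old)
{
        int oldval, newval;

        do {
                oldval = *kaddr;                /* 0: l  %1,0(%6) */
                newval = oldval + oparg;        /* 1: the "ar" step */
                /* 2: cs %1,%2,0(%6) / 3: jl 1b -> retry on contention */
        } while (__sync_val_compare_and_swap(kaddr, oldval, newval) != oldval);
        *old = oldval;
        return 0;
}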
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index 56a0214e9928..28c4500a58d0 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <asm/futex.h> | 15 | #include <asm/futex.h> |
16 | #include "uaccess.h" | ||
16 | 17 | ||
17 | #ifndef __s390x__ | 18 | #ifndef __s390x__ |
18 | #define AHI "ahi" | 19 | #define AHI "ahi" |
@@ -28,9 +29,6 @@ | |||
28 | #define SLR "slgr" | 29 | #define SLR "slgr" |
29 | #endif | 30 | #endif |
30 | 31 | ||
31 | extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to); | ||
32 | extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from); | ||
33 | |||
34 | size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) | 32 | size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) |
35 | { | 33 | { |
36 | unsigned long tmp1, tmp2; | 34 | unsigned long tmp1, tmp2; |
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) | |||
72 | return size; | 70 | return size; |
73 | } | 71 | } |
74 | 72 | ||
75 | size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) | 73 | static size_t copy_from_user_std_check(size_t size, const void __user *ptr, |
74 | void *x) | ||
76 | { | 75 | { |
77 | if (size <= 1024) | 76 | if (size <= 1024) |
78 | return copy_from_user_std(size, ptr, x); | 77 | return copy_from_user_std(size, ptr, x); |
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) | |||
110 | return size; | 109 | return size; |
111 | } | 110 | } |
112 | 111 | ||
113 | size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) | 112 | static size_t copy_to_user_std_check(size_t size, void __user *ptr, |
113 | const void *x) | ||
114 | { | 114 | { |
115 | if (size <= 1024) | 115 | if (size <= 1024) |
116 | return copy_to_user_std(size, ptr, x); | 116 | return copy_to_user_std(size, ptr, x); |
117 | return copy_to_user_pt(size, ptr, x); | 117 | return copy_to_user_pt(size, ptr, x); |
118 | } | 118 | } |
119 | 119 | ||
120 | size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) | 120 | static size_t copy_in_user_std(size_t size, void __user *to, |
121 | const void __user *from) | ||
121 | { | 122 | { |
122 | unsigned long tmp1; | 123 | unsigned long tmp1; |
123 | 124 | ||
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) | |||
148 | return size; | 149 | return size; |
149 | } | 150 | } |
150 | 151 | ||
151 | size_t clear_user_std(size_t size, void __user *to) | 152 | static size_t clear_user_std(size_t size, void __user *to) |
152 | { | 153 | { |
153 | unsigned long tmp1, tmp2; | 154 | unsigned long tmp1, tmp2; |
154 | 155 | ||
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) | |||
254 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | 255 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
255 | "m" (*uaddr) : "cc"); | 256 | "m" (*uaddr) : "cc"); |
256 | 257 | ||
257 | int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | 258 | int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) |
258 | { | 259 | { |
259 | int oldval = 0, newval, ret; | 260 | int oldval = 0, newval, ret; |
260 | 261 | ||
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | |||
286 | return ret; | 287 | return ret; |
287 | } | 288 | } |
288 | 289 | ||
289 | int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) | 290 | int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) |
290 | { | 291 | { |
291 | int ret; | 292 | int ret; |
292 | 293 | ||
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = { | |||
311 | .clear_user = clear_user_std, | 312 | .clear_user = clear_user_std, |
312 | .strnlen_user = strnlen_user_std, | 313 | .strnlen_user = strnlen_user_std, |
313 | .strncpy_from_user = strncpy_from_user_std, | 314 | .strncpy_from_user = strncpy_from_user_std, |
314 | .futex_atomic_op = futex_atomic_op, | 315 | .futex_atomic_op = futex_atomic_op_std, |
315 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg, | 316 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, |
316 | }; | 317 | }; |
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile index c10df144f2ab..73b3e72efc46 100644 --- a/arch/s390/math-emu/Makefile +++ b/arch/s390/math-emu/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the FPU instruction emulation. | 2 | # Makefile for the FPU instruction emulation. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_MATHEMU) := math.o qrnnd.o | 5 | obj-$(CONFIG_MATHEMU) := math.o |
6 | 6 | ||
7 | EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w | 7 | EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w |
8 | EXTRA_AFLAGS := -traditional | 8 | EXTRA_AFLAGS := -traditional |
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c index 6b9aec5a2c18..3ee78ccb617d 100644 --- a/arch/s390/math-emu/math.c +++ b/arch/s390/math-emu/math.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
16 | #include <asm/lowcore.h> | 16 | #include <asm/lowcore.h> |
17 | 17 | ||
18 | #include "sfp-util.h" | 18 | #include <asm/sfp-util.h> |
19 | #include <math-emu/soft-fp.h> | 19 | #include <math-emu/soft-fp.h> |
20 | #include <math-emu/single.h> | 20 | #include <math-emu/single.h> |
21 | #include <math-emu/double.h> | 21 | #include <math-emu/double.h> |
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S deleted file mode 100644 index b01c2b648e22..000000000000 --- a/arch/s390/math-emu/qrnnd.S +++ /dev/null | |||
@@ -1,77 +0,0 @@ | |||
1 | # S/390 __udiv_qrnnd | ||
2 | |||
3 | # r2 : &__r | ||
4 | # r3 : upper half of 64 bit word n | ||
5 | # r4 : lower half of 64 bit word n | ||
6 | # r5 : divisor d | ||
7 | # the reminder r of the division is to be stored to &__r and | ||
8 | # the quotient q is to be returned | ||
9 | |||
10 | .text | ||
11 | .globl __udiv_qrnnd | ||
12 | __udiv_qrnnd: | ||
13 | st %r2,24(%r15) # store pointer to reminder for later | ||
14 | lr %r0,%r3 # reload n | ||
15 | lr %r1,%r4 | ||
16 | ltr %r2,%r5 # reload and test divisor | ||
17 | jp 5f | ||
18 | # divisor >= 0x80000000 | ||
19 | srdl %r0,2 # n/4 | ||
20 | srl %r2,1 # d/2 | ||
21 | slr %r1,%r2 # special case if last bit of d is set | ||
22 | brc 3,0f # (n/4) div (n/2) can overflow by 1 | ||
23 | ahi %r0,-1 # trick: subtract n/2, then divide | ||
24 | 0: dr %r0,%r2 # signed division | ||
25 | ahi %r1,1 # trick part 2: add 1 to the quotient | ||
26 | # now (n >> 2) = (d >> 1) * %r1 + %r0 | ||
27 | lhi %r3,1 | ||
28 | nr %r3,%r1 # test last bit of q | ||
29 | jz 1f | ||
30 | alr %r0,%r2 # add (d>>1) to r | ||
31 | 1: srl %r1,1 # q >>= 1 | ||
32 | # now (n >> 2) = (d&-2) * %r1 + %r0 | ||
33 | lhi %r3,1 | ||
34 | nr %r3,%r5 # test last bit of d | ||
35 | jz 2f | ||
36 | slr %r0,%r1 # r -= q | ||
37 | brc 3,2f # borrow ? | ||
38 | alr %r0,%r5 # r += d | ||
39 | ahi %r1,-1 | ||
40 | 2: # now (n >> 2) = d * %r1 + %r0 | ||
41 | alr %r1,%r1 # q <<= 1 | ||
42 | alr %r0,%r0 # r <<= 1 | ||
43 | brc 12,3f # overflow on r ? | ||
44 | slr %r0,%r5 # r -= d | ||
45 | ahi %r1,1 # q += 1 | ||
46 | 3: lhi %r3,2 | ||
47 | nr %r3,%r4 # test next to last bit of n | ||
48 | jz 4f | ||
49 | ahi %r0,1 # r += 1 | ||
50 | 4: clr %r0,%r5 # r >= d ? | ||
51 | jl 6f | ||
52 | slr %r0,%r5 # r -= d | ||
53 | ahi %r1,1 # q += 1 | ||
54 | # now (n >> 1) = d * %r1 + %r0 | ||
55 | j 6f | ||
56 | 5: # divisor < 0x80000000 | ||
57 | srdl %r0,1 | ||
58 | dr %r0,%r2 # signed division | ||
59 | # now (n >> 1) = d * %r1 + %r0 | ||
60 | 6: alr %r1,%r1 # q <<= 1 | ||
61 | alr %r0,%r0 # r <<= 1 | ||
62 | brc 12,7f # overflow on r ? | ||
63 | slr %r0,%r5 # r -= d | ||
64 | ahi %r1,1 # q += 1 | ||
65 | 7: lhi %r3,1 | ||
66 | nr %r3,%r4 # isolate last bit of n | ||
67 | alr %r0,%r3 # r += (n & 1) | ||
68 | clr %r0,%r5 # r >= d ? | ||
69 | jl 8f | ||
70 | slr %r0,%r5 # r -= d | ||
71 | ahi %r1,1 # q += 1 | ||
72 | 8: # now n = d * %r1 + %r0 | ||
73 | l %r2,24(%r15) | ||
74 | st %r0,0(%r2) | ||
75 | lr %r2,%r1 | ||
76 | br %r14 | ||
77 | .end __udiv_qrnnd | ||
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 607f50ead1fd..f93a056869bc 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -245,7 +245,7 @@ cmm_set_timeout(long nr, long seconds) | |||
245 | cmm_set_timer(); | 245 | cmm_set_timer(); |
246 | } | 246 | } |
247 | 247 | ||
248 | static inline int | 248 | static int |
249 | cmm_skip_blanks(char *cp, char **endp) | 249 | cmm_skip_blanks(char *cp, char **endp) |
250 | { | 250 | { |
251 | char *str; | 251 | char *str; |
@@ -414,7 +414,7 @@ cmm_smsg_target(char *from, char *msg) | |||
414 | } | 414 | } |
415 | #endif | 415 | #endif |
416 | 416 | ||
417 | struct ctl_table_header *cmm_sysctl_header; | 417 | static struct ctl_table_header *cmm_sysctl_header; |
418 | 418 | ||
419 | static int | 419 | static int |
420 | cmm_init (void) | 420 | cmm_init (void) |
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 775bf19e742b..394980b05e6f 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
17 | #include <linux/ctype.h> | 17 | #include <linux/ctype.h> |
18 | #include <linux/ioport.h> | ||
18 | #include <asm/page.h> | 19 | #include <asm/page.h> |
19 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
20 | #include <asm/ebcdic.h> | 21 | #include <asm/ebcdic.h> |
@@ -70,6 +71,7 @@ struct qin64 { | |||
70 | struct dcss_segment { | 71 | struct dcss_segment { |
71 | struct list_head list; | 72 | struct list_head list; |
72 | char dcss_name[8]; | 73 | char dcss_name[8]; |
74 | 	char res_name[16]; | ||
73 | unsigned long start_addr; | 75 | unsigned long start_addr; |
74 | unsigned long end; | 76 | unsigned long end; |
75 | atomic_t ref_count; | 77 | atomic_t ref_count; |
@@ -77,6 +79,7 @@ struct dcss_segment { | |||
77 | unsigned int vm_segtype; | 79 | unsigned int vm_segtype; |
78 | struct qrange range[6]; | 80 | struct qrange range[6]; |
79 | int segcnt; | 81 | int segcnt; |
82 | struct resource *res; | ||
80 | }; | 83 | }; |
81 | 84 | ||
82 | static DEFINE_MUTEX(dcss_lock); | 85 | static DEFINE_MUTEX(dcss_lock); |
@@ -88,7 +91,7 @@ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", | |||
88 | * Create the 8 bytes, ebcdic VM segment name from | 91 | * Create the 8 bytes, ebcdic VM segment name from |
89 | * an ascii name. | 92 | * an ascii name. |
90 | */ | 93 | */ |
91 | static void inline | 94 | static void |
92 | dcss_mkname(char *name, char *dcss_name) | 95 | dcss_mkname(char *name, char *dcss_name) |
93 | { | 96 | { |
94 | int i; | 97 | int i; |
@@ -303,6 +306,29 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
303 | goto out_free; | 306 | goto out_free; |
304 | } | 307 | } |
305 | 308 | ||
309 | seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); | ||
310 | if (seg->res == NULL) { | ||
311 | rc = -ENOMEM; | ||
312 | goto out_shared; | ||
313 | } | ||
314 | seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
315 | seg->res->start = seg->start_addr; | ||
316 | seg->res->end = seg->end; | ||
317 | memcpy(&seg->res_name, seg->dcss_name, 8); | ||
318 | EBCASC(seg->res_name, 8); | ||
319 | seg->res_name[8] = '\0'; | ||
320 | strncat(seg->res_name, " (DCSS)", 7); | ||
321 | seg->res->name = seg->res_name; | ||
322 | rc = seg->vm_segtype; | ||
323 | if (rc == SEG_TYPE_SC || | ||
324 | ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) | ||
325 | seg->res->flags |= IORESOURCE_READONLY; | ||
326 | if (request_resource(&iomem_resource, seg->res)) { | ||
327 | rc = -EBUSY; | ||
328 | kfree(seg->res); | ||
329 | goto out_shared; | ||
330 | } | ||
331 | |||
306 | if (do_nonshared) | 332 | if (do_nonshared) |
307 | dcss_command = DCSS_LOADNSR; | 333 | dcss_command = DCSS_LOADNSR; |
308 | else | 334 | else |
@@ -316,12 +342,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
316 | rc = dcss_diag_translate_rc (seg->end); | 342 | rc = dcss_diag_translate_rc (seg->end); |
317 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 343 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, |
318 | &seg->start_addr, &seg->end); | 344 | &seg->start_addr, &seg->end); |
319 | goto out_shared; | 345 | goto out_resource; |
320 | } | 346 | } |
321 | seg->do_nonshared = do_nonshared; | 347 | seg->do_nonshared = do_nonshared; |
322 | atomic_set(&seg->ref_count, 1); | 348 | atomic_set(&seg->ref_count, 1); |
323 | list_add(&seg->list, &dcss_list); | 349 | list_add(&seg->list, &dcss_list); |
324 | rc = seg->vm_segtype; | ||
325 | *addr = seg->start_addr; | 350 | *addr = seg->start_addr; |
326 | *end = seg->end; | 351 | *end = seg->end; |
327 | if (do_nonshared) | 352 | if (do_nonshared) |
@@ -329,12 +354,16 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
329 | "type %s in non-shared mode\n", name, | 354 | "type %s in non-shared mode\n", name, |
330 | (void*)seg->start_addr, (void*)seg->end, | 355 | (void*)seg->start_addr, (void*)seg->end, |
331 | segtype_string[seg->vm_segtype]); | 356 | segtype_string[seg->vm_segtype]); |
332 | else | 357 | else { |
333 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | 358 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " |
334 | "type %s in shared mode\n", name, | 359 | "type %s in shared mode\n", name, |
335 | (void*)seg->start_addr, (void*)seg->end, | 360 | (void*)seg->start_addr, (void*)seg->end, |
336 | segtype_string[seg->vm_segtype]); | 361 | segtype_string[seg->vm_segtype]); |
362 | } | ||
337 | goto out; | 363 | goto out; |
364 | out_resource: | ||
365 | release_resource(seg->res); | ||
366 | kfree(seg->res); | ||
338 | out_shared: | 367 | out_shared: |
339 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | 368 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
340 | out_free: | 369 | out_free: |
@@ -401,6 +430,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr, | |||
401 | * -ENOENT : no such segment (segment gone!) | 430 | * -ENOENT : no such segment (segment gone!) |
402 | * -EAGAIN : segment is in use by other exploiters, try later | 431 | * -EAGAIN : segment is in use by other exploiters, try later |
403 | * -EINVAL : no segment with the given name is currently loaded - name invalid | 432 | * -EINVAL : no segment with the given name is currently loaded - name invalid |
433 | * -EBUSY : segment can temporarily not be used (overlaps with dcss) | ||
404 | * 0 : operation succeeded | 434 | * 0 : operation succeeded |
405 | */ | 435 | */ |
406 | int | 436 | int |
@@ -428,12 +458,24 @@ segment_modify_shared (char *name, int do_nonshared) | |||
428 | rc = -EAGAIN; | 458 | rc = -EAGAIN; |
429 | goto out_unlock; | 459 | goto out_unlock; |
430 | } | 460 | } |
431 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 461 | release_resource(seg->res); |
432 | &dummy, &dummy); | 462 | if (do_nonshared) { |
433 | if (do_nonshared) | ||
434 | dcss_command = DCSS_LOADNSR; | 463 | dcss_command = DCSS_LOADNSR; |
435 | else | 464 | seg->res->flags &= ~IORESOURCE_READONLY; |
436 | dcss_command = DCSS_LOADNOLY; | 465 | } else { |
466 | dcss_command = DCSS_LOADNOLY; | ||
467 | if (seg->vm_segtype == SEG_TYPE_SR || | ||
468 | seg->vm_segtype == SEG_TYPE_ER) | ||
469 | seg->res->flags |= IORESOURCE_READONLY; | ||
470 | } | ||
471 | if (request_resource(&iomem_resource, seg->res)) { | ||
472 | PRINT_WARN("segment_modify_shared: could not reload segment %s" | ||
473 | " - overlapping resources\n", name); | ||
474 | rc = -EBUSY; | ||
475 | kfree(seg->res); | ||
476 | goto out_del; | ||
477 | } | ||
478 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); | ||
437 | diag_cc = dcss_diag(dcss_command, seg->dcss_name, | 479 | diag_cc = dcss_diag(dcss_command, seg->dcss_name, |
438 | &seg->start_addr, &seg->end); | 480 | &seg->start_addr, &seg->end); |
439 | if (diag_cc > 1) { | 481 | if (diag_cc > 1) { |
@@ -446,9 +488,9 @@ segment_modify_shared (char *name, int do_nonshared) | |||
446 | rc = 0; | 488 | rc = 0; |
447 | goto out_unlock; | 489 | goto out_unlock; |
448 | out_del: | 490 | out_del: |
491 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | ||
449 | list_del(&seg->list); | 492 | list_del(&seg->list); |
450 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 493 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); |
451 | &dummy, &dummy); | ||
452 | kfree(seg); | 494 | kfree(seg); |
453 | out_unlock: | 495 | out_unlock: |
454 | mutex_unlock(&dcss_lock); | 496 | mutex_unlock(&dcss_lock); |
@@ -478,6 +520,8 @@ segment_unload(char *name) | |||
478 | } | 520 | } |
479 | if (atomic_dec_return(&seg->ref_count) != 0) | 521 | if (atomic_dec_return(&seg->ref_count) != 0) |
480 | goto out_unlock; | 522 | goto out_unlock; |
523 | release_resource(seg->res); | ||
524 | kfree(seg->res); | ||
481 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | 525 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
482 | list_del(&seg->list); | 526 | list_del(&seg->list); |
483 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); | 527 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); |
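The extmem hunks above make each loaded DCSS segment claim its address range in the kernel's iomem resource tree; a reload that would overlap an existing resource now fails with -EBUSY instead of colliding silently. A minimal sketch of that claim/release pattern follows; the helper names are hypothetical and the flags merely mirror what the driver uses, so treat this as an illustration rather than the driver's actual code:

#include <linux/ioport.h>
#include <linux/slab.h>

/* Claim an address range for a DCSS-like segment (illustrative only). */
static struct resource *claim_segment(const char *name,
				      unsigned long start, unsigned long end)
{
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;
	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (request_resource(&iomem_resource, res)) {
		/* Overlap with an existing resource; callers map this to -EBUSY. */
		kfree(res);
		return NULL;
	}
	return res;
}

/* Release the range again before the segment is purged. */
static void drop_segment(struct resource *res)
{
	release_resource(res);
	kfree(res);
}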
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index cd85e34d8703..9ff143e87746 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -52,7 +52,7 @@ extern int sysctl_userprocess_debug; | |||
52 | extern void die(const char *,struct pt_regs *,long); | 52 | extern void die(const char *,struct pt_regs *,long); |
53 | 53 | ||
54 | #ifdef CONFIG_KPROBES | 54 | #ifdef CONFIG_KPROBES |
55 | ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); | 55 | static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); |
56 | int register_page_fault_notifier(struct notifier_block *nb) | 56 | int register_page_fault_notifier(struct notifier_block *nb) |
57 | { | 57 | { |
58 | return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); | 58 | return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); |
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code) | |||
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Check which address space the address belongs to. | 139 | * Check which address space the address belongs to. |
140 | * Returns 1 for user space and 0 for kernel space. | 140 | * May return 1 or 2 for user space and 0 for kernel space. |
141 | * Returns 2 for user space in primary addressing mode with | ||
142 | * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on. | ||
141 | */ | 143 | */ |
142 | static inline int check_user_space(struct pt_regs *regs, int error_code) | 144 | static inline int check_user_space(struct pt_regs *regs, int error_code) |
143 | { | 145 | { |
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code) | |||
154 | return __check_access_register(regs, error_code); | 156 | return __check_access_register(regs, error_code); |
155 | if (descriptor == 2) | 157 | if (descriptor == 2) |
156 | return current->thread.mm_segment.ar4; | 158 | return current->thread.mm_segment.ar4; |
157 | return descriptor != 0; | 159 | return ((descriptor != 0) ^ (switch_amode)) << s390_noexec; |
158 | } | 160 | } |
159 | 161 | ||
160 | /* | 162 | /* |
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, | |||
183 | force_sig_info(SIGSEGV, &si, current); | 185 | force_sig_info(SIGSEGV, &si, current); |
184 | } | 186 | } |
185 | 187 | ||
188 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
189 | extern long sys_sigreturn(struct pt_regs *regs); | ||
190 | extern long sys_rt_sigreturn(struct pt_regs *regs); | ||
191 | extern long sys32_sigreturn(struct pt_regs *regs); | ||
192 | extern long sys32_rt_sigreturn(struct pt_regs *regs); | ||
193 | |||
194 | static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, | ||
195 | int rt) | ||
196 | { | ||
197 | up_read(&mm->mmap_sem); | ||
198 | clear_tsk_thread_flag(current, TIF_SINGLE_STEP); | ||
199 | #ifdef CONFIG_COMPAT | ||
200 | if (test_tsk_thread_flag(current, TIF_31BIT)) { | ||
201 | if (rt) | ||
202 | sys32_rt_sigreturn(regs); | ||
203 | else | ||
204 | sys32_sigreturn(regs); | ||
205 | return; | ||
206 | } | ||
207 | #endif /* CONFIG_COMPAT */ | ||
208 | if (rt) | ||
209 | sys_rt_sigreturn(regs); | ||
210 | else | ||
211 | sys_sigreturn(regs); | ||
212 | return; | ||
213 | } | ||
214 | |||
215 | static int signal_return(struct mm_struct *mm, struct pt_regs *regs, | ||
216 | unsigned long address, unsigned long error_code) | ||
217 | { | ||
218 | pgd_t *pgd; | ||
219 | pmd_t *pmd; | ||
220 | pte_t *pte; | ||
221 | u16 *instruction; | ||
222 | unsigned long pfn, uaddr = regs->psw.addr; | ||
223 | |||
224 | spin_lock(&mm->page_table_lock); | ||
225 | pgd = pgd_offset(mm, uaddr); | ||
226 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
227 | goto out_fault; | ||
228 | pmd = pmd_offset(pgd, uaddr); | ||
229 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
230 | goto out_fault; | ||
231 | pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr); | ||
232 | if (!pte || !pte_present(*pte)) | ||
233 | goto out_fault; | ||
234 | pfn = pte_pfn(*pte); | ||
235 | if (!pfn_valid(pfn)) | ||
236 | goto out_fault; | ||
237 | spin_unlock(&mm->page_table_lock); | ||
238 | |||
239 | instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1))); | ||
240 | if (*instruction == 0x0a77) | ||
241 | do_sigreturn(mm, regs, 0); | ||
242 | else if (*instruction == 0x0aad) | ||
243 | do_sigreturn(mm, regs, 1); | ||
244 | else { | ||
245 | printk("- XXX - do_exception: task = %s, primary, NO EXEC " | ||
246 | "-> SIGSEGV\n", current->comm); | ||
247 | up_read(&mm->mmap_sem); | ||
248 | current->thread.prot_addr = address; | ||
249 | current->thread.trap_no = error_code; | ||
250 | do_sigsegv(regs, error_code, SEGV_MAPERR, address); | ||
251 | } | ||
252 | return 0; | ||
253 | out_fault: | ||
254 | spin_unlock(&mm->page_table_lock); | ||
255 | return -EFAULT; | ||
256 | } | ||
257 | #endif /* CONFIG_S390_EXEC_PROTECT */ | ||
258 | |||
186 | /* | 259 | /* |
187 | * This routine handles page faults. It determines the address, | 260 | * This routine handles page faults. It determines the address, |
188 | * and the problem, and then passes it off to one of the appropriate | 261 | * and the problem, and then passes it off to one of the appropriate |
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | |||
260 | vma = find_vma(mm, address); | 333 | vma = find_vma(mm, address); |
261 | if (!vma) | 334 | if (!vma) |
262 | goto bad_area; | 335 | goto bad_area; |
336 | |||
337 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
338 | if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC))) | ||
339 | if (!signal_return(mm, regs, address, error_code)) | ||
340 | /* | ||
341 | * signal_return() has done an up_read(&mm->mmap_sem) | ||
342 | * if it returns 0. | ||
343 | */ | ||
344 | return; | ||
345 | #endif | ||
346 | |||
263 | if (vma->vm_start <= address) | 347 | if (vma->vm_start <= address) |
264 | goto good_area; | 348 | goto good_area; |
265 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 349 | if (!(vma->vm_flags & VM_GROWSDOWN)) |
@@ -452,8 +536,7 @@ void pfault_fini(void) | |||
452 | : : "a" (&refbk), "m" (refbk) : "cc"); | 536 | : : "a" (&refbk), "m" (refbk) : "cc"); |
453 | } | 537 | } |
454 | 538 | ||
455 | asmlinkage void | 539 | static void pfault_interrupt(__u16 error_code) |
456 | pfault_interrupt(__u16 error_code) | ||
457 | { | 540 | { |
458 | struct task_struct *tsk; | 541 | struct task_struct *tsk; |
459 | __u16 subcode; | 542 | __u16 subcode; |
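In the fault-handler changes above, the page-fault notifier chain becomes static and is reachable only through the register/unregister wrappers, the usual way to encapsulate an atomic notifier chain. A rough sketch of that pattern with a hypothetical chain, subscriber and event value (the real chain carries kprobes' die_args as payload, omitted here):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(demo_chain);	/* hypothetical chain */

/* Subscriber callback; NOTIFY_DONE lets later subscribers run as well. */
static int demo_event(struct notifier_block *self, unsigned long action,
		      void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_event,
};

/* Wrapper so the chain itself can stay static to this file. */
int demo_register(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&demo_chain, nb);
}

/* Producer side: fire the chain; 0/NULL stand in for a real event. */
void demo_notify(void)
{
	atomic_notifier_call_chain(&demo_chain, 0, NULL);
}

static int __init demo_init(void)
{
	return demo_register(&demo_nb);
}
module_init(demo_init);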
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 4bb21be3b007..b3e7c45efb63 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
27 | #include <linux/poison.h> | 27 | #include <linux/poison.h> |
28 | 28 | #include <linux/initrd.h> | |
29 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | #include <asm/system.h> | 30 | #include <asm/system.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void) | |||
95 | pte_t new_pte; | 95 | pte_t new_pte; |
96 | unsigned long address, end; | 96 | unsigned long address, end; |
97 | 97 | ||
98 | address = ((unsigned long)&__start_rodata) & PAGE_MASK; | 98 | address = ((unsigned long)&_stext) & PAGE_MASK; |
99 | end = PFN_ALIGN((unsigned long)&__end_rodata); | 99 | end = PFN_ALIGN((unsigned long)&_eshared); |
100 | 100 | ||
101 | for (; address < end; address += PAGE_SIZE) { | 101 | for (; address < end; address += PAGE_SIZE) { |
102 | pgd = pgd_offset_k(address); | 102 | pgd = pgd_offset_k(address); |
103 | pmd = pmd_offset(pgd, address); | 103 | pmd = pmd_offset(pgd, address); |
104 | pte = pte_offset_kernel(pmd, address); | 104 | pte = pte_offset_kernel(pmd, address); |
105 | new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); | 105 | new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); |
106 | set_pte(pte, new_pte); | 106 | *pte = new_pte; |
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
110 | extern void vmem_map_init(void); | ||
111 | |||
112 | /* | 110 | /* |
113 | * paging_init() sets up the page tables | 111 | * paging_init() sets up the page tables |
114 | */ | 112 | */ |
@@ -125,11 +123,11 @@ void __init paging_init(void) | |||
125 | #ifdef CONFIG_64BIT | 123 | #ifdef CONFIG_64BIT |
126 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; | 124 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; |
127 | for (i = 0; i < PTRS_PER_PGD; i++) | 125 | for (i = 0; i < PTRS_PER_PGD; i++) |
128 | pgd_clear(pg_dir + i); | 126 | pgd_clear_kernel(pg_dir + i); |
129 | #else | 127 | #else |
130 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; | 128 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; |
131 | for (i = 0; i < PTRS_PER_PGD; i++) | 129 | for (i = 0; i < PTRS_PER_PGD; i++) |
132 | pmd_clear((pmd_t *)(pg_dir + i)); | 130 | pmd_clear_kernel((pmd_t *)(pg_dir + i)); |
133 | #endif | 131 | #endif |
134 | vmem_map_init(); | 132 | vmem_map_init(); |
135 | setup_ro_region(); | 133 | setup_ro_region(); |
@@ -174,10 +172,8 @@ void __init mem_init(void) | |||
174 | datasize >>10, | 172 | datasize >>10, |
175 | initsize >> 10); | 173 | initsize >> 10); |
176 | printk("Write protected kernel read-only data: %#lx - %#lx\n", | 174 | printk("Write protected kernel read-only data: %#lx - %#lx\n", |
177 | (unsigned long)&__start_rodata, | 175 | (unsigned long)&_stext, |
178 | PFN_ALIGN((unsigned long)&__end_rodata) - 1); | 176 | PFN_ALIGN((unsigned long)&_eshared) - 1); |
179 | printk("Virtual memmap size: %ldk\n", | ||
180 | (max_pfn * sizeof(struct page)) >> 10); | ||
181 | } | 177 | } |
182 | 178 | ||
183 | void free_initmem(void) | 179 | void free_initmem(void) |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index cd3d93e8c211..92a565190028 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void) | |||
82 | if (!pmd) | 82 | if (!pmd) |
83 | return NULL; | 83 | return NULL; |
84 | for (i = 0; i < PTRS_PER_PMD; i++) | 84 | for (i = 0; i < PTRS_PER_PMD; i++) |
85 | pmd_clear(pmd + i); | 85 | pmd_clear_kernel(pmd + i); |
86 | return pmd; | 86 | return pmd; |
87 | } | 87 | } |
88 | 88 | ||
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void) | |||
97 | return NULL; | 97 | return NULL; |
98 | pte_val(empty_pte) = _PAGE_TYPE_EMPTY; | 98 | pte_val(empty_pte) = _PAGE_TYPE_EMPTY; |
99 | for (i = 0; i < PTRS_PER_PTE; i++) | 99 | for (i = 0; i < PTRS_PER_PTE; i++) |
100 | set_pte(pte + i, empty_pte); | 100 | pte[i] = empty_pte; |
101 | return pte; | 101 | return pte; |
102 | } | 102 | } |
103 | 103 | ||
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size) | |||
119 | pm_dir = vmem_pmd_alloc(); | 119 | pm_dir = vmem_pmd_alloc(); |
120 | if (!pm_dir) | 120 | if (!pm_dir) |
121 | goto out; | 121 | goto out; |
122 | pgd_populate(&init_mm, pg_dir, pm_dir); | 122 | pgd_populate_kernel(&init_mm, pg_dir, pm_dir); |
123 | } | 123 | } |
124 | 124 | ||
125 | pm_dir = pmd_offset(pg_dir, address); | 125 | pm_dir = pmd_offset(pg_dir, address); |
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size) | |||
132 | 132 | ||
133 | pt_dir = pte_offset_kernel(pm_dir, address); | 133 | pt_dir = pte_offset_kernel(pm_dir, address); |
134 | pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); | 134 | pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); |
135 | set_pte(pt_dir, pte); | 135 | *pt_dir = pte; |
136 | } | 136 | } |
137 | ret = 0; | 137 | ret = 0; |
138 | out: | 138 | out: |
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size) | |||
161 | if (pmd_none(*pm_dir)) | 161 | if (pmd_none(*pm_dir)) |
162 | continue; | 162 | continue; |
163 | pt_dir = pte_offset_kernel(pm_dir, address); | 163 | pt_dir = pte_offset_kernel(pm_dir, address); |
164 | set_pte(pt_dir, pte); | 164 | *pt_dir = pte; |
165 | } | 165 | } |
166 | flush_tlb_kernel_range(start, start + size); | 166 | flush_tlb_kernel_range(start, start + size); |
167 | } | 167 | } |
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size) | |||
191 | pm_dir = vmem_pmd_alloc(); | 191 | pm_dir = vmem_pmd_alloc(); |
192 | if (!pm_dir) | 192 | if (!pm_dir) |
193 | goto out; | 193 | goto out; |
194 | pgd_populate(&init_mm, pg_dir, pm_dir); | 194 | pgd_populate_kernel(&init_mm, pg_dir, pm_dir); |
195 | } | 195 | } |
196 | 196 | ||
197 | pm_dir = pmd_offset(pg_dir, address); | 197 | pm_dir = pmd_offset(pg_dir, address); |
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size) | |||
210 | if (!new_page) | 210 | if (!new_page) |
211 | goto out; | 211 | goto out; |
212 | pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); | 212 | pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); |
213 | set_pte(pt_dir, pte); | 213 | *pt_dir = pte; |
214 | } | 214 | } |
215 | } | 215 | } |
216 | ret = 0; | 216 | ret = 0; |
diff --git a/crypto/Kconfig b/crypto/Kconfig index 92ba249f3a5b..918b4d845f93 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -74,14 +74,6 @@ config CRYPTO_SHA1 | |||
74 | help | 74 | help |
75 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). | 75 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). |
76 | 76 | ||
77 | config CRYPTO_SHA1_S390 | ||
78 | tristate "SHA1 digest algorithm (s390)" | ||
79 | depends on S390 | ||
80 | select CRYPTO_ALGAPI | ||
81 | help | ||
82 | This is the s390 hardware accelerated implementation of the | ||
83 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). | ||
84 | |||
85 | config CRYPTO_SHA256 | 77 | config CRYPTO_SHA256 |
86 | tristate "SHA256 digest algorithm" | 78 | tristate "SHA256 digest algorithm" |
87 | select CRYPTO_ALGAPI | 79 | select CRYPTO_ALGAPI |
@@ -91,17 +83,6 @@ config CRYPTO_SHA256 | |||
91 | This version of SHA implements a 256 bit hash with 128 bits of | 83 | This version of SHA implements a 256 bit hash with 128 bits of |
92 | security against collision attacks. | 84 | security against collision attacks. |
93 | 85 | ||
94 | config CRYPTO_SHA256_S390 | ||
95 | tristate "SHA256 digest algorithm (s390)" | ||
96 | depends on S390 | ||
97 | select CRYPTO_ALGAPI | ||
98 | help | ||
99 | This is the s390 hardware accelerated implementation of the | ||
100 | SHA256 secure hash standard (DFIPS 180-2). | ||
101 | |||
102 | This version of SHA implements a 256 bit hash with 128 bits of | ||
103 | security against collision attacks. | ||
104 | |||
105 | config CRYPTO_SHA512 | 86 | config CRYPTO_SHA512 |
106 | tristate "SHA384 and SHA512 digest algorithms" | 87 | tristate "SHA384 and SHA512 digest algorithms" |
107 | select CRYPTO_ALGAPI | 88 | select CRYPTO_ALGAPI |
@@ -187,14 +168,6 @@ config CRYPTO_DES | |||
187 | help | 168 | help |
188 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). | 169 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). |
189 | 170 | ||
190 | config CRYPTO_DES_S390 | ||
191 | tristate "DES and Triple DES cipher algorithms (s390)" | ||
192 | depends on S390 | ||
193 | select CRYPTO_ALGAPI | ||
194 | select CRYPTO_BLKCIPHER | ||
195 | help | ||
196 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). | ||
197 | |||
198 | config CRYPTO_BLOWFISH | 171 | config CRYPTO_BLOWFISH |
199 | tristate "Blowfish cipher algorithm" | 172 | tristate "Blowfish cipher algorithm" |
200 | select CRYPTO_ALGAPI | 173 | select CRYPTO_ALGAPI |
@@ -336,28 +309,6 @@ config CRYPTO_AES_X86_64 | |||
336 | 309 | ||
337 | See <http://csrc.nist.gov/encryption/aes/> for more information. | 310 | See <http://csrc.nist.gov/encryption/aes/> for more information. |
338 | 311 | ||
339 | config CRYPTO_AES_S390 | ||
340 | tristate "AES cipher algorithms (s390)" | ||
341 | depends on S390 | ||
342 | select CRYPTO_ALGAPI | ||
343 | select CRYPTO_BLKCIPHER | ||
344 | help | ||
345 | This is the s390 hardware accelerated implementation of the | ||
346 | AES cipher algorithms (FIPS-197). AES uses the Rijndael | ||
347 | algorithm. | ||
348 | |||
349 | Rijndael appears to be consistently a very good performer in | ||
350 | both hardware and software across a wide range of computing | ||
351 | environments regardless of its use in feedback or non-feedback | ||
352 | modes. Its key setup time is excellent, and its key agility is | ||
353 | good. Rijndael's very low memory requirements make it very well | ||
354 | suited for restricted-space environments, in which it also | ||
355 | demonstrates excellent performance. Rijndael's operations are | ||
356 | among the easiest to defend against power and timing attacks. | ||
357 | |||
358 | On s390 the System z9-109 currently only supports the key size | ||
359 | of 128 bit. | ||
360 | |||
361 | config CRYPTO_CAST5 | 312 | config CRYPTO_CAST5 |
362 | tristate "CAST5 (CAST-128) cipher algorithm" | 313 | tristate "CAST5 (CAST-128) cipher algorithm" |
363 | select CRYPTO_ALGAPI | 314 | select CRYPTO_ALGAPI |
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 879250d3d069..ff8c4beaace4 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig | |||
@@ -51,6 +51,8 @@ config CRYPTO_DEV_PADLOCK_SHA | |||
51 | If unsure say M. The compiled module will be | 51 | If unsure say M. The compiled module will be |
52 | called padlock-sha.ko | 52 | called padlock-sha.ko |
53 | 53 | ||
54 | source "arch/s390/crypto/Kconfig" | ||
55 | |||
54 | config CRYPTO_DEV_GEODE | 56 | config CRYPTO_DEV_GEODE |
55 | tristate "Support for the Geode LX AES engine" | 57 | tristate "Support for the Geode LX AES engine" |
56 | depends on CRYPTO && X86_32 && PCI | 58 | depends on CRYPTO && X86_32 && PCI |
diff --git a/drivers/s390/Kconfig b/drivers/s390/Kconfig index ae89b9b88743..165af398fdea 100644 --- a/drivers/s390/Kconfig +++ b/drivers/s390/Kconfig | |||
@@ -103,14 +103,8 @@ config CCW_CONSOLE | |||
103 | depends on TN3215_CONSOLE || TN3270_CONSOLE | 103 | depends on TN3215_CONSOLE || TN3270_CONSOLE |
104 | default y | 104 | default y |
105 | 105 | ||
106 | config SCLP | ||
107 | bool "Support for SCLP" | ||
108 | help | ||
109 | Include support for the SCLP interface to the service element. | ||
110 | |||
111 | config SCLP_TTY | 106 | config SCLP_TTY |
112 | bool "Support for SCLP line mode terminal" | 107 | bool "Support for SCLP line mode terminal" |
113 | depends on SCLP | ||
114 | help | 108 | help |
115 | Include support for IBM SCLP line-mode terminals. | 109 | Include support for IBM SCLP line-mode terminals. |
116 | 110 | ||
@@ -123,7 +117,6 @@ config SCLP_CONSOLE | |||
123 | 117 | ||
124 | config SCLP_VT220_TTY | 118 | config SCLP_VT220_TTY |
125 | bool "Support for SCLP VT220-compatible terminal" | 119 | bool "Support for SCLP VT220-compatible terminal" |
126 | depends on SCLP | ||
127 | help | 120 | help |
128 | Include support for an IBM SCLP VT220-compatible terminal. | 121 | Include support for an IBM SCLP VT220-compatible terminal. |
129 | 122 | ||
@@ -136,7 +129,6 @@ config SCLP_VT220_CONSOLE | |||
136 | 129 | ||
137 | config SCLP_CPI | 130 | config SCLP_CPI |
138 | tristate "Control-Program Identification" | 131 | tristate "Control-Program Identification" |
139 | depends on SCLP | ||
140 | help | 132 | help |
141 | This option enables the hardware console interface for system | 133 | This option enables the hardware console interface for system |
142 | identification. This is commonly used for workload management and | 134 | identification. This is commonly used for workload management and |
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 9803c9352d78..5a888704a8d0 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile | |||
@@ -2,6 +2,8 @@ | |||
2 | # Makefile for the S/390 specific device drivers | 2 | # Makefile for the S/390 specific device drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | ||
6 | |||
5 | obj-y += s390mach.o sysinfo.o s390_rdev.o | 7 | obj-y += s390mach.o sysinfo.o s390_rdev.o |
6 | obj-y += cio/ block/ char/ crypto/ net/ scsi/ | 8 | obj-y += cio/ block/ char/ crypto/ net/ scsi/ |
7 | 9 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 492b68bcd7cc..eb5dc62f0d9c 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -37,6 +37,7 @@ | |||
37 | */ | 37 | */ |
38 | debug_info_t *dasd_debug_area; | 38 | debug_info_t *dasd_debug_area; |
39 | struct dasd_discipline *dasd_diag_discipline_pointer; | 39 | struct dasd_discipline *dasd_diag_discipline_pointer; |
40 | void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | ||
40 | 41 | ||
41 | MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); | 42 | MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); |
42 | MODULE_DESCRIPTION("Linux on S/390 DASD device driver," | 43 | MODULE_DESCRIPTION("Linux on S/390 DASD device driver," |
@@ -51,7 +52,6 @@ static int dasd_alloc_queue(struct dasd_device * device); | |||
51 | static void dasd_setup_queue(struct dasd_device * device); | 52 | static void dasd_setup_queue(struct dasd_device * device); |
52 | static void dasd_free_queue(struct dasd_device * device); | 53 | static void dasd_free_queue(struct dasd_device * device); |
53 | static void dasd_flush_request_queue(struct dasd_device *); | 54 | static void dasd_flush_request_queue(struct dasd_device *); |
54 | static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | ||
55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); | 55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); |
56 | static void dasd_tasklet(struct dasd_device *); | 56 | static void dasd_tasklet(struct dasd_device *); |
57 | static void do_kick_device(struct work_struct *); | 57 | static void do_kick_device(struct work_struct *); |
@@ -483,7 +483,7 @@ unsigned int dasd_profile_level = DASD_PROFILE_OFF; | |||
483 | /* | 483 | /* |
484 | * Add profiling information for cqr before execution. | 484 | * Add profiling information for cqr before execution. |
485 | */ | 485 | */ |
486 | static inline void | 486 | static void |
487 | dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, | 487 | dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, |
488 | struct request *req) | 488 | struct request *req) |
489 | { | 489 | { |
@@ -505,7 +505,7 @@ dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr, | |||
505 | /* | 505 | /* |
506 | * Add profiling information for cqr after execution. | 506 | * Add profiling information for cqr after execution. |
507 | */ | 507 | */ |
508 | static inline void | 508 | static void |
509 | dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, | 509 | dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr, |
510 | struct request *req) | 510 | struct request *req) |
511 | { | 511 | { |
@@ -1022,8 +1022,6 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1022 | irb->scsw.cstat == 0 && | 1022 | irb->scsw.cstat == 0 && |
1023 | !irb->esw.esw0.erw.cons) | 1023 | !irb->esw.esw0.erw.cons) |
1024 | era = dasd_era_none; | 1024 | era = dasd_era_none; |
1025 | else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) | ||
1026 | era = dasd_era_fatal; /* don't recover this request */ | ||
1027 | else if (irb->esw.esw0.erw.cons) | 1025 | else if (irb->esw.esw0.erw.cons) |
1028 | era = device->discipline->examine_error(cqr, irb); | 1026 | era = device->discipline->examine_error(cqr, irb); |
1029 | else | 1027 | else |
@@ -1104,7 +1102,7 @@ __dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr) | |||
1104 | /* | 1102 | /* |
1105 | * Process ccw request queue. | 1103 | * Process ccw request queue. |
1106 | */ | 1104 | */ |
1107 | static inline void | 1105 | static void |
1108 | __dasd_process_ccw_queue(struct dasd_device * device, | 1106 | __dasd_process_ccw_queue(struct dasd_device * device, |
1109 | struct list_head *final_queue) | 1107 | struct list_head *final_queue) |
1110 | { | 1108 | { |
@@ -1127,7 +1125,9 @@ restart: | |||
1127 | cqr->status = DASD_CQR_FAILED; | 1125 | cqr->status = DASD_CQR_FAILED; |
1128 | cqr->stopclk = get_clock(); | 1126 | cqr->stopclk = get_clock(); |
1129 | } else { | 1127 | } else { |
1130 | if (cqr->irb.esw.esw0.erw.cons) { | 1128 | if (cqr->irb.esw.esw0.erw.cons && |
1129 | test_bit(DASD_CQR_FLAGS_USE_ERP, | ||
1130 | &cqr->flags)) { | ||
1131 | erp_fn = device->discipline-> | 1131 | erp_fn = device->discipline-> |
1132 | erp_action(cqr); | 1132 | erp_action(cqr); |
1133 | erp_fn(cqr); | 1133 | erp_fn(cqr); |
@@ -1181,7 +1181,7 @@ dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data) | |||
1181 | /* | 1181 | /* |
1182 | * Fetch requests from the block device queue. | 1182 | * Fetch requests from the block device queue. |
1183 | */ | 1183 | */ |
1184 | static inline void | 1184 | static void |
1185 | __dasd_process_blk_queue(struct dasd_device * device) | 1185 | __dasd_process_blk_queue(struct dasd_device * device) |
1186 | { | 1186 | { |
1187 | request_queue_t *queue; | 1187 | request_queue_t *queue; |
@@ -1232,6 +1232,19 @@ __dasd_process_blk_queue(struct dasd_device * device) | |||
1232 | if (IS_ERR(cqr)) { | 1232 | if (IS_ERR(cqr)) { |
1233 | if (PTR_ERR(cqr) == -ENOMEM) | 1233 | if (PTR_ERR(cqr) == -ENOMEM) |
1234 | break; /* terminate request queue loop */ | 1234 | break; /* terminate request queue loop */ |
1235 | if (PTR_ERR(cqr) == -EAGAIN) { | ||
1236 | /* | ||
1237 | * The current request cannot be build right | ||
1238 | * now, we have to try later. If this request | ||
1239 | * is the head-of-queue we stop the device | ||
1240 | * for 1/2 second. | ||
1241 | */ | ||
1242 | if (!list_empty(&device->ccw_queue)) | ||
1243 | break; | ||
1244 | device->stopped |= DASD_STOPPED_PENDING; | ||
1245 | dasd_set_timer(device, HZ/2); | ||
1246 | break; | ||
1247 | } | ||
1235 | DBF_DEV_EVENT(DBF_ERR, device, | 1248 | DBF_DEV_EVENT(DBF_ERR, device, |
1236 | "CCW creation failed (rc=%ld) " | 1249 | "CCW creation failed (rc=%ld) " |
1237 | "on request %p", | 1250 | "on request %p", |
@@ -1254,7 +1267,7 @@ __dasd_process_blk_queue(struct dasd_device * device) | |||
1254 | * Take a look at the first request on the ccw queue and check | 1267 | * Take a look at the first request on the ccw queue and check |
1255 | * if it reached its expire time. If so, terminate the IO. | 1268 | * if it reached its expire time. If so, terminate the IO. |
1256 | */ | 1269 | */ |
1257 | static inline void | 1270 | static void |
1258 | __dasd_check_expire(struct dasd_device * device) | 1271 | __dasd_check_expire(struct dasd_device * device) |
1259 | { | 1272 | { |
1260 | struct dasd_ccw_req *cqr; | 1273 | struct dasd_ccw_req *cqr; |
@@ -1285,7 +1298,7 @@ __dasd_check_expire(struct dasd_device * device) | |||
1285 | * Take a look at the first request on the ccw queue and check | 1298 | * Take a look at the first request on the ccw queue and check |
1286 | * if it needs to be started. | 1299 | * if it needs to be started. |
1287 | */ | 1300 | */ |
1288 | static inline void | 1301 | static void |
1289 | __dasd_start_head(struct dasd_device * device) | 1302 | __dasd_start_head(struct dasd_device * device) |
1290 | { | 1303 | { |
1291 | struct dasd_ccw_req *cqr; | 1304 | struct dasd_ccw_req *cqr; |
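The comment added in __dasd_process_blk_queue() above spells out the deferred-retry policy: if the head-of-queue request cannot be built yet (the discipline returned -EAGAIN, for example the clock-not-in-sync XRC case handled in dasd_eckd.c later in this diff), the device is flagged as stopped and a timer restarts queue processing after half a second. A rough, generic illustration of that stop-and-re-arm idea, using the old-style timer API of this kernel generation and purely hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Hypothetical per-device state, only for this sketch. */
struct demo_device {
	unsigned int stopped;		/* cf. DASD_STOPPED_PENDING */
	struct timer_list timer;	/* re-arms queue processing */
};

#define DEMO_STOPPED_PENDING	0x01

static void demo_kick_queue(unsigned long data)
{
	struct demo_device *dev = (struct demo_device *) data;

	dev->stopped &= ~DEMO_STOPPED_PENDING;
	/* ...re-run the request queue here... */
}

static void demo_init_timer(struct demo_device *dev)
{
	setup_timer(&dev->timer, demo_kick_queue, (unsigned long) dev);
}

/* Request could not be built yet: pause this device for 1/2 second. */
static void demo_defer(struct demo_device *dev)
{
	dev->stopped |= DEMO_STOPPED_PENDING;
	mod_timer(&dev->timer, jiffies + HZ / 2);
}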
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 4d01040c2c63..8b9d68f6e016 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -170,7 +170,6 @@ dasd_3990_erp_examine(struct dasd_ccw_req * cqr, struct irb * irb) | |||
170 | /* log the erp chain if fatal error occurred */ | 170 | /* log the erp chain if fatal error occurred */ |
171 | if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { | 171 | if ((era == dasd_era_fatal) && (device->state >= DASD_STATE_READY)) { |
172 | dasd_log_sense(cqr, irb); | 172 | dasd_log_sense(cqr, irb); |
173 | dasd_log_ccw(cqr, 0, irb->scsw.cpa); | ||
174 | } | 173 | } |
175 | 174 | ||
176 | return era; | 175 | return era; |
@@ -2640,7 +2639,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2640 | 2639 | ||
2641 | struct dasd_ccw_req *erp = NULL; | 2640 | struct dasd_ccw_req *erp = NULL; |
2642 | struct dasd_device *device = cqr->device; | 2641 | struct dasd_device *device = cqr->device; |
2643 | __u32 cpa = cqr->irb.scsw.cpa; | ||
2644 | struct dasd_ccw_req *temp_erp = NULL; | 2642 | struct dasd_ccw_req *temp_erp = NULL; |
2645 | 2643 | ||
2646 | if (device->features & DASD_FEATURE_ERPLOG) { | 2644 | if (device->features & DASD_FEATURE_ERPLOG) { |
@@ -2706,9 +2704,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) | |||
2706 | } | 2704 | } |
2707 | } | 2705 | } |
2708 | 2706 | ||
2709 | if (erp->status == DASD_CQR_FAILED) | ||
2710 | dasd_log_ccw(erp, 1, cpa); | ||
2711 | |||
2712 | /* enqueue added ERP request */ | 2707 | /* enqueue added ERP request */ |
2713 | if (erp->status == DASD_CQR_FILLED) { | 2708 | if (erp->status == DASD_CQR_FILLED) { |
2714 | erp->status = DASD_CQR_QUEUED; | 2709 | erp->status = DASD_CQR_QUEUED; |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 5943266152f5..ed70852cc915 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -136,7 +136,7 @@ __setup ("dasd=", dasd_call_setup); | |||
136 | /* | 136 | /* |
137 | * Read a device busid/devno from a string. | 137 | * Read a device busid/devno from a string. |
138 | */ | 138 | */ |
139 | static inline int | 139 | static int |
140 | dasd_busid(char **str, int *id0, int *id1, int *devno) | 140 | dasd_busid(char **str, int *id0, int *id1, int *devno) |
141 | { | 141 | { |
142 | int val, old_style; | 142 | int val, old_style; |
@@ -182,7 +182,7 @@ dasd_busid(char **str, int *id0, int *id1, int *devno) | |||
182 | * only one: "ro" for read-only devices. The default feature set | 182 | * only one: "ro" for read-only devices. The default feature set |
183 | * is empty (value 0). | 183 | * is empty (value 0). |
184 | */ | 184 | */ |
185 | static inline int | 185 | static int |
186 | dasd_feature_list(char *str, char **endp) | 186 | dasd_feature_list(char *str, char **endp) |
187 | { | 187 | { |
188 | int features, len, rc; | 188 | int features, len, rc; |
@@ -341,7 +341,7 @@ dasd_parse_range( char *parsestring ) { | |||
341 | return ERR_PTR(-EINVAL); | 341 | return ERR_PTR(-EINVAL); |
342 | } | 342 | } |
343 | 343 | ||
344 | static inline char * | 344 | static char * |
345 | dasd_parse_next_element( char *parsestring ) { | 345 | dasd_parse_next_element( char *parsestring ) { |
346 | char * residual_str; | 346 | char * residual_str; |
347 | residual_str = dasd_parse_keyword(parsestring); | 347 | residual_str = dasd_parse_keyword(parsestring); |
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 53db58a68617..ab782bb46ac1 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL"); | |||
43 | #define DIAG_MAX_RETRIES 32 | 43 | #define DIAG_MAX_RETRIES 32 |
44 | #define DIAG_TIMEOUT 50 * HZ | 44 | #define DIAG_TIMEOUT 50 * HZ |
45 | 45 | ||
46 | struct dasd_discipline dasd_diag_discipline; | 46 | static struct dasd_discipline dasd_diag_discipline; |
47 | 47 | ||
48 | struct dasd_diag_private { | 48 | struct dasd_diag_private { |
49 | struct dasd_diag_characteristics rdc_data; | 49 | struct dasd_diag_characteristics rdc_data; |
@@ -90,7 +90,7 @@ static inline int dia250(void *iob, int cmd) | |||
90 | * block offset. On success, return zero and set end_block to contain the | 90 | * block offset. On success, return zero and set end_block to contain the |
91 | * number of blocks on the device minus the specified offset. Return non-zero | 91 | * number of blocks on the device minus the specified offset. Return non-zero |
92 | * otherwise. */ | 92 | * otherwise. */ |
93 | static __inline__ int | 93 | static inline int |
94 | mdsk_init_io(struct dasd_device *device, unsigned int blocksize, | 94 | mdsk_init_io(struct dasd_device *device, unsigned int blocksize, |
95 | blocknum_t offset, blocknum_t *end_block) | 95 | blocknum_t offset, blocknum_t *end_block) |
96 | { | 96 | { |
@@ -117,7 +117,7 @@ mdsk_init_io(struct dasd_device *device, unsigned int blocksize, | |||
117 | 117 | ||
118 | /* Remove block I/O environment for device. Return zero on success, non-zero | 118 | /* Remove block I/O environment for device. Return zero on success, non-zero |
119 | * otherwise. */ | 119 | * otherwise. */ |
120 | static __inline__ int | 120 | static inline int |
121 | mdsk_term_io(struct dasd_device * device) | 121 | mdsk_term_io(struct dasd_device * device) |
122 | { | 122 | { |
123 | struct dasd_diag_private *private; | 123 | struct dasd_diag_private *private; |
@@ -576,7 +576,7 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req, | |||
576 | "dump sense not available for DIAG data"); | 576 | "dump sense not available for DIAG data"); |
577 | } | 577 | } |
578 | 578 | ||
579 | struct dasd_discipline dasd_diag_discipline = { | 579 | static struct dasd_discipline dasd_diag_discipline = { |
580 | .owner = THIS_MODULE, | 580 | .owner = THIS_MODULE, |
581 | .name = "DIAG", | 581 | .name = "DIAG", |
582 | .ebcname = "DIAG", | 582 | .ebcname = "DIAG", |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fdaa471e845f..cecab2274a6e 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -134,44 +134,7 @@ ceil_quot(unsigned int d1, unsigned int d2) | |||
134 | return (d1 + (d2 - 1)) / d2; | 134 | return (d1 + (d2 - 1)) / d2; |
135 | } | 135 | } |
136 | 136 | ||
137 | static inline int | 137 | static unsigned int |
138 | bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl) | ||
139 | { | ||
140 | unsigned int fl1, fl2, int1, int2; | ||
141 | int bpr; | ||
142 | |||
143 | switch (rdc->formula) { | ||
144 | case 0x01: | ||
145 | fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc)); | ||
146 | fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0, | ||
147 | ECKD_F1(rdc)); | ||
148 | bpr = fl1 + fl2; | ||
149 | break; | ||
150 | case 0x02: | ||
151 | int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); | ||
152 | int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1); | ||
153 | fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl + | ||
154 | ECKD_F6(rdc) + ECKD_F4(rdc) * int1, | ||
155 | ECKD_F1(rdc)); | ||
156 | fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl + | ||
157 | ECKD_F6(rdc) + ECKD_F4(rdc) * int2, | ||
158 | ECKD_F1(rdc)); | ||
159 | bpr = fl1 + fl2; | ||
160 | break; | ||
161 | default: | ||
162 | bpr = 0; | ||
163 | break; | ||
164 | } | ||
165 | return bpr; | ||
166 | } | ||
167 | |||
168 | static inline unsigned int | ||
169 | bytes_per_track(struct dasd_eckd_characteristics *rdc) | ||
170 | { | ||
171 | return *(unsigned int *) (rdc->byte_per_track) >> 8; | ||
172 | } | ||
173 | |||
174 | static inline unsigned int | ||
175 | recs_per_track(struct dasd_eckd_characteristics * rdc, | 138 | recs_per_track(struct dasd_eckd_characteristics * rdc, |
176 | unsigned int kl, unsigned int dl) | 139 | unsigned int kl, unsigned int dl) |
177 | { | 140 | { |
@@ -204,37 +167,39 @@ recs_per_track(struct dasd_eckd_characteristics * rdc, | |||
204 | return 0; | 167 | return 0; |
205 | } | 168 | } |
206 | 169 | ||
207 | static inline void | 170 | static int |
208 | check_XRC (struct ccw1 *de_ccw, | 171 | check_XRC (struct ccw1 *de_ccw, |
209 | struct DE_eckd_data *data, | 172 | struct DE_eckd_data *data, |
210 | struct dasd_device *device) | 173 | struct dasd_device *device) |
211 | { | 174 | { |
212 | struct dasd_eckd_private *private; | 175 | struct dasd_eckd_private *private; |
176 | int rc; | ||
213 | 177 | ||
214 | private = (struct dasd_eckd_private *) device->private; | 178 | private = (struct dasd_eckd_private *) device->private; |
179 | if (!private->rdc_data.facilities.XRC_supported) | ||
180 | return 0; | ||
215 | 181 | ||
216 | /* switch on System Time Stamp - needed for XRC Support */ | 182 | /* switch on System Time Stamp - needed for XRC Support */ |
217 | if (private->rdc_data.facilities.XRC_supported) { | 183 | data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ |
218 | 184 | data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ | |
219 | data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */ | ||
220 | data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */ | ||
221 | |||
222 | data->ep_sys_time = get_clock (); | ||
223 | |||
224 | de_ccw->count = sizeof (struct DE_eckd_data); | ||
225 | de_ccw->flags |= CCW_FLAG_SLI; | ||
226 | } | ||
227 | 185 | ||
228 | return; | 186 | rc = get_sync_clock(&data->ep_sys_time); |
187 | /* Ignore return code if sync clock is switched off. */ | ||
188 | if (rc == -ENOSYS || rc == -EACCES) | ||
189 | rc = 0; | ||
229 | 190 | ||
230 | } /* end check_XRC */ | 191 | de_ccw->count = sizeof (struct DE_eckd_data); |
192 | de_ccw->flags |= CCW_FLAG_SLI; | ||
193 | return rc; | ||
194 | } | ||
231 | 195 | ||
232 | static inline void | 196 | static int |
233 | define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | 197 | define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, |
234 | int totrk, int cmd, struct dasd_device * device) | 198 | int totrk, int cmd, struct dasd_device * device) |
235 | { | 199 | { |
236 | struct dasd_eckd_private *private; | 200 | struct dasd_eckd_private *private; |
237 | struct ch_t geo, beg, end; | 201 | struct ch_t geo, beg, end; |
202 | int rc = 0; | ||
238 | 203 | ||
239 | private = (struct dasd_eckd_private *) device->private; | 204 | private = (struct dasd_eckd_private *) device->private; |
240 | 205 | ||
@@ -263,12 +228,12 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
263 | case DASD_ECKD_CCW_WRITE_KD_MT: | 228 | case DASD_ECKD_CCW_WRITE_KD_MT: |
264 | data->mask.perm = 0x02; | 229 | data->mask.perm = 0x02; |
265 | data->attributes.operation = private->attrib.operation; | 230 | data->attributes.operation = private->attrib.operation; |
266 | check_XRC (ccw, data, device); | 231 | rc = check_XRC (ccw, data, device); |
267 | break; | 232 | break; |
268 | case DASD_ECKD_CCW_WRITE_CKD: | 233 | case DASD_ECKD_CCW_WRITE_CKD: |
269 | case DASD_ECKD_CCW_WRITE_CKD_MT: | 234 | case DASD_ECKD_CCW_WRITE_CKD_MT: |
270 | data->attributes.operation = DASD_BYPASS_CACHE; | 235 | data->attributes.operation = DASD_BYPASS_CACHE; |
271 | check_XRC (ccw, data, device); | 236 | rc = check_XRC (ccw, data, device); |
272 | break; | 237 | break; |
273 | case DASD_ECKD_CCW_ERASE: | 238 | case DASD_ECKD_CCW_ERASE: |
274 | case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: | 239 | case DASD_ECKD_CCW_WRITE_HOME_ADDRESS: |
@@ -276,7 +241,7 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
276 | data->mask.perm = 0x3; | 241 | data->mask.perm = 0x3; |
277 | data->mask.auth = 0x1; | 242 | data->mask.auth = 0x1; |
278 | data->attributes.operation = DASD_BYPASS_CACHE; | 243 | data->attributes.operation = DASD_BYPASS_CACHE; |
279 | check_XRC (ccw, data, device); | 244 | rc = check_XRC (ccw, data, device); |
280 | break; | 245 | break; |
281 | default: | 246 | default: |
282 | DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); | 247 | DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd); |
@@ -312,9 +277,10 @@ define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk, | |||
312 | data->beg_ext.head = beg.head; | 277 | data->beg_ext.head = beg.head; |
313 | data->end_ext.cyl = end.cyl; | 278 | data->end_ext.cyl = end.cyl; |
314 | data->end_ext.head = end.head; | 279 | data->end_ext.head = end.head; |
280 | return rc; | ||
315 | } | 281 | } |
316 | 282 | ||
317 | static inline void | 283 | static void |
318 | locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, | 284 | locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk, |
319 | int rec_on_trk, int no_rec, int cmd, | 285 | int rec_on_trk, int no_rec, int cmd, |
320 | struct dasd_device * device, int reclen) | 286 | struct dasd_device * device, int reclen) |
@@ -548,7 +514,7 @@ dasd_eckd_read_conf(struct dasd_device *device) | |||
548 | /* | 514 | /* |
549 | * Build CP for Perform Subsystem Function - SSC. | 515 | * Build CP for Perform Subsystem Function - SSC. |
550 | */ | 516 | */ |
551 | struct dasd_ccw_req * | 517 | static struct dasd_ccw_req * |
552 | dasd_eckd_build_psf_ssc(struct dasd_device *device) | 518 | dasd_eckd_build_psf_ssc(struct dasd_device *device) |
553 | { | 519 | { |
554 | struct dasd_ccw_req *cqr; | 520 | struct dasd_ccw_req *cqr; |
@@ -1200,7 +1166,12 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req) | |||
1200 | return cqr; | 1166 | return cqr; |
1201 | ccw = cqr->cpaddr; | 1167 | ccw = cqr->cpaddr; |
1202 | /* First ccw is define extent. */ | 1168 | /* First ccw is define extent. */ |
1203 | define_extent(ccw++, cqr->data, first_trk, last_trk, cmd, device); | 1169 | if (define_extent(ccw++, cqr->data, first_trk, |
1170 | last_trk, cmd, device) == -EAGAIN) { | ||
1171 | /* Clock not in sync and XRC is enabled. Try again later. */ | ||
1172 | dasd_sfree_request(cqr, device); | ||
1173 | return ERR_PTR(-EAGAIN); | ||
1174 | } | ||
1204 | /* Build locate_record+read/write/ccws. */ | 1175 | /* Build locate_record+read/write/ccws. */ |
1205 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); | 1176 | idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data)); |
1206 | LO_data = (struct LO_eckd_data *) (idaws + cidaw); | 1177 | LO_data = (struct LO_eckd_data *) (idaws + cidaw); |
@@ -1380,7 +1351,7 @@ dasd_eckd_release(struct dasd_device *device) | |||
1380 | cqr->device = device; | 1351 | cqr->device = device; |
1381 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1352 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1382 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1353 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1383 | cqr->retries = 0; | 1354 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
1384 | cqr->expires = 2 * HZ; | 1355 | cqr->expires = 2 * HZ; |
1385 | cqr->buildclk = get_clock(); | 1356 | cqr->buildclk = get_clock(); |
1386 | cqr->status = DASD_CQR_FILLED; | 1357 | cqr->status = DASD_CQR_FILLED; |
@@ -1420,7 +1391,7 @@ dasd_eckd_reserve(struct dasd_device *device) | |||
1420 | cqr->device = device; | 1391 | cqr->device = device; |
1421 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1392 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1422 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1393 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1423 | cqr->retries = 0; | 1394 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
1424 | cqr->expires = 2 * HZ; | 1395 | cqr->expires = 2 * HZ; |
1425 | cqr->buildclk = get_clock(); | 1396 | cqr->buildclk = get_clock(); |
1426 | cqr->status = DASD_CQR_FILLED; | 1397 | cqr->status = DASD_CQR_FILLED; |
@@ -1459,7 +1430,7 @@ dasd_eckd_steal_lock(struct dasd_device *device) | |||
1459 | cqr->device = device; | 1430 | cqr->device = device; |
1460 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); | 1431 | clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); |
1461 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); | 1432 | set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); |
1462 | cqr->retries = 0; | 1433 | cqr->retries = 2; /* set retry counter to enable basic ERP */ |
1463 | cqr->expires = 2 * HZ; | 1434 | cqr->expires = 2 * HZ; |
1464 | cqr->buildclk = get_clock(); | 1435 | cqr->buildclk = get_clock(); |
1465 | cqr->status = DASD_CQR_FILLED; | 1436 | cqr->status = DASD_CQR_FILLED; |
@@ -1609,7 +1580,7 @@ dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp) | |||
1609 | * Dump the range of CCWs into 'page' buffer | 1580 | * Dump the range of CCWs into 'page' buffer |
1610 | * and return number of printed chars. | 1581 | * and return number of printed chars. |
1611 | */ | 1582 | */ |
1612 | static inline int | 1583 | static int |
1613 | dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) | 1584 | dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) |
1614 | { | 1585 | { |
1615 | int len, count; | 1586 | int len, count; |
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index e0bf30ebb215..6cedc914077e 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c | |||
@@ -658,18 +658,24 @@ static struct file_operations dasd_eer_fops = { | |||
658 | .owner = THIS_MODULE, | 658 | .owner = THIS_MODULE, |
659 | }; | 659 | }; |
660 | 660 | ||
661 | static struct miscdevice dasd_eer_dev = { | 661 | static struct miscdevice *dasd_eer_dev = NULL; |
662 | .minor = MISC_DYNAMIC_MINOR, | ||
663 | .name = "dasd_eer", | ||
664 | .fops = &dasd_eer_fops, | ||
665 | }; | ||
666 | 662 | ||
667 | int __init dasd_eer_init(void) | 663 | int __init dasd_eer_init(void) |
668 | { | 664 | { |
669 | int rc; | 665 | int rc; |
670 | 666 | ||
671 | rc = misc_register(&dasd_eer_dev); | 667 | dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL); |
668 | if (!dasd_eer_dev) | ||
669 | return -ENOMEM; | ||
670 | |||
671 | dasd_eer_dev->minor = MISC_DYNAMIC_MINOR; | ||
672 | dasd_eer_dev->name = "dasd_eer"; | ||
673 | dasd_eer_dev->fops = &dasd_eer_fops; | ||
674 | |||
675 | rc = misc_register(dasd_eer_dev); | ||
672 | if (rc) { | 676 | if (rc) { |
677 | kfree(dasd_eer_dev); | ||
678 | dasd_eer_dev = NULL; | ||
673 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " | 679 | MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " |
674 | "register misc device"); | 680 | "register misc device"); |
675 | return rc; | 681 | return rc; |
@@ -680,5 +686,9 @@ int __init dasd_eer_init(void) | |||
680 | 686 | ||
681 | void dasd_eer_exit(void) | 687 | void dasd_eer_exit(void) |
682 | { | 688 | { |
683 | WARN_ON(misc_deregister(&dasd_eer_dev) != 0); | 689 | if (dasd_eer_dev) { |
690 | WARN_ON(misc_deregister(dasd_eer_dev) != 0); | ||
691 | kfree(dasd_eer_dev); | ||
692 | dasd_eer_dev = NULL; | ||
693 | } | ||
684 | } | 694 | } |
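The dasd_eer change above replaces the static struct miscdevice with one allocated at init time, registered under a dynamic minor and freed again on deregistration or on a failed registration. The general shape of that pattern, with a hypothetical device name and a bare-bones fops:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct miscdevice *demo_misc;	/* hypothetical device */

static struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static int __init demo_misc_init(void)
{
	int rc;

	demo_misc = kzalloc(sizeof(*demo_misc), GFP_KERNEL);
	if (!demo_misc)
		return -ENOMEM;
	demo_misc->minor = MISC_DYNAMIC_MINOR;	/* let the kernel pick */
	demo_misc->name = "demo_misc";
	demo_misc->fops = &demo_fops;
	rc = misc_register(demo_misc);
	if (rc) {
		kfree(demo_misc);
		demo_misc = NULL;
	}
	return rc;
}

static void __exit demo_misc_exit(void)
{
	if (demo_misc) {
		misc_deregister(demo_misc);
		kfree(demo_misc);
	}
}

module_init(demo_misc_init);
module_exit(demo_misc_exit);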
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c index 58a65097922b..caa5d91420f8 100644 --- a/drivers/s390/block/dasd_erp.c +++ b/drivers/s390/block/dasd_erp.c | |||
@@ -152,25 +152,6 @@ dasd_default_erp_postaction(struct dasd_ccw_req * cqr) | |||
152 | 152 | ||
153 | } /* end default_erp_postaction */ | 153 | } /* end default_erp_postaction */ |
154 | 154 | ||
155 | /* | ||
156 | * Print the hex dump of the memory used by a request. This includes | ||
157 | * all error recovery ccws that have been chained in from of the | ||
158 | * real request. | ||
159 | */ | ||
160 | static inline void | ||
161 | hex_dump_memory(struct dasd_device *device, void *data, int len) | ||
162 | { | ||
163 | int *pint; | ||
164 | |||
165 | pint = (int *) data; | ||
166 | while (len > 0) { | ||
167 | DEV_MESSAGE(KERN_ERR, device, "%p: %08x %08x %08x %08x", | ||
168 | pint, pint[0], pint[1], pint[2], pint[3]); | ||
169 | pint += 4; | ||
170 | len -= 16; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | void | 155 | void |
175 | dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) | 156 | dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) |
176 | { | 157 | { |
@@ -182,69 +163,8 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb) | |||
182 | device->discipline->dump_sense(device, cqr, irb); | 163 | device->discipline->dump_sense(device, cqr, irb); |
183 | } | 164 | } |
184 | 165 | ||
185 | void | ||
186 | dasd_log_ccw(struct dasd_ccw_req * cqr, int caller, __u32 cpa) | ||
187 | { | ||
188 | struct dasd_device *device; | ||
189 | struct dasd_ccw_req *lcqr; | ||
190 | struct ccw1 *ccw; | ||
191 | int cplength; | ||
192 | |||
193 | device = cqr->device; | ||
194 | /* log the channel program */ | ||
195 | for (lcqr = cqr; lcqr != NULL; lcqr = lcqr->refers) { | ||
196 | DEV_MESSAGE(KERN_ERR, device, | ||
197 | "(%s) ERP chain report for req: %p", | ||
198 | caller == 0 ? "EXAMINE" : "ACTION", lcqr); | ||
199 | hex_dump_memory(device, lcqr, sizeof(struct dasd_ccw_req)); | ||
200 | |||
201 | cplength = 1; | ||
202 | ccw = lcqr->cpaddr; | ||
203 | while (ccw++->flags & (CCW_FLAG_DC | CCW_FLAG_CC)) | ||
204 | cplength++; | ||
205 | |||
206 | if (cplength > 40) { /* log only parts of the CP */ | ||
207 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
208 | "Start of channel program:"); | ||
209 | hex_dump_memory(device, lcqr->cpaddr, | ||
210 | 40*sizeof(struct ccw1)); | ||
211 | |||
212 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
213 | "End of channel program:"); | ||
214 | hex_dump_memory(device, lcqr->cpaddr + cplength - 10, | ||
215 | 10*sizeof(struct ccw1)); | ||
216 | } else { /* log the whole CP */ | ||
217 | DEV_MESSAGE(KERN_ERR, device, "%s", | ||
218 | "Channel program (complete):"); | ||
219 | hex_dump_memory(device, lcqr->cpaddr, | ||
220 | cplength*sizeof(struct ccw1)); | ||
221 | } | ||
222 | |||
223 | if (lcqr != cqr) | ||
224 | continue; | ||
225 | |||
226 | /* | ||
227 | * Log bytes arround failed CCW but only if we did | ||
228 | * not log the whole CP of the CCW is outside the | ||
229 | * logged CP. | ||
230 | */ | ||
231 | if (cplength > 40 || | ||
232 | ((addr_t) cpa < (addr_t) lcqr->cpaddr && | ||
233 | (addr_t) cpa > (addr_t) (lcqr->cpaddr + cplength + 4))) { | ||
234 | |||
235 | DEV_MESSAGE(KERN_ERR, device, | ||
236 | "Failed CCW (%p) (area):", | ||
237 | (void *) (long) cpa); | ||
238 | hex_dump_memory(device, cqr->cpaddr - 10, | ||
239 | 20*sizeof(struct ccw1)); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | } /* end log_erp_chain */ | ||
244 | |||
245 | EXPORT_SYMBOL(dasd_default_erp_action); | 166 | EXPORT_SYMBOL(dasd_default_erp_action); |
246 | EXPORT_SYMBOL(dasd_default_erp_postaction); | 167 | EXPORT_SYMBOL(dasd_default_erp_postaction); |
247 | EXPORT_SYMBOL(dasd_alloc_erp_request); | 168 | EXPORT_SYMBOL(dasd_alloc_erp_request); |
248 | EXPORT_SYMBOL(dasd_free_erp_request); | 169 | EXPORT_SYMBOL(dasd_free_erp_request); |
249 | EXPORT_SYMBOL(dasd_log_sense); | 170 | EXPORT_SYMBOL(dasd_log_sense); |
250 | EXPORT_SYMBOL(dasd_log_ccw); | ||
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index b857fd5893fd..be0909e39226 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -75,7 +75,7 @@ static struct ccw_driver dasd_fba_driver = { | |||
75 | .notify = dasd_generic_notify, | 75 | .notify = dasd_generic_notify, |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static inline void | 78 | static void |
79 | define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, | 79 | define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, |
80 | int blksize, int beg, int nr) | 80 | int blksize, int beg, int nr) |
81 | { | 81 | { |
@@ -95,7 +95,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw, | |||
95 | data->ext_end = nr - 1; | 95 | data->ext_end = nr - 1; |
96 | } | 96 | } |
97 | 97 | ||
98 | static inline void | 98 | static void |
99 | locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, | 99 | locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw, |
100 | int block_nr, int block_ct) | 100 | int block_nr, int block_ct) |
101 | { | 101 | { |
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c index d163632101d2..47ba4462708d 100644 --- a/drivers/s390/block/dasd_genhd.c +++ b/drivers/s390/block/dasd_genhd.c | |||
@@ -147,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device) | |||
147 | */ | 147 | */ |
148 | memset(&bpart, 0, sizeof(struct blkpg_partition)); | 148 | memset(&bpart, 0, sizeof(struct blkpg_partition)); |
149 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); | 149 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); |
150 | barg.data = (void __user *) &bpart; | 150 | barg.data = (void __force __user *) &bpart; |
151 | barg.op = BLKPG_DEL_PARTITION; | 151 | barg.op = BLKPG_DEL_PARTITION; |
152 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) | 152 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) |
153 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); | 153 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index fb725e3b08fe..a2cc69e11410 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -559,7 +559,6 @@ struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int, | |||
559 | struct dasd_device *); | 559 | struct dasd_device *); |
560 | void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); | 560 | void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *); |
561 | void dasd_log_sense(struct dasd_ccw_req *, struct irb *); | 561 | void dasd_log_sense(struct dasd_ccw_req *, struct irb *); |
562 | void dasd_log_ccw(struct dasd_ccw_req *, int, __u32); | ||
563 | 562 | ||
564 | /* externals in dasd_3370_erp.c */ | 563 | /* externals in dasd_3370_erp.c */ |
565 | dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); | 564 | dasd_era_t dasd_3370_erp_examine(struct dasd_ccw_req *, struct irb *); |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index bfa010f6dab2..8b7e11815d70 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -28,7 +28,7 @@ static struct proc_dir_entry *dasd_proc_root_entry = NULL; | |||
28 | static struct proc_dir_entry *dasd_devices_entry = NULL; | 28 | static struct proc_dir_entry *dasd_devices_entry = NULL; |
29 | static struct proc_dir_entry *dasd_statistics_entry = NULL; | 29 | static struct proc_dir_entry *dasd_statistics_entry = NULL; |
30 | 30 | ||
31 | static inline char * | 31 | static char * |
32 | dasd_get_user_string(const char __user *user_buf, size_t user_len) | 32 | dasd_get_user_string(const char __user *user_buf, size_t user_len) |
33 | { | 33 | { |
34 | char *buffer; | 34 | char *buffer; |
@@ -154,7 +154,7 @@ static struct file_operations dasd_devices_file_ops = { | |||
154 | .release = seq_release, | 154 | .release = seq_release, |
155 | }; | 155 | }; |
156 | 156 | ||
157 | static inline int | 157 | static int |
158 | dasd_calc_metrics(char *page, char **start, off_t off, | 158 | dasd_calc_metrics(char *page, char **start, off_t off, |
159 | int count, int *eof, int len) | 159 | int count, int *eof, int len) |
160 | { | 160 | { |
@@ -167,8 +167,8 @@ dasd_calc_metrics(char *page, char **start, off_t off, | |||
167 | return len; | 167 | return len; |
168 | } | 168 | } |
169 | 169 | ||
170 | static inline char * | 170 | static char * |
171 | dasd_statistics_array(char *str, int *array, int shift) | 171 | dasd_statistics_array(char *str, unsigned int *array, int shift) |
172 | { | 172 | { |
173 | int i; | 173 | int i; |
174 | 174 | ||
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index be9b05347b4f..1340451ea408 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -102,7 +102,7 @@ dcssblk_release_segment(struct device *dev) | |||
102 | * device needs to be enqueued before the semaphore is | 102 | * device needs to be enqueued before the semaphore is |
103 | * freed. | 103 | * freed. |
104 | */ | 104 | */ |
105 | static inline int | 105 | static int |
106 | dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) | 106 | dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info) |
107 | { | 107 | { |
108 | int minor, found; | 108 | int minor, found; |
@@ -230,7 +230,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
230 | SEGMENT_SHARED); | 230 | SEGMENT_SHARED); |
231 | if (rc < 0) { | 231 | if (rc < 0) { |
232 | BUG_ON(rc == -EINVAL); | 232 | BUG_ON(rc == -EINVAL); |
233 | if (rc == -EIO || rc == -ENOENT) | 233 | if (rc != -EAGAIN) |
234 | goto removeseg; | 234 | goto removeseg; |
235 | } else { | 235 | } else { |
236 | dev_info->is_shared = 1; | 236 | dev_info->is_shared = 1; |
@@ -253,7 +253,7 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch | |||
253 | SEGMENT_EXCLUSIVE); | 253 | SEGMENT_EXCLUSIVE); |
254 | if (rc < 0) { | 254 | if (rc < 0) { |
255 | BUG_ON(rc == -EINVAL); | 255 | BUG_ON(rc == -EINVAL); |
256 | if (rc == -EIO || rc == -ENOENT) | 256 | if (rc != -EAGAIN) |
257 | goto removeseg; | 257 | goto removeseg; |
258 | } else { | 258 | } else { |
259 | dev_info->is_shared = 0; | 259 | dev_info->is_shared = 0; |
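The two dcssblk_shared_store() hunks invert the failure test: instead of removing the segment only on -EIO or -ENOENT, any segment_modify_shared() error other than -EAGAIN now leads to removal, so unexpected error codes fail safe rather than leaving a half-switched segment behind. A minimal standalone sketch of that policy (the helper name is hypothetical):

#include <errno.h>
#include <stdbool.h>

/* Hypothetical helper mirroring the new check: -EAGAIN means the segment is
 * busy and should be left alone; every other failure removes the segment. */
static bool dcssblk_must_remove(int rc)
{
	return rc < 0 && rc != -EAGAIN;
}

int main(void)
{
	/* -EIO still removes the segment, -EAGAIN still does not. */
	return (dcssblk_must_remove(-EIO) && !dcssblk_must_remove(-EAGAIN)) ? 0 : 1;
}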
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index c3e97b4fc186..293e667b50f2 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # S/390 character devices | 2 | # S/390 character devices |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += ctrlchar.o keyboard.o defkeymap.o | 5 | obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ |
6 | sclp_info.o | ||
6 | 7 | ||
7 | obj-$(CONFIG_TN3270) += raw3270.o | 8 | obj-$(CONFIG_TN3270) += raw3270.o |
8 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o | 9 | obj-$(CONFIG_TN3270_CONSOLE) += con3270.o |
@@ -11,7 +12,6 @@ obj-$(CONFIG_TN3270_FS) += fs3270.o | |||
11 | 12 | ||
12 | obj-$(CONFIG_TN3215) += con3215.o | 13 | obj-$(CONFIG_TN3215) += con3215.o |
13 | 14 | ||
14 | obj-$(CONFIG_SCLP) += sclp.o sclp_rw.o sclp_quiesce.o | ||
15 | obj-$(CONFIG_SCLP_TTY) += sclp_tty.o | 15 | obj-$(CONFIG_SCLP_TTY) += sclp_tty.o |
16 | obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o | 16 | obj-$(CONFIG_SCLP_CONSOLE) += sclp_con.o |
17 | obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o | 17 | obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 25b5d7a66417..9a328f14a641 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -1121,7 +1121,7 @@ static const struct tty_operations tty3215_ops = { | |||
1121 | * 3215 tty registration code called from tty_init(). | 1121 | * 3215 tty registration code called from tty_init(). |
1122 | * Most kernel services (incl. kmalloc) are available at this point. | 1122 | * Most kernel services (incl. kmalloc) are available at this point. |
1123 | */ | 1123 | */ |
1124 | int __init | 1124 | static int __init |
1125 | tty3215_init(void) | 1125 | tty3215_init(void) |
1126 | { | 1126 | { |
1127 | struct tty_driver *driver; | 1127 | struct tty_driver *driver; |
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 7566be890688..8e7f2d7633d6 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
@@ -69,8 +69,7 @@ static void con3270_update(struct con3270 *); | |||
69 | /* | 69 | /* |
70 | * Setup timeout for a device. On timeout trigger an update. | 70 | * Setup timeout for a device. On timeout trigger an update. |
71 | */ | 71 | */ |
72 | void | 72 | static void con3270_set_timer(struct con3270 *cp, int expires) |
73 | con3270_set_timer(struct con3270 *cp, int expires) | ||
74 | { | 73 | { |
75 | if (expires == 0) { | 74 | if (expires == 0) { |
76 | if (timer_pending(&cp->timer)) | 75 | if (timer_pending(&cp->timer)) |
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c index 17027d918cf7..564baca01b7c 100644 --- a/drivers/s390/char/defkeymap.c +++ b/drivers/s390/char/defkeymap.c | |||
@@ -5,6 +5,8 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/keyboard.h> | 6 | #include <linux/keyboard.h> |
7 | #include <linux/kd.h> | 7 | #include <linux/kd.h> |
8 | #include <linux/kbd_kern.h> | ||
9 | #include <linux/kbd_diacr.h> | ||
8 | 10 | ||
9 | u_short plain_map[NR_KEYS] = { | 11 | u_short plain_map[NR_KEYS] = { |
10 | 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, | 12 | 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 0893d306ae80..e1a746269c4c 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include "raw3270.h" | 23 | #include "raw3270.h" |
24 | #include "ctrlchar.h" | 24 | #include "ctrlchar.h" |
25 | 25 | ||
26 | struct raw3270_fn fs3270_fn; | 26 | static struct raw3270_fn fs3270_fn; |
27 | 27 | ||
28 | struct fs3270 { | 28 | struct fs3270 { |
29 | struct raw3270_view view; | 29 | struct raw3270_view view; |
@@ -401,7 +401,7 @@ fs3270_release(struct raw3270_view *view) | |||
401 | } | 401 | } |
402 | 402 | ||
403 | /* View to a 3270 device. Can be console, tty or fullscreen. */ | 403 | /* View to a 3270 device. Can be console, tty or fullscreen. */ |
404 | struct raw3270_fn fs3270_fn = { | 404 | static struct raw3270_fn fs3270_fn = { |
405 | .activate = fs3270_activate, | 405 | .activate = fs3270_activate, |
406 | .deactivate = fs3270_deactivate, | 406 | .deactivate = fs3270_deactivate, |
407 | .intv = (void *) fs3270_irq, | 407 | .intv = (void *) fs3270_irq, |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 3e86fd1756e5..f62f9a4e8950 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c | |||
@@ -148,6 +148,7 @@ kbd_ascebc(struct kbd_data *kbd, unsigned char *ascebc) | |||
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
151 | #if 0 | ||
151 | /* | 152 | /* |
152 | * Generate ebcdic -> ascii translation table from kbd_data. | 153 | * Generate ebcdic -> ascii translation table from kbd_data. |
153 | */ | 154 | */ |
@@ -173,6 +174,7 @@ kbd_ebcasc(struct kbd_data *kbd, unsigned char *ebcasc) | |||
173 | } | 174 | } |
174 | } | 175 | } |
175 | } | 176 | } |
177 | #endif | ||
176 | 178 | ||
177 | /* | 179 | /* |
178 | * We have a combining character DIACR here, followed by the character CH. | 180 | * We have a combining character DIACR here, followed by the character CH. |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index cdb24f528112..9e451acc6491 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -67,8 +67,8 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) | |||
67 | return -EINVAL; | 67 | return -EINVAL; |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, | 70 | static struct mon_buf *monwrite_find_hdr(struct mon_private *monpriv, |
71 | struct monwrite_hdr *monhdr) | 71 | struct monwrite_hdr *monhdr) |
72 | { | 72 | { |
73 | struct mon_buf *entry, *next; | 73 | struct mon_buf *entry, *next; |
74 | 74 | ||
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 7a84014f2037..8facd14adb7c 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
31 | 31 | ||
32 | struct class *class3270; | 32 | static struct class *class3270; |
33 | 33 | ||
34 | /* The main 3270 data structure. */ | 34 | /* The main 3270 data structure. */ |
35 | struct raw3270 { | 35 | struct raw3270 { |
@@ -86,7 +86,7 @@ DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue); | |||
86 | /* | 86 | /* |
87 | * Encode array for 12 bit 3270 addresses. | 87 | * Encode array for 12 bit 3270 addresses. |
88 | */ | 88 | */ |
89 | unsigned char raw3270_ebcgraf[64] = { | 89 | static unsigned char raw3270_ebcgraf[64] = { |
90 | 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, | 90 | 0x40, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, |
91 | 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, | 91 | 0xc8, 0xc9, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, |
92 | 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, | 92 | 0x50, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 8a056df09d6b..f171de3b0b11 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -59,7 +59,8 @@ static volatile enum sclp_init_state_t { | |||
59 | /* Internal state: is a request active at the sclp? */ | 59 | /* Internal state: is a request active at the sclp? */ |
60 | static volatile enum sclp_running_state_t { | 60 | static volatile enum sclp_running_state_t { |
61 | sclp_running_state_idle, | 61 | sclp_running_state_idle, |
62 | sclp_running_state_running | 62 | sclp_running_state_running, |
63 | sclp_running_state_reset_pending | ||
63 | } sclp_running_state = sclp_running_state_idle; | 64 | } sclp_running_state = sclp_running_state_idle; |
64 | 65 | ||
65 | /* Internal state: is a read request pending? */ | 66 | /* Internal state: is a read request pending? */ |
@@ -88,15 +89,15 @@ static volatile enum sclp_mask_state_t { | |||
88 | 89 | ||
89 | /* Timeout intervals in seconds.*/ | 90 | /* Timeout intervals in seconds.*/ |
90 | #define SCLP_BUSY_INTERVAL 10 | 91 | #define SCLP_BUSY_INTERVAL 10 |
91 | #define SCLP_RETRY_INTERVAL 15 | 92 | #define SCLP_RETRY_INTERVAL 30 |
92 | 93 | ||
93 | static void sclp_process_queue(void); | 94 | static void sclp_process_queue(void); |
94 | static int sclp_init_mask(int calculate); | 95 | static int sclp_init_mask(int calculate); |
95 | static int sclp_init(void); | 96 | static int sclp_init(void); |
96 | 97 | ||
97 | /* Perform service call. Return 0 on success, non-zero otherwise. */ | 98 | /* Perform service call. Return 0 on success, non-zero otherwise. */ |
98 | static int | 99 | int |
99 | service_call(sclp_cmdw_t command, void *sccb) | 100 | sclp_service_call(sclp_cmdw_t command, void *sccb) |
100 | { | 101 | { |
101 | int cc; | 102 | int cc; |
102 | 103 | ||
@@ -113,19 +114,17 @@ service_call(sclp_cmdw_t command, void *sccb) | |||
113 | return 0; | 114 | return 0; |
114 | } | 115 | } |
115 | 116 | ||
116 | /* Request timeout handler. Restart the request queue. If DATA is non-zero, | 117 | static inline void __sclp_make_read_req(void); |
117 | * force restart of running request. */ | 118 | |
118 | static void | 119 | static void |
119 | sclp_request_timeout(unsigned long data) | 120 | __sclp_queue_read_req(void) |
120 | { | 121 | { |
121 | unsigned long flags; | 122 | if (sclp_reading_state == sclp_reading_state_idle) { |
122 | 123 | sclp_reading_state = sclp_reading_state_reading; | |
123 | if (data) { | 124 | __sclp_make_read_req(); |
124 | spin_lock_irqsave(&sclp_lock, flags); | 125 | /* Add request to head of queue */ |
125 | sclp_running_state = sclp_running_state_idle; | 126 | list_add(&sclp_read_req.list, &sclp_req_queue); |
126 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
127 | } | 127 | } |
128 | sclp_process_queue(); | ||
129 | } | 128 | } |
130 | 129 | ||
131 | /* Set up request retry timer. Called while sclp_lock is locked. */ | 130 | /* Set up request retry timer. Called while sclp_lock is locked. */ |
@@ -140,6 +139,29 @@ __sclp_set_request_timer(unsigned long time, void (*function)(unsigned long), | |||
140 | add_timer(&sclp_request_timer); | 139 | add_timer(&sclp_request_timer); |
141 | } | 140 | } |
142 | 141 | ||
142 | /* Request timeout handler. Restart the request queue. If DATA is non-zero, | ||
143 | * force restart of running request. */ | ||
144 | static void | ||
145 | sclp_request_timeout(unsigned long data) | ||
146 | { | ||
147 | unsigned long flags; | ||
148 | |||
149 | spin_lock_irqsave(&sclp_lock, flags); | ||
150 | if (data) { | ||
151 | if (sclp_running_state == sclp_running_state_running) { | ||
152 | /* Break running state and queue NOP read event request | ||
153 | * to get a defined interface state. */ | ||
154 | __sclp_queue_read_req(); | ||
155 | sclp_running_state = sclp_running_state_idle; | ||
156 | } | ||
157 | } else { | ||
158 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | ||
159 | sclp_request_timeout, 0); | ||
160 | } | ||
161 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
162 | sclp_process_queue(); | ||
163 | } | ||
164 | |||
143 | /* Try to start a request. Return zero if the request was successfully | 165 | /* Try to start a request. Return zero if the request was successfully |
144 | * started or if it will be started at a later time. Return non-zero otherwise. | 166 | * started or if it will be started at a later time. Return non-zero otherwise. |
145 | * Called while sclp_lock is locked. */ | 167 | * Called while sclp_lock is locked. */ |
@@ -151,7 +173,7 @@ __sclp_start_request(struct sclp_req *req) | |||
151 | if (sclp_running_state != sclp_running_state_idle) | 173 | if (sclp_running_state != sclp_running_state_idle) |
152 | return 0; | 174 | return 0; |
153 | del_timer(&sclp_request_timer); | 175 | del_timer(&sclp_request_timer); |
154 | rc = service_call(req->command, req->sccb); | 176 | rc = sclp_service_call(req->command, req->sccb); |
155 | req->start_count++; | 177 | req->start_count++; |
156 | 178 | ||
157 | if (rc == 0) { | 179 | if (rc == 0) { |
@@ -191,7 +213,15 @@ sclp_process_queue(void) | |||
191 | rc = __sclp_start_request(req); | 213 | rc = __sclp_start_request(req); |
192 | if (rc == 0) | 214 | if (rc == 0) |
193 | break; | 215 | break; |
194 | /* Request failed. */ | 216 | /* Request failed */ |
217 | if (req->start_count > 1) { | ||
218 | /* Cannot abort already submitted request - could still | ||
219 | * be active at the SCLP */ | ||
220 | __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ, | ||
221 | sclp_request_timeout, 0); | ||
222 | break; | ||
223 | } | ||
224 | /* Post-processing for aborted request */ | ||
195 | list_del(&req->list); | 225 | list_del(&req->list); |
196 | if (req->callback) { | 226 | if (req->callback) { |
197 | spin_unlock_irqrestore(&sclp_lock, flags); | 227 | spin_unlock_irqrestore(&sclp_lock, flags); |
@@ -221,7 +251,8 @@ sclp_add_request(struct sclp_req *req) | |||
221 | list_add_tail(&req->list, &sclp_req_queue); | 251 | list_add_tail(&req->list, &sclp_req_queue); |
222 | rc = 0; | 252 | rc = 0; |
223 | /* Start if request is first in list */ | 253 | /* Start if request is first in list */ |
224 | if (req->list.prev == &sclp_req_queue) { | 254 | if (sclp_running_state == sclp_running_state_idle && |
255 | req->list.prev == &sclp_req_queue) { | ||
225 | rc = __sclp_start_request(req); | 256 | rc = __sclp_start_request(req); |
226 | if (rc) | 257 | if (rc) |
227 | list_del(&req->list); | 258 | list_del(&req->list); |
@@ -294,7 +325,7 @@ __sclp_make_read_req(void) | |||
294 | sccb = (struct sccb_header *) sclp_read_sccb; | 325 | sccb = (struct sccb_header *) sclp_read_sccb; |
295 | clear_page(sccb); | 326 | clear_page(sccb); |
296 | memset(&sclp_read_req, 0, sizeof(struct sclp_req)); | 327 | memset(&sclp_read_req, 0, sizeof(struct sclp_req)); |
297 | sclp_read_req.command = SCLP_CMDW_READDATA; | 328 | sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA; |
298 | sclp_read_req.status = SCLP_REQ_QUEUED; | 329 | sclp_read_req.status = SCLP_REQ_QUEUED; |
299 | sclp_read_req.start_count = 0; | 330 | sclp_read_req.start_count = 0; |
300 | sclp_read_req.callback = sclp_read_cb; | 331 | sclp_read_req.callback = sclp_read_cb; |
@@ -334,6 +365,8 @@ sclp_interrupt_handler(__u16 code) | |||
334 | finished_sccb = S390_lowcore.ext_params & 0xfffffff8; | 365 | finished_sccb = S390_lowcore.ext_params & 0xfffffff8; |
335 | evbuf_pending = S390_lowcore.ext_params & 0x3; | 366 | evbuf_pending = S390_lowcore.ext_params & 0x3; |
336 | if (finished_sccb) { | 367 | if (finished_sccb) { |
368 | del_timer(&sclp_request_timer); | ||
369 | sclp_running_state = sclp_running_state_reset_pending; | ||
337 | req = __sclp_find_req(finished_sccb); | 370 | req = __sclp_find_req(finished_sccb); |
338 | if (req) { | 371 | if (req) { |
339 | /* Request post-processing */ | 372 | /* Request post-processing */ |
@@ -348,13 +381,8 @@ sclp_interrupt_handler(__u16 code) | |||
348 | sclp_running_state = sclp_running_state_idle; | 381 | sclp_running_state = sclp_running_state_idle; |
349 | } | 382 | } |
350 | if (evbuf_pending && sclp_receive_mask != 0 && | 383 | if (evbuf_pending && sclp_receive_mask != 0 && |
351 | sclp_reading_state == sclp_reading_state_idle && | 384 | sclp_activation_state == sclp_activation_state_active) |
352 | sclp_activation_state == sclp_activation_state_active ) { | 385 | __sclp_queue_read_req(); |
353 | sclp_reading_state = sclp_reading_state_reading; | ||
354 | __sclp_make_read_req(); | ||
355 | /* Add request to head of queue */ | ||
356 | list_add(&sclp_read_req.list, &sclp_req_queue); | ||
357 | } | ||
358 | spin_unlock(&sclp_lock); | 386 | spin_unlock(&sclp_lock); |
359 | sclp_process_queue(); | 387 | sclp_process_queue(); |
360 | } | 388 | } |
@@ -374,6 +402,7 @@ sclp_sync_wait(void) | |||
374 | unsigned long flags; | 402 | unsigned long flags; |
375 | unsigned long cr0, cr0_sync; | 403 | unsigned long cr0, cr0_sync; |
376 | u64 timeout; | 404 | u64 timeout; |
405 | int irq_context; | ||
377 | 406 | ||
378 | /* We'll be disabling timer interrupts, so we need a custom timeout | 407 | /* We'll be disabling timer interrupts, so we need a custom timeout |
379 | * mechanism */ | 408 | * mechanism */ |
@@ -386,7 +415,9 @@ sclp_sync_wait(void) | |||
386 | } | 415 | } |
387 | local_irq_save(flags); | 416 | local_irq_save(flags); |
388 | /* Prevent bottom half from executing once we force interrupts open */ | 417 | /* Prevent bottom half from executing once we force interrupts open */ |
389 | local_bh_disable(); | 418 | irq_context = in_interrupt(); |
419 | if (!irq_context) | ||
420 | local_bh_disable(); | ||
390 | /* Enable service-signal interruption, disable timer interrupts */ | 421 | /* Enable service-signal interruption, disable timer interrupts */ |
391 | trace_hardirqs_on(); | 422 | trace_hardirqs_on(); |
392 | __ctl_store(cr0, 0, 0); | 423 | __ctl_store(cr0, 0, 0); |
@@ -402,19 +433,19 @@ sclp_sync_wait(void) | |||
402 | get_clock() > timeout && | 433 | get_clock() > timeout && |
403 | del_timer(&sclp_request_timer)) | 434 | del_timer(&sclp_request_timer)) |
404 | sclp_request_timer.function(sclp_request_timer.data); | 435 | sclp_request_timer.function(sclp_request_timer.data); |
405 | barrier(); | ||
406 | cpu_relax(); | 436 | cpu_relax(); |
407 | } | 437 | } |
408 | local_irq_disable(); | 438 | local_irq_disable(); |
409 | __ctl_load(cr0, 0, 0); | 439 | __ctl_load(cr0, 0, 0); |
410 | _local_bh_enable(); | 440 | if (!irq_context) |
441 | _local_bh_enable(); | ||
411 | local_irq_restore(flags); | 442 | local_irq_restore(flags); |
412 | } | 443 | } |
413 | 444 | ||
414 | EXPORT_SYMBOL(sclp_sync_wait); | 445 | EXPORT_SYMBOL(sclp_sync_wait); |
415 | 446 | ||
416 | /* Dispatch changes in send and receive mask to registered listeners. */ | 447 | /* Dispatch changes in send and receive mask to registered listeners. */ |
417 | static inline void | 448 | static void |
418 | sclp_dispatch_state_change(void) | 449 | sclp_dispatch_state_change(void) |
419 | { | 450 | { |
420 | struct list_head *l; | 451 | struct list_head *l; |
@@ -597,7 +628,7 @@ __sclp_make_init_req(u32 receive_mask, u32 send_mask) | |||
597 | sccb = (struct init_sccb *) sclp_init_sccb; | 628 | sccb = (struct init_sccb *) sclp_init_sccb; |
598 | clear_page(sccb); | 629 | clear_page(sccb); |
599 | memset(&sclp_init_req, 0, sizeof(struct sclp_req)); | 630 | memset(&sclp_init_req, 0, sizeof(struct sclp_req)); |
600 | sclp_init_req.command = SCLP_CMDW_WRITEMASK; | 631 | sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK; |
601 | sclp_init_req.status = SCLP_REQ_FILLED; | 632 | sclp_init_req.status = SCLP_REQ_FILLED; |
602 | sclp_init_req.start_count = 0; | 633 | sclp_init_req.start_count = 0; |
603 | sclp_init_req.callback = NULL; | 634 | sclp_init_req.callback = NULL; |
@@ -800,7 +831,7 @@ sclp_check_interface(void) | |||
800 | for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { | 831 | for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) { |
801 | __sclp_make_init_req(0, 0); | 832 | __sclp_make_init_req(0, 0); |
802 | sccb = (struct init_sccb *) sclp_init_req.sccb; | 833 | sccb = (struct init_sccb *) sclp_init_req.sccb; |
803 | rc = service_call(sclp_init_req.command, sccb); | 834 | rc = sclp_service_call(sclp_init_req.command, sccb); |
804 | if (rc == -EIO) | 835 | if (rc == -EIO) |
805 | break; | 836 | break; |
806 | sclp_init_req.status = SCLP_REQ_RUNNING; | 837 | sclp_init_req.status = SCLP_REQ_RUNNING; |
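The sclp.c rework above adds a reset_pending running state, lets the timeout handler queue a NOP read-event request to bring the interface back to a defined state, and stops aborting a request that has already been started once, since it may still be active at the SCLP; in that case only the busy timer is armed. A compact standalone model of the retry decision in sclp_process_queue() (the real code also walks the request list and holds sclp_lock):

#include <stdbool.h>

#define SCLP_BUSY_INTERVAL	10	/* seconds, as defined in the hunk above */

/* Returns true when the request must stay queued: either it was started and
 * the interrupt will finish it, or it was already submitted earlier and
 * cannot safely be aborted, so the caller arms the busy timer instead. */
static bool sclp_keep_request(int start_rc, int start_count)
{
	if (start_rc == 0)
		return true;
	return start_count > 1;
}

int main(void)
{
	/* A first failed start is still removed and reported via its callback. */
	return sclp_keep_request(-5, 1) ? 1 : 0;
}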
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index 2c71d6ee7b5b..7d29ab45a6ed 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/list.h> | 14 | #include <linux/list.h> |
15 | 15 | #include <asm/sclp.h> | |
16 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
17 | 17 | ||
18 | /* maximum number of pages concerning our own memory management */ | 18 | /* maximum number of pages concerning our own memory management */ |
@@ -49,9 +49,11 @@ | |||
49 | 49 | ||
50 | typedef unsigned int sclp_cmdw_t; | 50 | typedef unsigned int sclp_cmdw_t; |
51 | 51 | ||
52 | #define SCLP_CMDW_READDATA 0x00770005 | 52 | #define SCLP_CMDW_READ_EVENT_DATA 0x00770005 |
53 | #define SCLP_CMDW_WRITEDATA 0x00760005 | 53 | #define SCLP_CMDW_WRITE_EVENT_DATA 0x00760005 |
54 | #define SCLP_CMDW_WRITEMASK 0x00780005 | 54 | #define SCLP_CMDW_WRITE_EVENT_MASK 0x00780005 |
55 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | ||
56 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | ||
55 | 57 | ||
56 | #define GDS_ID_MDSMU 0x1310 | 58 | #define GDS_ID_MDSMU 0x1310 |
57 | #define GDS_ID_MDSRouteInfo 0x1311 | 59 | #define GDS_ID_MDSRouteInfo 0x1311 |
@@ -66,13 +68,6 @@ typedef unsigned int sclp_cmdw_t; | |||
66 | 68 | ||
67 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ | 69 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ |
68 | 70 | ||
69 | struct sccb_header { | ||
70 | u16 length; | ||
71 | u8 function_code; | ||
72 | u8 control_mask[3]; | ||
73 | u16 response_code; | ||
74 | } __attribute__((packed)); | ||
75 | |||
76 | struct gds_subvector { | 71 | struct gds_subvector { |
77 | u8 length; | 72 | u8 length; |
78 | u8 key; | 73 | u8 key; |
@@ -131,6 +126,7 @@ void sclp_unregister(struct sclp_register *reg); | |||
131 | int sclp_remove_processed(struct sccb_header *sccb); | 126 | int sclp_remove_processed(struct sccb_header *sccb); |
132 | int sclp_deactivate(void); | 127 | int sclp_deactivate(void); |
133 | int sclp_reactivate(void); | 128 | int sclp_reactivate(void); |
129 | int sclp_service_call(sclp_cmdw_t command, void *sccb); | ||
134 | 130 | ||
135 | /* useful inlines */ | 131 | /* useful inlines */ |
136 | 132 | ||
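Besides renaming the command words (SCLP_CMDW_WRITEDATA becomes SCLP_CMDW_WRITE_EVENT_DATA, and so on), sclp.h now pulls struct sccb_header from asm/sclp.h and declares sclp_service_call() for use outside sclp.c. The write paths changed below (sclp_cpi.c, sclp_rw.c, sclp_vt220.c) all fill their requests the same way; a sketch of that pattern, assuming the sclp_req fields already used throughout this diff:

#include "sclp.h"

/* Sketch only: filling a write-event request against the renamed command
 * word before handing it to sclp_add_request(). */
static void sclp_fill_write_req(struct sclp_req *req, void *sccb,
				void (*callback)(struct sclp_req *, void *))
{
	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
	req->sccb = sccb;
	req->status = SCLP_REQ_FILLED;
	req->callback = callback;
	req->callback_data = sccb;
}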
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 86864f641716..ead1043d788e 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c | |||
@@ -66,7 +66,7 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) | |||
66 | } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); | 66 | } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline void | 69 | static void |
70 | sclp_conbuf_emit(void) | 70 | sclp_conbuf_emit(void) |
71 | { | 71 | { |
72 | struct sclp_buffer* buffer; | 72 | struct sclp_buffer* buffer; |
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c index 4f873ae148b7..65aa2c85737f 100644 --- a/drivers/s390/char/sclp_cpi.c +++ b/drivers/s390/char/sclp_cpi.c | |||
@@ -169,7 +169,7 @@ cpi_prepare_req(void) | |||
169 | } | 169 | } |
170 | 170 | ||
171 | /* prepare request data structure presented to SCLP driver */ | 171 | /* prepare request data structure presented to SCLP driver */ |
172 | req->command = SCLP_CMDW_WRITEDATA; | 172 | req->command = SCLP_CMDW_WRITE_EVENT_DATA; |
173 | req->sccb = sccb; | 173 | req->sccb = sccb; |
174 | req->status = SCLP_REQ_FILLED; | 174 | req->status = SCLP_REQ_FILLED; |
175 | req->callback = cpi_callback; | 175 | req->callback = cpi_callback; |
diff --git a/drivers/s390/char/sclp_info.c b/drivers/s390/char/sclp_info.c new file mode 100644 index 000000000000..7bcbe643b087 --- /dev/null +++ b/drivers/s390/char/sclp_info.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * drivers/s390/char/sclp_info.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <asm/sclp.h> | ||
12 | #include "sclp.h" | ||
13 | |||
14 | struct sclp_readinfo_sccb s390_readinfo_sccb; | ||
15 | |||
16 | void __init sclp_readinfo_early(void) | ||
17 | { | ||
18 | sclp_cmdw_t command; | ||
19 | struct sccb_header *sccb; | ||
20 | int ret; | ||
21 | |||
22 | __ctl_set_bit(0, 9); /* enable service signal subclass mask */ | ||
23 | |||
24 | sccb = &s390_readinfo_sccb.header; | ||
25 | command = SCLP_CMDW_READ_SCP_INFO_FORCED; | ||
26 | while (1) { | ||
27 | u16 response; | ||
28 | |||
29 | memset(&s390_readinfo_sccb, 0, sizeof(s390_readinfo_sccb)); | ||
30 | sccb->length = sizeof(s390_readinfo_sccb); | ||
31 | sccb->control_mask[2] = 0x80; | ||
32 | |||
33 | ret = sclp_service_call(command, &s390_readinfo_sccb); | ||
34 | |||
35 | if (ret == -EIO) | ||
36 | goto out; | ||
37 | if (ret == -EBUSY) | ||
38 | continue; | ||
39 | |||
40 | __load_psw_mask(PSW_BASE_BITS | PSW_MASK_EXT | | ||
41 | PSW_MASK_WAIT | PSW_DEFAULT_KEY); | ||
42 | local_irq_disable(); | ||
43 | barrier(); | ||
44 | |||
45 | response = sccb->response_code; | ||
46 | |||
47 | if (response == 0x10) | ||
48 | break; | ||
49 | |||
50 | if (response != 0x1f0 || command == SCLP_CMDW_READ_SCP_INFO) | ||
51 | break; | ||
52 | |||
53 | command = SCLP_CMDW_READ_SCP_INFO; | ||
54 | } | ||
55 | out: | ||
56 | __ctl_clear_bit(0, 9); /* disable service signal subclass mask */ | ||
57 | } | ||
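The new sclp_info.c issues READ SCP INFO synchronously during early boot: it enables the service-signal subclass, retries on -EBUSY, waits for the completion interrupt in a wait PSW, and downgrades from the forced command variant exactly once if the response code is 0x1f0. The loop's exit conditions can be modelled in isolation as follows (a sketch, not driver code; only the two command constants are taken from sclp.h above):

#include <stdbool.h>
#include <stdint.h>

#define SCLP_CMDW_READ_SCP_INFO		0x00020001
#define SCLP_CMDW_READ_SCP_INFO_FORCED	0x00120001

/* Returns true if another iteration is needed; *command is downgraded from
 * the forced variant at most once, mirroring the loop above. */
static bool readinfo_should_retry(uint16_t response, uint32_t *command)
{
	if (response == 0x10)		/* SCCB accepted and filled in */
		return false;
	if (response != 0x1f0 || *command == SCLP_CMDW_READ_SCP_INFO)
		return false;		/* give up: unexpected response, or already downgraded */
	*command = SCLP_CMDW_READ_SCP_INFO;
	return true;
}

int main(void)
{
	uint32_t cmd = SCLP_CMDW_READ_SCP_INFO_FORCED;

	return readinfo_should_retry(0x1f0, &cmd) ? 0 : 1;	/* expects one retry */
}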
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 0c92d3909cca..2486783ea58e 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -460,7 +460,7 @@ sclp_emit_buffer(struct sclp_buffer *buffer, | |||
460 | sccb->msg_buf.header.type = EvTyp_PMsgCmd; | 460 | sccb->msg_buf.header.type = EvTyp_PMsgCmd; |
461 | else | 461 | else |
462 | return -ENOSYS; | 462 | return -ENOSYS; |
463 | buffer->request.command = SCLP_CMDW_WRITEDATA; | 463 | buffer->request.command = SCLP_CMDW_WRITE_EVENT_DATA; |
464 | buffer->request.status = SCLP_REQ_FILLED; | 464 | buffer->request.status = SCLP_REQ_FILLED; |
465 | buffer->request.callback = sclp_writedata_callback; | 465 | buffer->request.callback = sclp_writedata_callback; |
466 | buffer->request.callback_data = buffer; | 466 | buffer->request.callback_data = buffer; |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 2d173e5c8a09..90536f60bf50 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
@@ -721,7 +721,7 @@ static const struct tty_operations sclp_ops = { | |||
721 | .ioctl = sclp_tty_ioctl, | 721 | .ioctl = sclp_tty_ioctl, |
722 | }; | 722 | }; |
723 | 723 | ||
724 | int __init | 724 | static int __init |
725 | sclp_tty_init(void) | 725 | sclp_tty_init(void) |
726 | { | 726 | { |
727 | struct tty_driver *driver; | 727 | struct tty_driver *driver; |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 723bf4191bfe..544f137d70d7 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -207,7 +207,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request) | |||
207 | request->sclp_req.status = SCLP_REQ_FAILED; | 207 | request->sclp_req.status = SCLP_REQ_FAILED; |
208 | return -EIO; | 208 | return -EIO; |
209 | } | 209 | } |
210 | request->sclp_req.command = SCLP_CMDW_WRITEDATA; | 210 | request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA; |
211 | request->sclp_req.status = SCLP_REQ_FILLED; | 211 | request->sclp_req.status = SCLP_REQ_FILLED; |
212 | request->sclp_req.callback = sclp_vt220_callback; | 212 | request->sclp_req.callback = sclp_vt220_callback; |
213 | request->sclp_req.callback_data = (void *) request; | 213 | request->sclp_req.callback_data = (void *) request; |
@@ -669,7 +669,7 @@ static const struct tty_operations sclp_vt220_ops = { | |||
669 | /* | 669 | /* |
670 | * Register driver with SCLP and Linux and initialize internal tty structures. | 670 | * Register driver with SCLP and Linux and initialize internal tty structures. |
671 | */ | 671 | */ |
672 | int __init | 672 | static int __init |
673 | sclp_vt220_tty_init(void) | 673 | sclp_vt220_tty_init(void) |
674 | { | 674 | { |
675 | struct tty_driver *driver; | 675 | struct tty_driver *driver; |
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index c9f1c4c8bb13..bb4ff537729d 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * tape device driver for 3480/3490E/3590 tapes. | 3 | * tape device driver for 3480/3490E/3590 tapes. |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2001,2006 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -99,7 +99,11 @@ enum tape_op { | |||
99 | TO_DIS, /* Tape display */ | 99 | TO_DIS, /* Tape display */ |
100 | TO_ASSIGN, /* Assign tape to channel path */ | 100 | TO_ASSIGN, /* Assign tape to channel path */ |
101 | TO_UNASSIGN, /* Unassign tape from channel path */ | 101 | TO_UNASSIGN, /* Unassign tape from channel path */ |
102 | TO_SIZE /* #entries in tape_op_t */ | 102 | TO_CRYPT_ON, /* Enable encryption */ |
103 | TO_CRYPT_OFF, /* Disable encryption */ | ||
104 | TO_KEKL_SET, /* Set KEK label */ | ||
105 | TO_KEKL_QUERY, /* Query KEK label */ | ||
106 | TO_SIZE, /* #entries in tape_op_t */ | ||
103 | }; | 107 | }; |
104 | 108 | ||
105 | /* Forward declaration */ | 109 | /* Forward declaration */ |
@@ -112,6 +116,7 @@ enum tape_request_status { | |||
112 | TAPE_REQUEST_IN_IO, /* request is currently in IO */ | 116 | TAPE_REQUEST_IN_IO, /* request is currently in IO */ |
113 | TAPE_REQUEST_DONE, /* request is completed. */ | 117 | TAPE_REQUEST_DONE, /* request is completed. */ |
114 | TAPE_REQUEST_CANCEL, /* request should be canceled. */ | 118 | TAPE_REQUEST_CANCEL, /* request should be canceled. */ |
119 | TAPE_REQUEST_LONG_BUSY, /* request has to be restarted after long busy */ | ||
115 | }; | 120 | }; |
116 | 121 | ||
117 | /* Tape CCW request */ | 122 | /* Tape CCW request */ |
@@ -164,10 +169,11 @@ struct tape_discipline { | |||
164 | * The discipline irq function either returns an error code (<0) which | 169 | * The discipline irq function either returns an error code (<0) which |
165 | * means that the request has failed with an error or one of the following: | 170 | * means that the request has failed with an error or one of the following: |
166 | */ | 171 | */ |
167 | #define TAPE_IO_SUCCESS 0 /* request successful */ | 172 | #define TAPE_IO_SUCCESS 0 /* request successful */ |
168 | #define TAPE_IO_PENDING 1 /* request still running */ | 173 | #define TAPE_IO_PENDING 1 /* request still running */ |
169 | #define TAPE_IO_RETRY 2 /* retry to current request */ | 174 | #define TAPE_IO_RETRY 2 /* retry to current request */ |
170 | #define TAPE_IO_STOP 3 /* stop the running request */ | 175 | #define TAPE_IO_STOP 3 /* stop the running request */ |
176 | #define TAPE_IO_LONG_BUSY 4 /* delay the running request */ | ||
171 | 177 | ||
172 | /* Char Frontend Data */ | 178 | /* Char Frontend Data */ |
173 | struct tape_char_data { | 179 | struct tape_char_data { |
@@ -242,6 +248,10 @@ struct tape_device { | |||
242 | 248 | ||
243 | /* Function to start or stop the next request later. */ | 249 | /* Function to start or stop the next request later. */ |
244 | struct delayed_work tape_dnr; | 250 | struct delayed_work tape_dnr; |
251 | |||
252 | /* Timer for long busy */ | ||
253 | struct timer_list lb_timeout; | ||
254 | |||
245 | }; | 255 | }; |
246 | 256 | ||
247 | /* Externals from tape_core.c */ | 257 | /* Externals from tape_core.c */ |
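tape.h gains four crypto operations, a TAPE_REQUEST_LONG_BUSY request state, a TAPE_IO_LONG_BUSY return value for the discipline irq handler, and an lb_timeout timer in struct tape_device. The code that consumes TAPE_IO_LONG_BUSY lives in tape_core.c and is not part of this excerpt; the sketch below only illustrates the intended flow, and both the timeout constant and the callback name are placeholders:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include "tape.h"

#define LONG_BUSY_TIMEOUT	180			/* placeholder value, in seconds */

extern void tape_long_busy_timeout(unsigned long data);	/* placeholder callback */

/* Sketch: when the discipline returns TAPE_IO_LONG_BUSY, mark the request and
 * arm lb_timeout so it is restarted later instead of being retried at once. */
static void tape_handle_long_busy(struct tape_device *device,
				  struct tape_request *request)
{
	request->status = TAPE_REQUEST_LONG_BUSY;
	device->lb_timeout.data = (unsigned long) device;
	device->lb_timeout.function = tape_long_busy_timeout;
	device->lb_timeout.expires = jiffies + LONG_BUSY_TIMEOUT * HZ;
	add_timer(&device->lb_timeout);
}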
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 9df912f63188..50f5edab83d7 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/char/tape_3590.c | 2 | * drivers/s390/char/tape_3590.c |
3 | * tape device discipline for 3590 tapes. | 3 | * tape device discipline for 3590 tapes. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2001,2006 | 5 | * Copyright IBM Corp. 2001,2006 |
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | 6 | * Author(s): Stefan Bader <shbader@de.ibm.com> |
7 | * Michael Holzheu <holzheu@de.ibm.com> | 7 | * Michael Holzheu <holzheu@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/bio.h> | 13 | #include <linux/bio.h> |
14 | #include <asm/ebcdic.h> | ||
14 | 15 | ||
15 | #define TAPE_DBF_AREA tape_3590_dbf | 16 | #define TAPE_DBF_AREA tape_3590_dbf |
16 | 17 | ||
@@ -30,7 +31,7 @@ EXPORT_SYMBOL(TAPE_DBF_AREA); | |||
30 | * - Read Device (buffered) log: BRA | 31 | * - Read Device (buffered) log: BRA |
31 | * - Read Library log: BRA | 32 | * - Read Library log: BRA |
32 | * - Swap Devices: BRA | 33 | * - Swap Devices: BRA |
33 | * - Long Busy: BRA | 34 | * - Long Busy: implemented |
34 | * - Special Intercept: BRA | 35 | * - Special Intercept: BRA |
35 | * - Read Alternate: implemented | 36 | * - Read Alternate: implemented |
36 | *******************************************************************/ | 37 | *******************************************************************/ |
@@ -94,6 +95,332 @@ static const char *tape_3590_msg[TAPE_3590_MAX_MSG] = { | |||
94 | [0xae] = "Subsystem environmental alert", | 95 | [0xae] = "Subsystem environmental alert", |
95 | }; | 96 | }; |
96 | 97 | ||
98 | static int crypt_supported(struct tape_device *device) | ||
99 | { | ||
100 | return TAPE390_CRYPT_SUPPORTED(TAPE_3590_CRYPT_INFO(device)); | ||
101 | } | ||
102 | |||
103 | static int crypt_enabled(struct tape_device *device) | ||
104 | { | ||
105 | return TAPE390_CRYPT_ON(TAPE_3590_CRYPT_INFO(device)); | ||
106 | } | ||
107 | |||
108 | static void ext_to_int_kekl(struct tape390_kekl *in, | ||
109 | struct tape3592_kekl *out) | ||
110 | { | ||
111 | int i; | ||
112 | |||
113 | memset(out, 0, sizeof(*out)); | ||
114 | if (in->type == TAPE390_KEKL_TYPE_HASH) | ||
115 | out->flags |= 0x40; | ||
116 | if (in->type_on_tape == TAPE390_KEKL_TYPE_HASH) | ||
117 | out->flags |= 0x80; | ||
118 | strncpy(out->label, in->label, 64); | ||
119 | for (i = strlen(in->label); i < sizeof(out->label); i++) | ||
120 | out->label[i] = ' '; | ||
121 | ASCEBC(out->label, sizeof(out->label)); | ||
122 | } | ||
123 | |||
124 | static void int_to_ext_kekl(struct tape3592_kekl *in, | ||
125 | struct tape390_kekl *out) | ||
126 | { | ||
127 | memset(out, 0, sizeof(*out)); | ||
128 | if(in->flags & 0x40) | ||
129 | out->type = TAPE390_KEKL_TYPE_HASH; | ||
130 | else | ||
131 | out->type = TAPE390_KEKL_TYPE_LABEL; | ||
132 | if(in->flags & 0x80) | ||
133 | out->type_on_tape = TAPE390_KEKL_TYPE_HASH; | ||
134 | else | ||
135 | out->type_on_tape = TAPE390_KEKL_TYPE_LABEL; | ||
136 | memcpy(out->label, in->label, sizeof(in->label)); | ||
137 | EBCASC(out->label, sizeof(in->label)); | ||
138 | strstrip(out->label); | ||
139 | } | ||
140 | |||
141 | static void int_to_ext_kekl_pair(struct tape3592_kekl_pair *in, | ||
142 | struct tape390_kekl_pair *out) | ||
143 | { | ||
144 | if (in->count == 0) { | ||
145 | out->kekl[0].type = TAPE390_KEKL_TYPE_NONE; | ||
146 | out->kekl[0].type_on_tape = TAPE390_KEKL_TYPE_NONE; | ||
147 | out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; | ||
148 | out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; | ||
149 | } else if (in->count == 1) { | ||
150 | int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); | ||
151 | out->kekl[1].type = TAPE390_KEKL_TYPE_NONE; | ||
152 | out->kekl[1].type_on_tape = TAPE390_KEKL_TYPE_NONE; | ||
153 | } else if (in->count == 2) { | ||
154 | int_to_ext_kekl(&in->kekl[0], &out->kekl[0]); | ||
155 | int_to_ext_kekl(&in->kekl[1], &out->kekl[1]); | ||
156 | } else { | ||
157 | printk("Invalid KEKL number: %d\n", in->count); | ||
158 | BUG(); | ||
159 | } | ||
160 | } | ||
161 | |||
162 | static int check_ext_kekl(struct tape390_kekl *kekl) | ||
163 | { | ||
164 | if (kekl->type == TAPE390_KEKL_TYPE_NONE) | ||
165 | goto invalid; | ||
166 | if (kekl->type > TAPE390_KEKL_TYPE_HASH) | ||
167 | goto invalid; | ||
168 | if (kekl->type_on_tape == TAPE390_KEKL_TYPE_NONE) | ||
169 | goto invalid; | ||
170 | if (kekl->type_on_tape > TAPE390_KEKL_TYPE_HASH) | ||
171 | goto invalid; | ||
172 | if ((kekl->type == TAPE390_KEKL_TYPE_HASH) && | ||
173 | (kekl->type_on_tape == TAPE390_KEKL_TYPE_LABEL)) | ||
174 | goto invalid; | ||
175 | |||
176 | return 0; | ||
177 | invalid: | ||
178 | return -EINVAL; | ||
179 | } | ||
180 | |||
181 | static int check_ext_kekl_pair(struct tape390_kekl_pair *kekls) | ||
182 | { | ||
183 | if (check_ext_kekl(&kekls->kekl[0])) | ||
184 | goto invalid; | ||
185 | if (check_ext_kekl(&kekls->kekl[1])) | ||
186 | goto invalid; | ||
187 | |||
188 | return 0; | ||
189 | invalid: | ||
190 | return -EINVAL; | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * Query KEKLs | ||
195 | */ | ||
196 | static int tape_3592_kekl_query(struct tape_device *device, | ||
197 | struct tape390_kekl_pair *ext_kekls) | ||
198 | { | ||
199 | struct tape_request *request; | ||
200 | struct tape3592_kekl_query_order *order; | ||
201 | struct tape3592_kekl_query_data *int_kekls; | ||
202 | int rc; | ||
203 | |||
204 | DBF_EVENT(6, "tape3592_kekl_query\n"); | ||
205 | int_kekls = kmalloc(sizeof(*int_kekls), GFP_KERNEL|GFP_DMA); | ||
206 | if (!int_kekls) | ||
207 | return -ENOMEM; | ||
208 | request = tape_alloc_request(2, sizeof(*order)); | ||
209 | if (IS_ERR(request)) { | ||
210 | rc = PTR_ERR(request); | ||
211 | goto fail_malloc; | ||
212 | } | ||
213 | order = request->cpdata; | ||
214 | memset(order,0,sizeof(*order)); | ||
215 | order->code = 0xe2; | ||
216 | order->max_count = 2; | ||
217 | request->op = TO_KEKL_QUERY; | ||
218 | tape_ccw_cc(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); | ||
219 | tape_ccw_end(request->cpaddr + 1, READ_SS_DATA, sizeof(*int_kekls), | ||
220 | int_kekls); | ||
221 | rc = tape_do_io(device, request); | ||
222 | if (rc) | ||
223 | goto fail_request; | ||
224 | int_to_ext_kekl_pair(&int_kekls->kekls, ext_kekls); | ||
225 | |||
226 | rc = 0; | ||
227 | fail_request: | ||
228 | tape_free_request(request); | ||
229 | fail_malloc: | ||
230 | kfree(int_kekls); | ||
231 | return rc; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * IOCTL: Query KEKLs | ||
236 | */ | ||
237 | static int tape_3592_ioctl_kekl_query(struct tape_device *device, | ||
238 | unsigned long arg) | ||
239 | { | ||
240 | int rc; | ||
241 | struct tape390_kekl_pair *ext_kekls; | ||
242 | |||
243 | DBF_EVENT(6, "tape_3592_ioctl_kekl_query\n"); | ||
244 | if (!crypt_supported(device)) | ||
245 | return -ENOSYS; | ||
246 | if (!crypt_enabled(device)) | ||
247 | return -EUNATCH; | ||
248 | ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); | ||
249 | if (!ext_kekls) | ||
250 | return -ENOMEM; | ||
251 | rc = tape_3592_kekl_query(device, ext_kekls); | ||
252 | if (rc != 0) | ||
253 | goto fail; | ||
254 | if (copy_to_user((char __user *) arg, ext_kekls, sizeof(*ext_kekls))) { | ||
255 | rc = -EFAULT; | ||
256 | goto fail; | ||
257 | } | ||
258 | rc = 0; | ||
259 | fail: | ||
260 | kfree(ext_kekls); | ||
261 | return rc; | ||
262 | } | ||
263 | |||
264 | static int tape_3590_mttell(struct tape_device *device, int mt_count); | ||
265 | |||
266 | /* | ||
267 | * Set KEKLs | ||
268 | */ | ||
269 | static int tape_3592_kekl_set(struct tape_device *device, | ||
270 | struct tape390_kekl_pair *ext_kekls) | ||
271 | { | ||
272 | struct tape_request *request; | ||
273 | struct tape3592_kekl_set_order *order; | ||
274 | |||
275 | DBF_EVENT(6, "tape3592_kekl_set\n"); | ||
276 | if (check_ext_kekl_pair(ext_kekls)) { | ||
277 | DBF_EVENT(6, "invalid kekls\n"); | ||
278 | return -EINVAL; | ||
279 | } | ||
280 | if (tape_3590_mttell(device, 0) != 0) | ||
281 | return -EBADSLT; | ||
282 | request = tape_alloc_request(1, sizeof(*order)); | ||
283 | if (IS_ERR(request)) | ||
284 | return PTR_ERR(request); | ||
285 | order = request->cpdata; | ||
286 | memset(order, 0, sizeof(*order)); | ||
287 | order->code = 0xe3; | ||
288 | order->kekls.count = 2; | ||
289 | ext_to_int_kekl(&ext_kekls->kekl[0], &order->kekls.kekl[0]); | ||
290 | ext_to_int_kekl(&ext_kekls->kekl[1], &order->kekls.kekl[1]); | ||
291 | request->op = TO_KEKL_SET; | ||
292 | tape_ccw_end(request->cpaddr, PERF_SUBSYS_FUNC, sizeof(*order), order); | ||
293 | |||
294 | return tape_do_io_free(device, request); | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * IOCTL: Set KEKLs | ||
299 | */ | ||
300 | static int tape_3592_ioctl_kekl_set(struct tape_device *device, | ||
301 | unsigned long arg) | ||
302 | { | ||
303 | int rc; | ||
304 | struct tape390_kekl_pair *ext_kekls; | ||
305 | |||
306 | DBF_EVENT(6, "tape_3592_ioctl_kekl_set\n"); | ||
307 | if (!crypt_supported(device)) | ||
308 | return -ENOSYS; | ||
309 | if (!crypt_enabled(device)) | ||
310 | return -EUNATCH; | ||
311 | ext_kekls = kmalloc(sizeof(*ext_kekls), GFP_KERNEL); | ||
312 | if (!ext_kekls) | ||
313 | return -ENOMEM; | ||
314 | if (copy_from_user(ext_kekls, (char __user *)arg, sizeof(*ext_kekls))) { | ||
315 | rc = -EFAULT; | ||
316 | goto out; | ||
317 | } | ||
318 | rc = tape_3592_kekl_set(device, ext_kekls); | ||
319 | out: | ||
320 | kfree(ext_kekls); | ||
321 | return rc; | ||
322 | } | ||
323 | |||
324 | /* | ||
325 | * Enable encryption | ||
326 | */ | ||
327 | static int tape_3592_enable_crypt(struct tape_device *device) | ||
328 | { | ||
329 | struct tape_request *request; | ||
330 | char *data; | ||
331 | |||
332 | DBF_EVENT(6, "tape_3592_enable_crypt\n"); | ||
333 | if (!crypt_supported(device)) | ||
334 | return -ENOSYS; | ||
335 | request = tape_alloc_request(2, 72); | ||
336 | if (IS_ERR(request)) | ||
337 | return PTR_ERR(request); | ||
338 | data = request->cpdata; | ||
339 | memset(data,0,72); | ||
340 | |||
341 | data[0] = 0x05; | ||
342 | data[36 + 0] = 0x03; | ||
343 | data[36 + 1] = 0x03; | ||
344 | data[36 + 4] = 0x40; | ||
345 | data[36 + 6] = 0x01; | ||
346 | data[36 + 14] = 0x2f; | ||
347 | data[36 + 18] = 0xc3; | ||
348 | data[36 + 35] = 0x72; | ||
349 | request->op = TO_CRYPT_ON; | ||
350 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | ||
351 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | ||
352 | return tape_do_io_free(device, request); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Disable encryption | ||
357 | */ | ||
358 | static int tape_3592_disable_crypt(struct tape_device *device) | ||
359 | { | ||
360 | struct tape_request *request; | ||
361 | char *data; | ||
362 | |||
363 | DBF_EVENT(6, "tape_3592_disable_crypt\n"); | ||
364 | if (!crypt_supported(device)) | ||
365 | return -ENOSYS; | ||
366 | request = tape_alloc_request(2, 72); | ||
367 | if (IS_ERR(request)) | ||
368 | return PTR_ERR(request); | ||
369 | data = request->cpdata; | ||
370 | memset(data,0,72); | ||
371 | |||
372 | data[0] = 0x05; | ||
373 | data[36 + 0] = 0x03; | ||
374 | data[36 + 1] = 0x03; | ||
375 | data[36 + 35] = 0x32; | ||
376 | |||
377 | request->op = TO_CRYPT_OFF; | ||
378 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | ||
379 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | ||
380 | |||
381 | return tape_do_io_free(device, request); | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * IOCTL: Set encryption status | ||
386 | */ | ||
387 | static int tape_3592_ioctl_crypt_set(struct tape_device *device, | ||
388 | unsigned long arg) | ||
389 | { | ||
390 | struct tape390_crypt_info info; | ||
391 | |||
392 | DBF_EVENT(6, "tape_3592_ioctl_crypt_set\n"); | ||
393 | if (!crypt_supported(device)) | ||
394 | return -ENOSYS; | ||
395 | if (copy_from_user(&info, (char __user *)arg, sizeof(info))) | ||
396 | return -EFAULT; | ||
397 | if (info.status & ~TAPE390_CRYPT_ON_MASK) | ||
398 | return -EINVAL; | ||
399 | if (info.status & TAPE390_CRYPT_ON_MASK) | ||
400 | return tape_3592_enable_crypt(device); | ||
401 | else | ||
402 | return tape_3592_disable_crypt(device); | ||
403 | } | ||
404 | |||
405 | static int tape_3590_sense_medium(struct tape_device *device); | ||
406 | |||
407 | /* | ||
408 | * IOCTL: Query encryption status | ||
409 | */ | ||
410 | static int tape_3592_ioctl_crypt_query(struct tape_device *device, | ||
411 | unsigned long arg) | ||
412 | { | ||
413 | DBF_EVENT(6, "tape_3592_ioctl_crypt_query\n"); | ||
414 | if (!crypt_supported(device)) | ||
415 | return -ENOSYS; | ||
416 | tape_3590_sense_medium(device); | ||
417 | if (copy_to_user((char __user *) arg, &TAPE_3590_CRYPT_INFO(device), | ||
418 | sizeof(TAPE_3590_CRYPT_INFO(device)))) | ||
419 | return -EFAULT; | ||
420 | else | ||
421 | return 0; | ||
422 | } | ||
423 | |||
97 | /* | 424 | /* |
98 | * 3590 IOCTL Overload | 425 | * 3590 IOCTL Overload |
99 | */ | 426 | */ |
@@ -109,6 +436,14 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) | |||
109 | 436 | ||
110 | return tape_std_display(device, &disp); | 437 | return tape_std_display(device, &disp); |
111 | } | 438 | } |
439 | case TAPE390_KEKL_SET: | ||
440 | return tape_3592_ioctl_kekl_set(device, arg); | ||
441 | case TAPE390_KEKL_QUERY: | ||
442 | return tape_3592_ioctl_kekl_query(device, arg); | ||
443 | case TAPE390_CRYPT_SET: | ||
444 | return tape_3592_ioctl_crypt_set(device, arg); | ||
445 | case TAPE390_CRYPT_QUERY: | ||
446 | return tape_3592_ioctl_crypt_query(device, arg); | ||
112 | default: | 447 | default: |
113 | return -EINVAL; /* no additional ioctls */ | 448 | return -EINVAL; /* no additional ioctls */ |
114 | } | 449 | } |
@@ -248,6 +583,12 @@ tape_3590_work_handler(struct work_struct *work) | |||
248 | case TO_READ_ATTMSG: | 583 | case TO_READ_ATTMSG: |
249 | tape_3590_read_attmsg(p->device); | 584 | tape_3590_read_attmsg(p->device); |
250 | break; | 585 | break; |
586 | case TO_CRYPT_ON: | ||
587 | tape_3592_enable_crypt(p->device); | ||
588 | break; | ||
589 | case TO_CRYPT_OFF: | ||
590 | tape_3592_disable_crypt(p->device); | ||
591 | break; | ||
251 | default: | 592 | default: |
252 | DBF_EVENT(3, "T3590: work handler undefined for " | 593 | DBF_EVENT(3, "T3590: work handler undefined for " |
253 | "operation 0x%02x\n", p->op); | 594 | "operation 0x%02x\n", p->op); |
@@ -365,6 +706,33 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) | |||
365 | } | 706 | } |
366 | #endif | 707 | #endif |
367 | 708 | ||
709 | static void tape_3590_med_state_set(struct tape_device *device, | ||
710 | struct tape_3590_med_sense *sense) | ||
711 | { | ||
712 | struct tape390_crypt_info *c_info; | ||
713 | |||
714 | c_info = &TAPE_3590_CRYPT_INFO(device); | ||
715 | |||
716 | if (sense->masst == MSENSE_UNASSOCIATED) { | ||
717 | tape_med_state_set(device, MS_UNLOADED); | ||
718 | TAPE_3590_CRYPT_INFO(device).medium_status = 0; | ||
719 | return; | ||
720 | } | ||
721 | if (sense->masst != MSENSE_ASSOCIATED_MOUNT) { | ||
722 | PRINT_ERR("Unknown medium state: %x\n", sense->masst); | ||
723 | return; | ||
724 | } | ||
725 | tape_med_state_set(device, MS_LOADED); | ||
726 | c_info->medium_status |= TAPE390_MEDIUM_LOADED_MASK; | ||
727 | if (sense->flags & MSENSE_CRYPT_MASK) { | ||
728 | PRINT_INFO("Medium is encrypted (%04x)\n", sense->flags); | ||
729 | c_info->medium_status |= TAPE390_MEDIUM_ENCRYPTED_MASK; | ||
730 | } else { | ||
731 | DBF_EVENT(6, "Medium is not encrypted %04x\n", sense->flags); | ||
732 | c_info->medium_status &= ~TAPE390_MEDIUM_ENCRYPTED_MASK; | ||
733 | } | ||
734 | } | ||
735 | |||
368 | /* | 736 | /* |
369 | * The done handler is called at device/channel end and wakes up the sleeping | 737 | * The done handler is called at device/channel end and wakes up the sleeping |
370 | * process | 738 | * process |
@@ -372,9 +740,10 @@ tape_3590_check_locate(struct tape_device *device, struct tape_request *request) | |||
372 | static int | 740 | static int |
373 | tape_3590_done(struct tape_device *device, struct tape_request *request) | 741 | tape_3590_done(struct tape_device *device, struct tape_request *request) |
374 | { | 742 | { |
375 | struct tape_3590_med_sense *sense; | 743 | struct tape_3590_disc_data *disc_data; |
376 | 744 | ||
377 | DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); | 745 | DBF_EVENT(6, "%s done\n", tape_op_verbose[request->op]); |
746 | disc_data = device->discdata; | ||
378 | 747 | ||
379 | switch (request->op) { | 748 | switch (request->op) { |
380 | case TO_BSB: | 749 | case TO_BSB: |
@@ -394,13 +763,20 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) | |||
394 | break; | 763 | break; |
395 | case TO_RUN: | 764 | case TO_RUN: |
396 | tape_med_state_set(device, MS_UNLOADED); | 765 | tape_med_state_set(device, MS_UNLOADED); |
766 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
397 | break; | 767 | break; |
398 | case TO_MSEN: | 768 | case TO_MSEN: |
399 | sense = (struct tape_3590_med_sense *) request->cpdata; | 769 | tape_3590_med_state_set(device, request->cpdata); |
400 | if (sense->masst == MSENSE_UNASSOCIATED) | 770 | break; |
401 | tape_med_state_set(device, MS_UNLOADED); | 771 | case TO_CRYPT_ON: |
402 | if (sense->masst == MSENSE_ASSOCIATED_MOUNT) | 772 | TAPE_3590_CRYPT_INFO(device).status |
403 | tape_med_state_set(device, MS_LOADED); | 773 | |= TAPE390_CRYPT_ON_MASK; |
774 | *(device->modeset_byte) |= 0x03; | ||
775 | break; | ||
776 | case TO_CRYPT_OFF: | ||
777 | TAPE_3590_CRYPT_INFO(device).status | ||
778 | &= ~TAPE390_CRYPT_ON_MASK; | ||
779 | *(device->modeset_byte) &= ~0x03; | ||
404 | break; | 780 | break; |
405 | case TO_RBI: /* RBI seems to succeed even without medium loaded. */ | 781 | case TO_RBI: /* RBI seems to succeed even without medium loaded. */ |
406 | case TO_NOP: /* Same to NOP. */ | 782 | case TO_NOP: /* Same to NOP. */ |
@@ -409,8 +785,9 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) | |||
409 | case TO_DIS: | 785 | case TO_DIS: |
410 | case TO_ASSIGN: | 786 | case TO_ASSIGN: |
411 | case TO_UNASSIGN: | 787 | case TO_UNASSIGN: |
412 | break; | ||
413 | case TO_SIZE: | 788 | case TO_SIZE: |
789 | case TO_KEKL_SET: | ||
790 | case TO_KEKL_QUERY: | ||
414 | break; | 791 | break; |
415 | } | 792 | } |
416 | return TAPE_IO_SUCCESS; | 793 | return TAPE_IO_SUCCESS; |
@@ -540,10 +917,8 @@ static int | |||
540 | tape_3590_erp_long_busy(struct tape_device *device, | 917 | tape_3590_erp_long_busy(struct tape_device *device, |
541 | struct tape_request *request, struct irb *irb) | 918 | struct tape_request *request, struct irb *irb) |
542 | { | 919 | { |
543 | /* FIXME: how about WAITING for a minute ? */ | 920 | DBF_EVENT(6, "Device is busy\n"); |
544 | PRINT_WARN("(%s): Device is busy! Please wait a minute!\n", | 921 | return TAPE_IO_LONG_BUSY; |
545 | device->cdev->dev.bus_id); | ||
546 | return tape_3590_erp_basic(device, request, irb, -EBUSY); | ||
547 | } | 922 | } |
548 | 923 | ||
549 | /* | 924 | /* |
@@ -951,6 +1326,34 @@ tape_3590_print_era_msg(struct tape_device *device, struct irb *irb) | |||
951 | device->cdev->dev.bus_id, sense->mc); | 1326 | device->cdev->dev.bus_id, sense->mc); |
952 | } | 1327 | } |
953 | 1328 | ||
1329 | static int tape_3590_crypt_error(struct tape_device *device, | ||
1330 | struct tape_request *request, struct irb *irb) | ||
1331 | { | ||
1332 | u8 cu_rc, ekm_rc1; | ||
1333 | u16 ekm_rc2; | ||
1334 | u32 drv_rc; | ||
1335 | char *bus_id, *sense; | ||
1336 | |||
1337 | sense = ((struct tape_3590_sense *) irb->ecw)->fmt.data; | ||
1338 | bus_id = device->cdev->dev.bus_id; | ||
1339 | cu_rc = sense[0]; | ||
1340 | drv_rc = *((u32*) &sense[5]) & 0xffffff; | ||
1341 | ekm_rc1 = sense[9]; | ||
1342 | ekm_rc2 = *((u16*) &sense[10]); | ||
1343 | if ((cu_rc == 0) && (ekm_rc2 == 0xee31)) | ||
1344 | /* key not defined on EKM */ | ||
1345 | return tape_3590_erp_basic(device, request, irb, -EKEYREJECTED); | ||
1346 | if ((cu_rc == 1) || (cu_rc == 2)) | ||
1347 | /* No connection to EKM */ | ||
1348 | return tape_3590_erp_basic(device, request, irb, -ENOTCONN); | ||
1349 | |||
1350 | PRINT_ERR("(%s): Unable to get encryption key from EKM\n", bus_id); | ||
1351 | PRINT_ERR("(%s): CU=%02X DRIVE=%06X EKM=%02X:%04X\n", bus_id, cu_rc, | ||
1352 | drv_rc, ekm_rc1, ekm_rc2); | ||
1353 | |||
1354 | return tape_3590_erp_basic(device, request, irb, -ENOKEY); | ||
1355 | } | ||
1356 | |||
954 | /* | 1357 | /* |
955 | * 3590 error Recovery routine: | 1358 | * 3590 error Recovery routine: |
956 | * If possible, it tries to recover from the error. If this is not possible, | 1359 | * If possible, it tries to recover from the error. If this is not possible, |
@@ -979,6 +1382,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
979 | 1382 | ||
980 | sense = (struct tape_3590_sense *) irb->ecw; | 1383 | sense = (struct tape_3590_sense *) irb->ecw; |
981 | 1384 | ||
1385 | DBF_EVENT(6, "Unit Check: RQC = %x\n", sense->rc_rqc); | ||
1386 | |||
982 | /* | 1387 | /* |
983 | * First check all RC-QRCs where we want to do something special | 1388 | * First check all RC-QRCs where we want to do something special |
984 | * - "break": basic error recovery is done | 1389 | * - "break": basic error recovery is done |
@@ -999,6 +1404,8 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
999 | case 0x2231: | 1404 | case 0x2231: |
1000 | tape_3590_print_era_msg(device, irb); | 1405 | tape_3590_print_era_msg(device, irb); |
1001 | return tape_3590_erp_special_interrupt(device, request, irb); | 1406 | return tape_3590_erp_special_interrupt(device, request, irb); |
1407 | case 0x2240: | ||
1408 | return tape_3590_crypt_error(device, request, irb); | ||
1002 | 1409 | ||
1003 | case 0x3010: | 1410 | case 0x3010: |
1004 | DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", | 1411 | DBF_EVENT(2, "(%08x): Backward at Beginning of Partition\n", |
@@ -1020,6 +1427,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1020 | DBF_EVENT(2, "(%08x): Rewind Unload complete\n", | 1427 | DBF_EVENT(2, "(%08x): Rewind Unload complete\n", |
1021 | device->cdev_id); | 1428 | device->cdev_id); |
1022 | tape_med_state_set(device, MS_UNLOADED); | 1429 | tape_med_state_set(device, MS_UNLOADED); |
1430 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
1023 | return tape_3590_erp_basic(device, request, irb, 0); | 1431 | return tape_3590_erp_basic(device, request, irb, 0); |
1024 | 1432 | ||
1025 | case 0x4010: | 1433 | case 0x4010: |
@@ -1030,9 +1438,15 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1030 | PRINT_WARN("(%s): Tape operation when medium not loaded\n", | 1438 | PRINT_WARN("(%s): Tape operation when medium not loaded\n", |
1031 | device->cdev->dev.bus_id); | 1439 | device->cdev->dev.bus_id); |
1032 | tape_med_state_set(device, MS_UNLOADED); | 1440 | tape_med_state_set(device, MS_UNLOADED); |
1441 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
1033 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); | 1442 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); |
1034 | case 0x4012: /* Device Long Busy */ | 1443 | case 0x4012: /* Device Long Busy */ |
1444 | /* XXX: Also use long busy handling here? */ | ||
1445 | DBF_EVENT(6, "(%08x): LONG BUSY\n", device->cdev_id); | ||
1035 | tape_3590_print_era_msg(device, irb); | 1446 | tape_3590_print_era_msg(device, irb); |
1447 | return tape_3590_erp_basic(device, request, irb, -EBUSY); | ||
1448 | case 0x4014: | ||
1449 | DBF_EVENT(6, "(%08x): Crypto LONG BUSY\n", device->cdev_id); | ||
1036 | return tape_3590_erp_long_busy(device, request, irb); | 1450 | return tape_3590_erp_long_busy(device, request, irb); |
1037 | 1451 | ||
1038 | case 0x5010: | 1452 | case 0x5010: |
@@ -1064,6 +1478,7 @@ tape_3590_unit_check(struct tape_device *device, struct tape_request *request, | |||
1064 | case 0x5120: | 1478 | case 0x5120: |
1065 | case 0x1120: | 1479 | case 0x1120: |
1066 | tape_med_state_set(device, MS_UNLOADED); | 1480 | tape_med_state_set(device, MS_UNLOADED); |
1481 | tape_3590_schedule_work(device, TO_CRYPT_OFF); | ||
1067 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); | 1482 | return tape_3590_erp_basic(device, request, irb, -ENOMEDIUM); |
1068 | 1483 | ||
1069 | case 0x6020: | 1484 | case 0x6020: |
@@ -1142,21 +1557,47 @@ tape_3590_setup_device(struct tape_device *device) | |||
1142 | { | 1557 | { |
1143 | int rc; | 1558 | int rc; |
1144 | struct tape_3590_disc_data *data; | 1559 | struct tape_3590_disc_data *data; |
1560 | char *rdc_data; | ||
1145 | 1561 | ||
1146 | DBF_EVENT(6, "3590 device setup\n"); | 1562 | DBF_EVENT(6, "3590 device setup\n"); |
1147 | data = kmalloc(sizeof(struct tape_3590_disc_data), | 1563 | data = kzalloc(sizeof(struct tape_3590_disc_data), GFP_KERNEL | GFP_DMA); |
1148 | GFP_KERNEL | GFP_DMA); | ||
1149 | if (data == NULL) | 1564 | if (data == NULL) |
1150 | return -ENOMEM; | 1565 | return -ENOMEM; |
1151 | data->read_back_op = READ_PREVIOUS; | 1566 | data->read_back_op = READ_PREVIOUS; |
1152 | device->discdata = data; | 1567 | device->discdata = data; |
1153 | 1568 | ||
1154 | if ((rc = tape_std_assign(device)) == 0) { | 1569 | rdc_data = kmalloc(64, GFP_KERNEL | GFP_DMA); |
1155 | /* Try to find out if medium is loaded */ | 1570 | if (!rdc_data) { |
1156 | if ((rc = tape_3590_sense_medium(device)) != 0) | 1571 | rc = -ENOMEM; |
1157 | DBF_LH(3, "3590 medium sense returned %d\n", rc); | 1572 | goto fail_kmalloc; |
1573 | } | ||
1574 | rc = read_dev_chars(device->cdev, (void**)&rdc_data, 64); | ||
1575 | if (rc) { | ||
1576 | DBF_LH(3, "Read device characteristics failed!\n"); | ||
1577 | goto fail_kmalloc; | ||
1578 | } | ||
1579 | rc = tape_std_assign(device); | ||
1580 | if (rc) | ||
1581 | goto fail_rdc_data; | ||
1582 | if (rdc_data[31] == 0x13) { | ||
1583 | PRINT_INFO("Device has crypto support\n"); | ||
1584 | data->crypt_info.capability |= TAPE390_CRYPT_SUPPORTED_MASK; | ||
1585 | tape_3592_disable_crypt(device); | ||
1586 | } else { | ||
1587 | DBF_EVENT(6, "Device has NO crypto support\n"); | ||
1158 | } | 1588 | } |
1589 | /* Try to find out if medium is loaded */ | ||
1590 | rc = tape_3590_sense_medium(device); | ||
1591 | if (rc) { | ||
1592 | DBF_LH(3, "3590 medium sense returned %d\n", rc); | ||
1593 | goto fail_rdc_data; | ||
1594 | } | ||
1595 | return 0; | ||
1159 | 1596 | ||
1597 | fail_rdc_data: | ||
1598 | kfree(rdc_data); | ||
1599 | fail_kmalloc: | ||
1600 | kfree(data); | ||
1160 | return rc; | 1601 | return rc; |
1161 | } | 1602 | } |
1162 | 1603 | ||
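The reworked tape_3590_setup_device() above allocates the discipline data with kzalloc, reads 64 bytes of device characteristics into a separate GFP_DMA buffer, flags crypto support when byte 31 is 0x13, and unwinds through goto labels on failure. A minimal sketch of that allocate/probe/unwind shape, with illustrative demo_* names only (not part of the patch):

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_disc_data {                 /* stand-in for tape_3590_disc_data */
    unsigned int capability;
};

static int demo_setup(void)
{
    struct demo_disc_data *data;
    char *rdc;
    int rc;

    data = kzalloc(sizeof(*data), GFP_KERNEL | GFP_DMA);
    if (!data)
        return -ENOMEM;
    rdc = kmalloc(64, GFP_KERNEL | GFP_DMA);
    if (!rdc) {
        rc = -ENOMEM;
        goto fail_data;
    }
    /* ... issue Read Device Characteristics into rdc here ... */
    if (rdc[31] == 0x13)                /* crypto-capable model, as in the hunk */
        data->capability |= 1;
    kfree(rdc);
    /* the real driver keeps data in device->discdata; it is freed here
     * only because this sketch has nowhere to store it */
    kfree(data);
    return 0;

fail_data:
    kfree(data);
    return rc;
}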
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h index cf274b9445a6..aa5138807af1 100644 --- a/drivers/s390/char/tape_3590.h +++ b/drivers/s390/char/tape_3590.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/char/tape_3590.h | 2 | * drivers/s390/char/tape_3590.h |
3 | * tape device discipline for 3590 tapes. | 3 | * tape device discipline for 3590 tapes. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2001,2006 | 5 | * Copyright IBM Corp. 2001,2006 |
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | 6 | * Author(s): Stefan Bader <shbader@de.ibm.com> |
7 | * Michael Holzheu <holzheu@de.ibm.com> | 7 | * Michael Holzheu <holzheu@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -38,16 +38,22 @@ | |||
38 | #define MSENSE_UNASSOCIATED 0x00 | 38 | #define MSENSE_UNASSOCIATED 0x00 |
39 | #define MSENSE_ASSOCIATED_MOUNT 0x01 | 39 | #define MSENSE_ASSOCIATED_MOUNT 0x01 |
40 | #define MSENSE_ASSOCIATED_UMOUNT 0x02 | 40 | #define MSENSE_ASSOCIATED_UMOUNT 0x02 |
41 | #define MSENSE_CRYPT_MASK 0x00000010 | ||
41 | 42 | ||
42 | #define TAPE_3590_MAX_MSG 0xb0 | 43 | #define TAPE_3590_MAX_MSG 0xb0 |
43 | 44 | ||
44 | /* Datatypes */ | 45 | /* Datatypes */ |
45 | 46 | ||
46 | struct tape_3590_disc_data { | 47 | struct tape_3590_disc_data { |
47 | unsigned char modeset_byte; | 48 | struct tape390_crypt_info crypt_info; |
48 | int read_back_op; | 49 | int read_back_op; |
49 | }; | 50 | }; |
50 | 51 | ||
52 | #define TAPE_3590_CRYPT_INFO(device) \ | ||
53 | ((struct tape_3590_disc_data*)(device->discdata))->crypt_info | ||
54 | #define TAPE_3590_READ_BACK_OP(device) \ | ||
55 | ((struct tape_3590_disc_data*)(device->discdata))->read_back_op | ||
56 | |||
51 | struct tape_3590_sense { | 57 | struct tape_3590_sense { |
52 | 58 | ||
53 | unsigned int command_rej:1; | 59 | unsigned int command_rej:1; |
@@ -118,7 +124,48 @@ struct tape_3590_sense { | |||
118 | struct tape_3590_med_sense { | 124 | struct tape_3590_med_sense { |
119 | unsigned int macst:4; | 125 | unsigned int macst:4; |
120 | unsigned int masst:4; | 126 | unsigned int masst:4; |
121 | char pad[127]; | 127 | char pad1[7]; |
128 | unsigned int flags; | ||
129 | char pad2[116]; | ||
130 | } __attribute__ ((packed)); | ||
131 | |||
132 | /* Data structures for 3592 encryption support */ | ||
133 | |||
134 | struct tape3592_kekl { | ||
135 | __u8 flags; | ||
136 | char label[64]; | ||
137 | } __attribute__ ((packed)); | ||
138 | |||
139 | struct tape3592_kekl_pair { | ||
140 | __u8 count; | ||
141 | struct tape3592_kekl kekl[2]; | ||
142 | } __attribute__ ((packed)); | ||
143 | |||
144 | struct tape3592_kekl_query_data { | ||
145 | __u16 len; | ||
146 | __u8 fmt; | ||
147 | __u8 mc; | ||
148 | __u32 id; | ||
149 | __u8 flags; | ||
150 | struct tape3592_kekl_pair kekls; | ||
151 | char reserved[116]; | ||
152 | } __attribute__ ((packed)); | ||
153 | |||
154 | struct tape3592_kekl_query_order { | ||
155 | __u8 code; | ||
156 | __u8 flags; | ||
157 | char reserved1[2]; | ||
158 | __u8 max_count; | ||
159 | char reserved2[35]; | ||
160 | } __attribute__ ((packed)); | ||
161 | |||
162 | struct tape3592_kekl_set_order { | ||
163 | __u8 code; | ||
164 | __u8 flags; | ||
165 | char reserved1[2]; | ||
166 | __u8 op; | ||
167 | struct tape3592_kekl_pair kekls; | ||
168 | char reserved2[120]; | ||
122 | } __attribute__ ((packed)); | 169 | } __attribute__ ((packed)); |
123 | 170 | ||
124 | #endif /* _TAPE_3590_H */ | 171 | #endif /* _TAPE_3590_H */ |
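The discipline data now carries a tape390_crypt_info instead of the old modeset_byte, and the two macros above hide the discdata cast. A hypothetical helper, shown only to illustrate how the accessor is meant to be used (the function name is illustrative, not from the patch; it would live in tape_3590.c):

static int tape_3590_crypt_supported(struct tape_device *device)
{
    return (TAPE_3590_CRYPT_INFO(device).capability &
            TAPE390_CRYPT_SUPPORTED_MASK) != 0;
}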
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index c8a89b3b87d4..dd0ecaed592e 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
@@ -73,7 +73,7 @@ tapeblock_trigger_requeue(struct tape_device *device) | |||
73 | /* | 73 | /* |
74 | * Post finished request. | 74 | * Post finished request. |
75 | */ | 75 | */ |
76 | static inline void | 76 | static void |
77 | tapeblock_end_request(struct request *req, int uptodate) | 77 | tapeblock_end_request(struct request *req, int uptodate) |
78 | { | 78 | { |
79 | if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) | 79 | if (end_that_request_first(req, uptodate, req->hard_nr_sectors)) |
@@ -108,7 +108,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data) | |||
108 | /* | 108 | /* |
109 | * Feed the tape device CCW queue with requests supplied in a list. | 109 | * Feed the tape device CCW queue with requests supplied in a list. |
110 | */ | 110 | */ |
111 | static inline int | 111 | static int |
112 | tapeblock_start_request(struct tape_device *device, struct request *req) | 112 | tapeblock_start_request(struct tape_device *device, struct request *req) |
113 | { | 113 | { |
114 | struct tape_request * ccw_req; | 114 | struct tape_request * ccw_req; |
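The tape_block.c hunks only drop the explicit inline from file-local helpers, in line with the "Avoid excessive inlining" cleanup in this merge: a static function stays invisible outside the file, but the compiler now weighs code size before duplicating it at every call site. Sketch of the pattern (demo name, not the driver's):

/* formerly: static inline void demo_end_request(int uptodate) { ... } */
static void demo_end_request(int uptodate)
{
    /* complete the request; gcc may still inline this where it pays off */
}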
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 31198c8f2718..9faea04e11e9 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * character device frontend for tape device driver | 3 | * character device frontend for tape device driver |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2001,2006 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Michael Holzheu <holzheu@de.ibm.com> | 8 | * Michael Holzheu <holzheu@de.ibm.com> |
9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
@@ -89,22 +89,7 @@ tapechar_cleanup_device(struct tape_device *device) | |||
89 | device->nt = NULL; | 89 | device->nt = NULL; |
90 | } | 90 | } |
91 | 91 | ||
92 | /* | 92 | static int |
93 | * Terminate write command (we write two TMs and skip backward over last) | ||
94 | * This ensures that the tape is always correctly terminated. | ||
95 | * When the user writes afterwards a new file, he will overwrite the | ||
96 | * second TM and therefore one TM will remain to separate the | ||
97 | * two files on the tape... | ||
98 | */ | ||
99 | static inline void | ||
100 | tapechar_terminate_write(struct tape_device *device) | ||
101 | { | ||
102 | if (tape_mtop(device, MTWEOF, 1) == 0 && | ||
103 | tape_mtop(device, MTWEOF, 1) == 0) | ||
104 | tape_mtop(device, MTBSR, 1); | ||
105 | } | ||
106 | |||
107 | static inline int | ||
108 | tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) | 93 | tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) |
109 | { | 94 | { |
110 | struct idal_buffer *new; | 95 | struct idal_buffer *new; |
@@ -137,7 +122,7 @@ tapechar_check_idalbuffer(struct tape_device *device, size_t block_size) | |||
137 | /* | 122 | /* |
138 | * Tape device read function | 123 | * Tape device read function |
139 | */ | 124 | */ |
140 | ssize_t | 125 | static ssize_t |
141 | tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) | 126 | tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) |
142 | { | 127 | { |
143 | struct tape_device *device; | 128 | struct tape_device *device; |
@@ -201,7 +186,7 @@ tapechar_read(struct file *filp, char __user *data, size_t count, loff_t *ppos) | |||
201 | /* | 186 | /* |
202 | * Tape device write function | 187 | * Tape device write function |
203 | */ | 188 | */ |
204 | ssize_t | 189 | static ssize_t |
205 | tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) | 190 | tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t *ppos) |
206 | { | 191 | { |
207 | struct tape_device *device; | 192 | struct tape_device *device; |
@@ -291,7 +276,7 @@ tapechar_write(struct file *filp, const char __user *data, size_t count, loff_t | |||
291 | /* | 276 | /* |
292 | * Character frontend tape device open function. | 277 | * Character frontend tape device open function. |
293 | */ | 278 | */ |
294 | int | 279 | static int |
295 | tapechar_open (struct inode *inode, struct file *filp) | 280 | tapechar_open (struct inode *inode, struct file *filp) |
296 | { | 281 | { |
297 | struct tape_device *device; | 282 | struct tape_device *device; |
@@ -326,7 +311,7 @@ tapechar_open (struct inode *inode, struct file *filp) | |||
326 | * Character frontend tape device release function. | 311 | * Character frontend tape device release function. |
327 | */ | 312 | */ |
328 | 313 | ||
329 | int | 314 | static int |
330 | tapechar_release(struct inode *inode, struct file *filp) | 315 | tapechar_release(struct inode *inode, struct file *filp) |
331 | { | 316 | { |
332 | struct tape_device *device; | 317 | struct tape_device *device; |
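tapechar_read/write/open/release lose their external linkage because nothing calls them by name; they are only reached through the file_operations table registered for the character device. A hedged sketch of that shape (demo_* names are illustrative):

#include <linux/fs.h>
#include <linux/module.h>

static ssize_t demo_read(struct file *filp, char __user *buf,
                         size_t count, loff_t *ppos)
{
    return 0;   /* the real routine builds an IDAL buffer and starts I/O */
}

static const struct file_operations demo_fops = {
    .owner = THIS_MODULE,
    .read  = demo_read,     /* the table is the only external reference */
};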
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index c6c2e918b990..e2a8a1a04bab 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * basic function of the tape device driver | 3 | * basic function of the tape device driver |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2001,2006 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Michael Holzheu <holzheu@de.ibm.com> | 8 | * Michael Holzheu <holzheu@de.ibm.com> |
9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
@@ -26,9 +26,11 @@ | |||
26 | #include "tape_std.h" | 26 | #include "tape_std.h" |
27 | 27 | ||
28 | #define PRINTK_HEADER "TAPE_CORE: " | 28 | #define PRINTK_HEADER "TAPE_CORE: " |
29 | #define LONG_BUSY_TIMEOUT 180 /* seconds */ | ||
29 | 30 | ||
30 | static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); | 31 | static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *); |
31 | static void tape_delayed_next_request(struct work_struct *); | 32 | static void tape_delayed_next_request(struct work_struct *); |
33 | static void tape_long_busy_timeout(unsigned long data); | ||
32 | 34 | ||
33 | /* | 35 | /* |
34 | * One list to contain all tape devices of all disciplines, so | 36 | * One list to contain all tape devices of all disciplines, so |
@@ -69,10 +71,12 @@ const char *tape_op_verbose[TO_SIZE] = | |||
69 | [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", | 71 | [TO_LOAD] = "LOA", [TO_READ_CONFIG] = "RCF", |
70 | [TO_READ_ATTMSG] = "RAT", | 72 | [TO_READ_ATTMSG] = "RAT", |
71 | [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", | 73 | [TO_DIS] = "DIS", [TO_ASSIGN] = "ASS", |
72 | [TO_UNASSIGN] = "UAS" | 74 | [TO_UNASSIGN] = "UAS", [TO_CRYPT_ON] = "CON", |
75 | [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS", | ||
76 | [TO_KEKL_QUERY] = "KLQ", | ||
73 | }; | 77 | }; |
74 | 78 | ||
75 | static inline int | 79 | static int |
76 | busid_to_int(char *bus_id) | 80 | busid_to_int(char *bus_id) |
77 | { | 81 | { |
78 | int dec; | 82 | int dec; |
@@ -252,7 +256,7 @@ tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) | |||
252 | /* | 256 | /* |
253 | * Stop running ccw. Has to be called with the device lock held. | 257 | * Stop running ccw. Has to be called with the device lock held. |
254 | */ | 258 | */ |
255 | static inline int | 259 | static int |
256 | __tape_cancel_io(struct tape_device *device, struct tape_request *request) | 260 | __tape_cancel_io(struct tape_device *device, struct tape_request *request) |
257 | { | 261 | { |
258 | int retries; | 262 | int retries; |
@@ -346,6 +350,9 @@ tape_generic_online(struct tape_device *device, | |||
346 | return -EINVAL; | 350 | return -EINVAL; |
347 | } | 351 | } |
348 | 352 | ||
353 | init_timer(&device->lb_timeout); | ||
354 | device->lb_timeout.function = tape_long_busy_timeout; | ||
355 | |||
349 | /* Let the discipline have a go at the device. */ | 356 | /* Let the discipline have a go at the device. */ |
350 | device->discipline = discipline; | 357 | device->discipline = discipline; |
351 | if (!try_module_get(discipline->owner)) { | 358 | if (!try_module_get(discipline->owner)) { |
@@ -385,7 +392,7 @@ out: | |||
385 | return rc; | 392 | return rc; |
386 | } | 393 | } |
387 | 394 | ||
388 | static inline void | 395 | static void |
389 | tape_cleanup_device(struct tape_device *device) | 396 | tape_cleanup_device(struct tape_device *device) |
390 | { | 397 | { |
391 | tapeblock_cleanup_device(device); | 398 | tapeblock_cleanup_device(device); |
@@ -563,7 +570,7 @@ tape_generic_probe(struct ccw_device *cdev) | |||
563 | return ret; | 570 | return ret; |
564 | } | 571 | } |
565 | 572 | ||
566 | static inline void | 573 | static void |
567 | __tape_discard_requests(struct tape_device *device) | 574 | __tape_discard_requests(struct tape_device *device) |
568 | { | 575 | { |
569 | struct tape_request * request; | 576 | struct tape_request * request; |
@@ -703,7 +710,7 @@ tape_free_request (struct tape_request * request) | |||
703 | kfree(request); | 710 | kfree(request); |
704 | } | 711 | } |
705 | 712 | ||
706 | static inline int | 713 | static int |
707 | __tape_start_io(struct tape_device *device, struct tape_request *request) | 714 | __tape_start_io(struct tape_device *device, struct tape_request *request) |
708 | { | 715 | { |
709 | int rc; | 716 | int rc; |
@@ -733,7 +740,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request) | |||
733 | return rc; | 740 | return rc; |
734 | } | 741 | } |
735 | 742 | ||
736 | static inline void | 743 | static void |
737 | __tape_start_next_request(struct tape_device *device) | 744 | __tape_start_next_request(struct tape_device *device) |
738 | { | 745 | { |
739 | struct list_head *l, *n; | 746 | struct list_head *l, *n; |
@@ -801,7 +808,23 @@ tape_delayed_next_request(struct work_struct *work) | |||
801 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 808 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
802 | } | 809 | } |
803 | 810 | ||
804 | static inline void | 811 | static void tape_long_busy_timeout(unsigned long data) |
812 | { | ||
813 | struct tape_request *request; | ||
814 | struct tape_device *device; | ||
815 | |||
816 | device = (struct tape_device *) data; | ||
817 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
818 | request = list_entry(device->req_queue.next, struct tape_request, list); | ||
819 | if (request->status != TAPE_REQUEST_LONG_BUSY) | ||
820 | BUG(); | ||
821 | DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); | ||
822 | __tape_start_next_request(device); | ||
823 | device->lb_timeout.data = (unsigned long) tape_put_device(device); | ||
824 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
825 | } | ||
826 | |||
827 | static void | ||
805 | __tape_end_request( | 828 | __tape_end_request( |
806 | struct tape_device * device, | 829 | struct tape_device * device, |
807 | struct tape_request * request, | 830 | struct tape_request * request, |
@@ -878,7 +901,7 @@ tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request, | |||
878 | * and starts it if the tape is idle. Has to be called with | 901 | * and starts it if the tape is idle. Has to be called with |
879 | * the device lock held. | 902 | * the device lock held. |
880 | */ | 903 | */ |
881 | static inline int | 904 | static int |
882 | __tape_start_request(struct tape_device *device, struct tape_request *request) | 905 | __tape_start_request(struct tape_device *device, struct tape_request *request) |
883 | { | 906 | { |
884 | int rc; | 907 | int rc; |
@@ -1094,7 +1117,22 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1094 | /* May be an unsolicited irq */ | 1117 | /* May be an unsolicited irq */ |
1095 | if(request != NULL) | 1118 | if(request != NULL) |
1096 | request->rescnt = irb->scsw.count; | 1119 | request->rescnt = irb->scsw.count; |
1097 | 1120 | else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) && | |
1121 | !list_empty(&device->req_queue)) { | ||
1122 | /* Not Ready to Ready after long busy ? */ | ||
1123 | struct tape_request *req; | ||
1124 | req = list_entry(device->req_queue.next, | ||
1125 | struct tape_request, list); | ||
1126 | if (req->status == TAPE_REQUEST_LONG_BUSY) { | ||
1127 | DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); | ||
1128 | if (del_timer(&device->lb_timeout)) { | ||
1129 | device->lb_timeout.data = (unsigned long) | ||
1130 | tape_put_device(device); | ||
1131 | __tape_start_next_request(device); | ||
1132 | } | ||
1133 | return; | ||
1134 | } | ||
1135 | } | ||
1098 | if (irb->scsw.dstat != 0x0c) { | 1136 | if (irb->scsw.dstat != 0x0c) { |
1099 | /* Set the 'ONLINE' flag depending on sense byte 1 */ | 1137 | /* Set the 'ONLINE' flag depending on sense byte 1 */ |
1100 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) | 1138 | if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) |
@@ -1142,6 +1180,15 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1142 | break; | 1180 | break; |
1143 | case TAPE_IO_PENDING: | 1181 | case TAPE_IO_PENDING: |
1144 | break; | 1182 | break; |
1183 | case TAPE_IO_LONG_BUSY: | ||
1184 | device->lb_timeout.data = | ||
1185 | (unsigned long)tape_get_device_reference(device); | ||
1186 | device->lb_timeout.expires = jiffies + | ||
1187 | LONG_BUSY_TIMEOUT * HZ; | ||
1188 | DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); | ||
1189 | add_timer(&device->lb_timeout); | ||
1190 | request->status = TAPE_REQUEST_LONG_BUSY; | ||
1191 | break; | ||
1145 | case TAPE_IO_RETRY: | 1192 | case TAPE_IO_RETRY: |
1146 | rc = __tape_start_io(device, request); | 1193 | rc = __tape_start_io(device, request); |
1147 | if (rc) | 1194 | if (rc) |
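The new TAPE_IO_LONG_BUSY handling parks the current request, takes a device reference and arms a 180 second timer; the interrupt handler cancels it with del_timer() when the drive reports not-ready-to-ready, and tape_long_busy_timeout() restarts the queue if it fires first. A sketch of that arm/cancel pattern with the 2.6.20-era timer API (demo_* names are illustrative, not the driver's code):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define DEMO_LONG_BUSY_TIMEOUT 180          /* seconds, as in the patch */

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
    /* cast the cookie back to the device and restart the queued request */
}

static void demo_arm(unsigned long cookie)
{
    init_timer(&demo_timer);
    demo_timer.function = demo_timeout;
    demo_timer.data     = cookie;           /* e.g. a device reference */
    demo_timer.expires  = jiffies + DEMO_LONG_BUSY_TIMEOUT * HZ;
    add_timer(&demo_timer);
}

static void demo_cancel(void)
{
    /* del_timer() returns nonzero only if the timer was still pending,
     * which tells the caller it now owns the reference taken at arm time */
    if (del_timer(&demo_timer)) {
        /* drop the reference and restart the next request */
    }
}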
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 09844621edc0..bc33068b9ce2 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -36,7 +36,7 @@ | |||
36 | struct tty_driver *tty3270_driver; | 36 | struct tty_driver *tty3270_driver; |
37 | static int tty3270_max_index; | 37 | static int tty3270_max_index; |
38 | 38 | ||
39 | struct raw3270_fn tty3270_fn; | 39 | static struct raw3270_fn tty3270_fn; |
40 | 40 | ||
41 | struct tty3270_cell { | 41 | struct tty3270_cell { |
42 | unsigned char character; | 42 | unsigned char character; |
@@ -119,8 +119,7 @@ static void tty3270_update(struct tty3270 *); | |||
119 | /* | 119 | /* |
120 | * Setup timeout for a device. On timeout trigger an update. | 120 | * Setup timeout for a device. On timeout trigger an update. |
121 | */ | 121 | */ |
122 | void | 122 | static void tty3270_set_timer(struct tty3270 *tp, int expires) |
123 | tty3270_set_timer(struct tty3270 *tp, int expires) | ||
124 | { | 123 | { |
125 | if (expires == 0) { | 124 | if (expires == 0) { |
126 | if (timer_pending(&tp->timer) && del_timer(&tp->timer)) | 125 | if (timer_pending(&tp->timer) && del_timer(&tp->timer)) |
@@ -841,7 +840,7 @@ tty3270_del_views(void) | |||
841 | } | 840 | } |
842 | } | 841 | } |
843 | 842 | ||
844 | struct raw3270_fn tty3270_fn = { | 843 | static struct raw3270_fn tty3270_fn = { |
845 | .activate = tty3270_activate, | 844 | .activate = tty3270_activate, |
846 | .deactivate = tty3270_deactivate, | 845 | .deactivate = tty3270_deactivate, |
847 | .intv = (void *) tty3270_irq, | 846 | .intv = (void *) tty3270_irq, |
@@ -1754,8 +1753,7 @@ static const struct tty_operations tty3270_ops = { | |||
1754 | .set_termios = tty3270_set_termios | 1753 | .set_termios = tty3270_set_termios |
1755 | }; | 1754 | }; |
1756 | 1755 | ||
1757 | void | 1756 | static void tty3270_notifier(int index, int active) |
1758 | tty3270_notifier(int index, int active) | ||
1759 | { | 1757 | { |
1760 | if (active) | 1758 | if (active) |
1761 | tty_register_device(tty3270_driver, index, NULL); | 1759 | tty_register_device(tty3270_driver, index, NULL); |
@@ -1767,8 +1765,7 @@ tty3270_notifier(int index, int active) | |||
1767 | * 3270 tty registration code called from tty_init(). | 1765 | * 3270 tty registration code called from tty_init(). |
1768 | * Most kernel services (incl. kmalloc) are available at this poimt. | 1766 | * Most kernel services (incl. kmalloc) are available at this poimt. |
1769 | */ | 1767 | */ |
1770 | int __init | 1768 | static int __init tty3270_init(void) |
1771 | tty3270_init(void) | ||
1772 | { | 1769 | { |
1773 | struct tty_driver *driver; | 1770 | struct tty_driver *driver; |
1774 | int ret; | 1771 | int ret; |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 6cb23040954b..4f894dc2373b 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -128,9 +128,8 @@ static iucv_interrupt_ops_t vmlogrdr_iucvops = { | |||
128 | .MessagePending = vmlogrdr_iucv_MessagePending, | 128 | .MessagePending = vmlogrdr_iucv_MessagePending, |
129 | }; | 129 | }; |
130 | 130 | ||
131 | 131 | static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); | |
132 | DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue); | 132 | static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); |
133 | DECLARE_WAIT_QUEUE_HEAD(read_wait_queue); | ||
134 | 133 | ||
135 | /* | 134 | /* |
136 | * pointer to system service private structure | 135 | * pointer to system service private structure |
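The only change here is internal linkage for the two wait queue heads. For context, a generic sketch of how such a file-local wait queue is used (not vmlogrdr's actual code):

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static int demo_ready;

static int demo_wait_for_data(void)         /* reader side */
{
    return wait_event_interruptible(demo_wait, demo_ready != 0);
}

static void demo_data_arrived(void)         /* e.g. IUCV callback side */
{
    demo_ready = 1;
    wake_up(&demo_wait);
}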
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 12c2d6b746e6..aa65df4dfced 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -43,7 +43,7 @@ typedef enum {add, free} range_action; | |||
43 | * Function: blacklist_range | 43 | * Function: blacklist_range |
44 | * (Un-)blacklist the devices from-to | 44 | * (Un-)blacklist the devices from-to |
45 | */ | 45 | */ |
46 | static inline void | 46 | static void |
47 | blacklist_range (range_action action, unsigned int from, unsigned int to, | 47 | blacklist_range (range_action action, unsigned int from, unsigned int to, |
48 | unsigned int ssid) | 48 | unsigned int ssid) |
49 | { | 49 | { |
@@ -69,7 +69,7 @@ blacklist_range (range_action action, unsigned int from, unsigned int to, | |||
69 | * Get devno/busid from given string. | 69 | * Get devno/busid from given string. |
70 | * Shamelessly grabbed from dasd_devmap.c. | 70 | * Shamelessly grabbed from dasd_devmap.c. |
71 | */ | 71 | */ |
72 | static inline int | 72 | static int |
73 | blacklist_busid(char **str, int *id0, int *ssid, int *devno) | 73 | blacklist_busid(char **str, int *id0, int *ssid, int *devno) |
74 | { | 74 | { |
75 | int val, old_style; | 75 | int val, old_style; |
@@ -123,10 +123,10 @@ confused: | |||
123 | return 1; | 123 | return 1; |
124 | } | 124 | } |
125 | 125 | ||
126 | static inline int | 126 | static int |
127 | blacklist_parse_parameters (char *str, range_action action) | 127 | blacklist_parse_parameters (char *str, range_action action) |
128 | { | 128 | { |
129 | unsigned int from, to, from_id0, to_id0, from_ssid, to_ssid; | 129 | int from, to, from_id0, to_id0, from_ssid, to_ssid; |
130 | 130 | ||
131 | while (*str != 0 && *str != '\n') { | 131 | while (*str != 0 && *str != '\n') { |
132 | range_action ra = action; | 132 | range_action ra = action; |
@@ -227,7 +227,7 @@ is_blacklisted (int ssid, int devno) | |||
227 | * Function: blacklist_parse_proc_parameters | 227 | * Function: blacklist_parse_proc_parameters |
228 | * parse the stuff which is piped to /proc/cio_ignore | 228 | * parse the stuff which is piped to /proc/cio_ignore |
229 | */ | 229 | */ |
230 | static inline void | 230 | static void |
231 | blacklist_parse_proc_parameters (char *buf) | 231 | blacklist_parse_proc_parameters (char *buf) |
232 | { | 232 | { |
233 | if (strncmp (buf, "free ", 5) == 0) { | 233 | if (strncmp (buf, "free ", 5) == 0) { |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 38954f5cd14c..d48e3ca4752c 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -53,7 +53,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer, | |||
53 | 53 | ||
54 | static struct bus_type ccwgroup_bus_type; | 54 | static struct bus_type ccwgroup_bus_type; |
55 | 55 | ||
56 | static inline void | 56 | static void |
57 | __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) | 57 | __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) |
58 | { | 58 | { |
59 | int i; | 59 | int i; |
@@ -104,7 +104,7 @@ ccwgroup_release (struct device *dev) | |||
104 | kfree(gdev); | 104 | kfree(gdev); |
105 | } | 105 | } |
106 | 106 | ||
107 | static inline int | 107 | static int |
108 | __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) | 108 | __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) |
109 | { | 109 | { |
110 | char str[8]; | 110 | char str[8]; |
@@ -424,7 +424,7 @@ ccwgroup_probe_ccwdev(struct ccw_device *cdev) | |||
424 | return 0; | 424 | return 0; |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline struct ccwgroup_device * | 427 | static struct ccwgroup_device * |
428 | __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) | 428 | __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) |
429 | { | 429 | { |
430 | struct ccwgroup_device *gdev; | 430 | struct ccwgroup_device *gdev; |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index cbab8d2ce5cf..6f05a44e3817 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -93,7 +93,7 @@ chsc_get_sch_desc_irq(struct subchannel *sch, void *page) | |||
93 | u16 sch; /* subchannel */ | 93 | u16 sch; /* subchannel */ |
94 | u8 chpid[8]; /* chpids 0-7 */ | 94 | u8 chpid[8]; /* chpids 0-7 */ |
95 | u16 fla[8]; /* full link addresses 0-7 */ | 95 | u16 fla[8]; /* full link addresses 0-7 */ |
96 | } *ssd_area; | 96 | } __attribute__ ((packed)) *ssd_area; |
97 | 97 | ||
98 | ssd_area = page; | 98 | ssd_area = page; |
99 | 99 | ||
@@ -277,7 +277,7 @@ out_unreg: | |||
277 | return 0; | 277 | return 0; |
278 | } | 278 | } |
279 | 279 | ||
280 | static inline void | 280 | static void |
281 | s390_set_chpid_offline( __u8 chpid) | 281 | s390_set_chpid_offline( __u8 chpid) |
282 | { | 282 | { |
283 | char dbf_txt[15]; | 283 | char dbf_txt[15]; |
@@ -338,7 +338,7 @@ s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch) | |||
338 | return 0x80 >> chp; | 338 | return 0x80 >> chp; |
339 | } | 339 | } |
340 | 340 | ||
341 | static inline int | 341 | static int |
342 | s390_process_res_acc_new_sch(struct subchannel_id schid) | 342 | s390_process_res_acc_new_sch(struct subchannel_id schid) |
343 | { | 343 | { |
344 | struct schib schib; | 344 | struct schib schib; |
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data) | |||
444 | u32 andesc[28]; | 444 | u32 andesc[28]; |
445 | /* incident-specific information */ | 445 | /* incident-specific information */ |
446 | u32 isinfo[28]; | 446 | u32 isinfo[28]; |
447 | } *lir; | 447 | } __attribute__ ((packed)) *lir; |
448 | 448 | ||
449 | lir = data; | 449 | lir = data; |
450 | if (!(lir->iq&0x80)) | 450 | if (!(lir->iq&0x80)) |
@@ -461,154 +461,146 @@ __get_chpid_from_lir(void *data) | |||
461 | return (u16) (lir->indesc[0]&0x000000ff); | 461 | return (u16) (lir->indesc[0]&0x000000ff); |
462 | } | 462 | } |
463 | 463 | ||
464 | int | 464 | struct chsc_sei_area { |
465 | chsc_process_crw(void) | 465 | struct chsc_header request; |
466 | u32 reserved1; | ||
467 | u32 reserved2; | ||
468 | u32 reserved3; | ||
469 | struct chsc_header response; | ||
470 | u32 reserved4; | ||
471 | u8 flags; | ||
472 | u8 vf; /* validity flags */ | ||
473 | u8 rs; /* reporting source */ | ||
474 | u8 cc; /* content code */ | ||
475 | u16 fla; /* full link address */ | ||
476 | u16 rsid; /* reporting source id */ | ||
477 | u32 reserved5; | ||
478 | u32 reserved6; | ||
479 | u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */ | ||
480 | /* ccdf has to be big enough for a link-incident record */ | ||
481 | } __attribute__ ((packed)); | ||
482 | |||
483 | static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) | ||
484 | { | ||
485 | int chpid; | ||
486 | |||
487 | CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n", | ||
488 | sei_area->rs, sei_area->rsid); | ||
489 | if (sei_area->rs != 4) | ||
490 | return 0; | ||
491 | chpid = __get_chpid_from_lir(sei_area->ccdf); | ||
492 | if (chpid < 0) | ||
493 | CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n"); | ||
494 | else | ||
495 | s390_set_chpid_offline(chpid); | ||
496 | |||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) | ||
466 | { | 501 | { |
467 | int chpid, ret; | ||
468 | struct res_acc_data res_data; | 502 | struct res_acc_data res_data; |
469 | struct { | 503 | struct device *dev; |
470 | struct chsc_header request; | 504 | int status; |
471 | u32 reserved1; | 505 | int rc; |
472 | u32 reserved2; | 506 | |
473 | u32 reserved3; | 507 | CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, " |
474 | struct chsc_header response; | 508 | "rs_id=%04x)\n", sei_area->rs, sei_area->rsid); |
475 | u32 reserved4; | 509 | if (sei_area->rs != 4) |
476 | u8 flags; | 510 | return 0; |
477 | u8 vf; /* validity flags */ | 511 | /* allocate a new channel path structure, if needed */ |
478 | u8 rs; /* reporting source */ | 512 | status = get_chp_status(sei_area->rsid); |
479 | u8 cc; /* content code */ | 513 | if (status < 0) |
480 | u16 fla; /* full link address */ | 514 | new_channel_path(sei_area->rsid); |
481 | u16 rsid; /* reporting source id */ | 515 | else if (!status) |
482 | u32 reserved5; | 516 | return 0; |
483 | u32 reserved6; | 517 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); |
484 | u32 ccdf[96]; /* content-code dependent field */ | 518 | memset(&res_data, 0, sizeof(struct res_acc_data)); |
485 | /* ccdf has to be big enough for a link-incident record */ | 519 | res_data.chp = to_channelpath(dev); |
486 | } *sei_area; | 520 | if ((sei_area->vf & 0xc0) != 0) { |
521 | res_data.fla = sei_area->fla; | ||
522 | if ((sei_area->vf & 0xc0) == 0xc0) | ||
523 | /* full link address */ | ||
524 | res_data.fla_mask = 0xffff; | ||
525 | else | ||
526 | /* link address */ | ||
527 | res_data.fla_mask = 0xff00; | ||
528 | } | ||
529 | rc = s390_process_res_acc(&res_data); | ||
530 | put_device(dev); | ||
531 | |||
532 | return rc; | ||
533 | } | ||
534 | |||
535 | static int chsc_process_sei(struct chsc_sei_area *sei_area) | ||
536 | { | ||
537 | int rc; | ||
538 | |||
539 | /* Check if we might have lost some information. */ | ||
540 | if (sei_area->flags & 0x40) | ||
541 | CIO_CRW_EVENT(2, "chsc: event overflow\n"); | ||
542 | /* which kind of information was stored? */ | ||
543 | rc = 0; | ||
544 | switch (sei_area->cc) { | ||
545 | case 1: /* link incident*/ | ||
546 | rc = chsc_process_sei_link_incident(sei_area); | ||
547 | break; | ||
548 | case 2: /* i/o resource accessibility */ | ||
549 | rc = chsc_process_sei_res_acc(sei_area); | ||
550 | break; | ||
551 | default: /* other stuff */ | ||
552 | CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n", | ||
553 | sei_area->cc); | ||
554 | break; | ||
555 | } | ||
556 | |||
557 | return rc; | ||
558 | } | ||
559 | |||
560 | int chsc_process_crw(void) | ||
561 | { | ||
562 | struct chsc_sei_area *sei_area; | ||
563 | int ret; | ||
564 | int rc; | ||
487 | 565 | ||
488 | if (!sei_page) | 566 | if (!sei_page) |
489 | return 0; | 567 | return 0; |
490 | /* | 568 | /* Access to sei_page is serialized through machine check handler |
491 | * build the chsc request block for store event information | 569 | * thread, so no need for locking. */ |
492 | * and do the call | ||
493 | * This function is only called by the machine check handler thread, | ||
494 | * so we don't need locking for the sei_page. | ||
495 | */ | ||
496 | sei_area = sei_page; | 570 | sei_area = sei_page; |
497 | 571 | ||
498 | CIO_TRACE_EVENT( 2, "prcss"); | 572 | CIO_TRACE_EVENT( 2, "prcss"); |
499 | ret = 0; | 573 | ret = 0; |
500 | do { | 574 | do { |
501 | int ccode, status; | ||
502 | struct device *dev; | ||
503 | memset(sei_area, 0, sizeof(*sei_area)); | 575 | memset(sei_area, 0, sizeof(*sei_area)); |
504 | memset(&res_data, 0, sizeof(struct res_acc_data)); | ||
505 | sei_area->request.length = 0x0010; | 576 | sei_area->request.length = 0x0010; |
506 | sei_area->request.code = 0x000e; | 577 | sei_area->request.code = 0x000e; |
578 | if (chsc(sei_area)) | ||
579 | break; | ||
507 | 580 | ||
508 | ccode = chsc(sei_area); | 581 | if (sei_area->response.code == 0x0001) { |
509 | if (ccode > 0) | 582 | CIO_CRW_EVENT(4, "chsc: sei successful\n"); |
510 | return 0; | 583 | rc = chsc_process_sei(sei_area); |
511 | 584 | if (rc) | |
512 | switch (sei_area->response.code) { | 585 | ret = rc; |
513 | /* for debug purposes, check for problems */ | 586 | } else { |
514 | case 0x0001: | 587 | CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", |
515 | CIO_CRW_EVENT(4, "chsc_process_crw: event information " | ||
516 | "successfully stored\n"); | ||
517 | break; /* everything ok */ | ||
518 | case 0x0002: | ||
519 | CIO_CRW_EVENT(2, | ||
520 | "chsc_process_crw: invalid command!\n"); | ||
521 | return 0; | ||
522 | case 0x0003: | ||
523 | CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc " | ||
524 | "request block!\n"); | ||
525 | return 0; | ||
526 | case 0x0005: | ||
527 | CIO_CRW_EVENT(2, "chsc_process_crw: no event " | ||
528 | "information stored\n"); | ||
529 | return 0; | ||
530 | default: | ||
531 | CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n", | ||
532 | sei_area->response.code); | 588 | sei_area->response.code); |
533 | return 0; | 589 | ret = 0; |
534 | } | ||
535 | |||
536 | /* Check if we might have lost some information. */ | ||
537 | if (sei_area->flags & 0x40) | ||
538 | CIO_CRW_EVENT(2, "chsc_process_crw: Event information " | ||
539 | "has been lost due to overflow!\n"); | ||
540 | |||
541 | if (sei_area->rs != 4) { | ||
542 | CIO_CRW_EVENT(2, "chsc_process_crw: reporting source " | ||
543 | "(%04X) isn't a chpid!\n", | ||
544 | sei_area->rsid); | ||
545 | continue; | ||
546 | } | ||
547 | |||
548 | /* which kind of information was stored? */ | ||
549 | switch (sei_area->cc) { | ||
550 | case 1: /* link incident*/ | ||
551 | CIO_CRW_EVENT(4, "chsc_process_crw: " | ||
552 | "channel subsystem reports link incident," | ||
553 | " reporting source is chpid %x\n", | ||
554 | sei_area->rsid); | ||
555 | chpid = __get_chpid_from_lir(sei_area->ccdf); | ||
556 | if (chpid < 0) | ||
557 | CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n", | ||
558 | __FUNCTION__); | ||
559 | else | ||
560 | s390_set_chpid_offline(chpid); | ||
561 | break; | ||
562 | |||
563 | case 2: /* i/o resource accessibiliy */ | ||
564 | CIO_CRW_EVENT(4, "chsc_process_crw: " | ||
565 | "channel subsystem reports some I/O " | ||
566 | "devices may have become accessible\n"); | ||
567 | pr_debug("Data received after sei: \n"); | ||
568 | pr_debug("Validity flags: %x\n", sei_area->vf); | ||
569 | |||
570 | /* allocate a new channel path structure, if needed */ | ||
571 | status = get_chp_status(sei_area->rsid); | ||
572 | if (status < 0) | ||
573 | new_channel_path(sei_area->rsid); | ||
574 | else if (!status) | ||
575 | break; | ||
576 | dev = get_device(&css[0]->chps[sei_area->rsid]->dev); | ||
577 | res_data.chp = to_channelpath(dev); | ||
578 | pr_debug("chpid: %x", sei_area->rsid); | ||
579 | if ((sei_area->vf & 0xc0) != 0) { | ||
580 | res_data.fla = sei_area->fla; | ||
581 | if ((sei_area->vf & 0xc0) == 0xc0) { | ||
582 | pr_debug(" full link addr: %x", | ||
583 | sei_area->fla); | ||
584 | res_data.fla_mask = 0xffff; | ||
585 | } else { | ||
586 | pr_debug(" link addr: %x", | ||
587 | sei_area->fla); | ||
588 | res_data.fla_mask = 0xff00; | ||
589 | } | ||
590 | } | ||
591 | ret = s390_process_res_acc(&res_data); | ||
592 | pr_debug("\n\n"); | ||
593 | put_device(dev); | ||
594 | break; | ||
595 | |||
596 | default: /* other stuff */ | ||
597 | CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n", | ||
598 | sei_area->cc); | ||
599 | break; | 590 | break; |
600 | } | 591 | } |
601 | } while (sei_area->flags & 0x80); | 592 | } while (sei_area->flags & 0x80); |
593 | |||
602 | return ret; | 594 | return ret; |
603 | } | 595 | } |
604 | 596 | ||
605 | static inline int | 597 | static int |
606 | __chp_add_new_sch(struct subchannel_id schid) | 598 | __chp_add_new_sch(struct subchannel_id schid) |
607 | { | 599 | { |
608 | struct schib schib; | 600 | struct schib schib; |
609 | int ret; | 601 | int ret; |
610 | 602 | ||
611 | if (stsch(schid, &schib)) | 603 | if (stsch_err(schid, &schib)) |
612 | /* We're through */ | 604 | /* We're through */ |
613 | return need_rescan ? -EAGAIN : -ENXIO; | 605 | return need_rescan ? -EAGAIN : -ENXIO; |
614 | 606 | ||
@@ -709,7 +701,7 @@ chp_process_crw(int chpid, int on) | |||
709 | return chp_add(chpid); | 701 | return chp_add(chpid); |
710 | } | 702 | } |
711 | 703 | ||
712 | static inline int check_for_io_on_path(struct subchannel *sch, int index) | 704 | static int check_for_io_on_path(struct subchannel *sch, int index) |
713 | { | 705 | { |
714 | int cc; | 706 | int cc; |
715 | 707 | ||
@@ -741,7 +733,7 @@ static void terminate_internal_io(struct subchannel *sch) | |||
741 | sch->driver->termination(&sch->dev); | 733 | sch->driver->termination(&sch->dev); |
742 | } | 734 | } |
743 | 735 | ||
744 | static inline void | 736 | static void |
745 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) | 737 | __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on) |
746 | { | 738 | { |
747 | int chp, old_lpm; | 739 | int chp, old_lpm; |
@@ -967,8 +959,8 @@ static struct bin_attribute chp_measurement_attr = { | |||
967 | static void | 959 | static void |
968 | chsc_remove_chp_cmg_attr(struct channel_path *chp) | 960 | chsc_remove_chp_cmg_attr(struct channel_path *chp) |
969 | { | 961 | { |
970 | sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_chars_attr); | 962 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); |
971 | sysfs_remove_bin_file(&chp->dev.kobj, &chp_measurement_attr); | 963 | device_remove_bin_file(&chp->dev, &chp_measurement_attr); |
972 | } | 964 | } |
973 | 965 | ||
974 | static int | 966 | static int |
@@ -976,14 +968,12 @@ chsc_add_chp_cmg_attr(struct channel_path *chp) | |||
976 | { | 968 | { |
977 | int ret; | 969 | int ret; |
978 | 970 | ||
979 | ret = sysfs_create_bin_file(&chp->dev.kobj, | 971 | ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr); |
980 | &chp_measurement_chars_attr); | ||
981 | if (ret) | 972 | if (ret) |
982 | return ret; | 973 | return ret; |
983 | ret = sysfs_create_bin_file(&chp->dev.kobj, &chp_measurement_attr); | 974 | ret = device_create_bin_file(&chp->dev, &chp_measurement_attr); |
984 | if (ret) | 975 | if (ret) |
985 | sysfs_remove_bin_file(&chp->dev.kobj, | 976 | device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr); |
986 | &chp_measurement_chars_attr); | ||
987 | return ret; | 977 | return ret; |
988 | } | 978 | } |
989 | 979 | ||
@@ -1042,7 +1032,7 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
1042 | u32 : 4; | 1032 | u32 : 4; |
1043 | u32 fmt : 4; | 1033 | u32 fmt : 4; |
1044 | u32 : 16; | 1034 | u32 : 16; |
1045 | } *secm_area; | 1035 | } __attribute__ ((packed)) *secm_area; |
1046 | int ret, ccode; | 1036 | int ret, ccode; |
1047 | 1037 | ||
1048 | secm_area = page; | 1038 | secm_area = page; |
@@ -1253,7 +1243,7 @@ chsc_determine_channel_path_description(int chpid, | |||
1253 | struct chsc_header response; | 1243 | struct chsc_header response; |
1254 | u32 zeroes2; | 1244 | u32 zeroes2; |
1255 | struct channel_path_desc desc; | 1245 | struct channel_path_desc desc; |
1256 | } *scpd_area; | 1246 | } __attribute__ ((packed)) *scpd_area; |
1257 | 1247 | ||
1258 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1248 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1259 | if (!scpd_area) | 1249 | if (!scpd_area) |
@@ -1350,7 +1340,7 @@ chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
1350 | u32 cmg : 8; | 1340 | u32 cmg : 8; |
1351 | u32 zeroes3; | 1341 | u32 zeroes3; |
1352 | u32 data[NR_MEASUREMENT_CHARS]; | 1342 | u32 data[NR_MEASUREMENT_CHARS]; |
1353 | } *scmc_area; | 1343 | } __attribute__ ((packed)) *scmc_area; |
1354 | 1344 | ||
1355 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1345 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1356 | if (!scmc_area) | 1346 | if (!scmc_area) |
@@ -1517,7 +1507,7 @@ chsc_enable_facility(int operation_code) | |||
1517 | u32 reserved5:4; | 1507 | u32 reserved5:4; |
1518 | u32 format2:4; | 1508 | u32 format2:4; |
1519 | u32 reserved6:24; | 1509 | u32 reserved6:24; |
1520 | } *sda_area; | 1510 | } __attribute__ ((packed)) *sda_area; |
1521 | 1511 | ||
1522 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); | 1512 | sda_area = (void *)get_zeroed_page(GFP_KERNEL|GFP_DMA); |
1523 | if (!sda_area) | 1513 | if (!sda_area) |
@@ -1569,7 +1559,7 @@ chsc_determine_css_characteristics(void) | |||
1569 | u32 reserved4; | 1559 | u32 reserved4; |
1570 | u32 general_char[510]; | 1560 | u32 general_char[510]; |
1571 | u32 chsc_char[518]; | 1561 | u32 chsc_char[518]; |
1572 | } *scsc_area; | 1562 | } __attribute__ ((packed)) *scsc_area; |
1573 | 1563 | ||
1574 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1564 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1575 | if (!scsc_area) { | 1565 | if (!scsc_area) { |
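The monolithic chsc_process_crw() is split so that the store-event-information loop, the overflow check and the per-content-code handling live in separate functions. The shape of that refactoring, reduced to plain C with made-up names:

struct demo_sei {
    unsigned char flags;    /* 0x80: more records pending, 0x40: overflow */
    unsigned char cc;       /* content code */
};

static int demo_link_incident(struct demo_sei *sei) { return 0; }   /* cc == 1 */
static int demo_res_acc(struct demo_sei *sei)       { return 0; }   /* cc == 2 */

static int demo_dispatch(struct demo_sei *sei)
{
    switch (sei->cc) {
    case 1:  return demo_link_incident(sei);
    case 2:  return demo_res_acc(sei);
    default: return 0;      /* unhandled content codes are only logged */
    }
}

static int demo_process(struct demo_sei *(*fetch)(void))
{
    struct demo_sei *sei;
    int rc = 0;

    do {
        sei = fetch();      /* issue the CHSC and return the record, or NULL */
        if (!sei)
            break;
        rc = demo_dispatch(sei);
    } while (sei->flags & 0x80);
    return rc;
}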
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index a259245780ae..0fb2b024208f 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -10,17 +10,17 @@ | |||
10 | struct chsc_header { | 10 | struct chsc_header { |
11 | u16 length; | 11 | u16 length; |
12 | u16 code; | 12 | u16 code; |
13 | }; | 13 | } __attribute__ ((packed)); |
14 | 14 | ||
15 | #define NR_MEASUREMENT_CHARS 5 | 15 | #define NR_MEASUREMENT_CHARS 5 |
16 | struct cmg_chars { | 16 | struct cmg_chars { |
17 | u32 values[NR_MEASUREMENT_CHARS]; | 17 | u32 values[NR_MEASUREMENT_CHARS]; |
18 | }; | 18 | } __attribute__ ((packed)); |
19 | 19 | ||
20 | #define NR_MEASUREMENT_ENTRIES 8 | 20 | #define NR_MEASUREMENT_ENTRIES 8 |
21 | struct cmg_entry { | 21 | struct cmg_entry { |
22 | u32 values[NR_MEASUREMENT_ENTRIES]; | 22 | u32 values[NR_MEASUREMENT_ENTRIES]; |
23 | }; | 23 | } __attribute__ ((packed)); |
24 | 24 | ||
25 | struct channel_path_desc { | 25 | struct channel_path_desc { |
26 | u8 flags; | 26 | u8 flags; |
@@ -31,7 +31,7 @@ struct channel_path_desc { | |||
31 | u8 zeroes; | 31 | u8 zeroes; |
32 | u8 chla; | 32 | u8 chla; |
33 | u8 chpp; | 33 | u8 chpp; |
34 | }; | 34 | } __attribute__ ((packed)); |
35 | 35 | ||
36 | struct channel_path { | 36 | struct channel_path { |
37 | int id; | 37 | int id; |
@@ -47,6 +47,9 @@ struct channel_path { | |||
47 | extern void s390_process_css( void ); | 47 | extern void s390_process_css( void ); |
48 | extern void chsc_validate_chpids(struct subchannel *); | 48 | extern void chsc_validate_chpids(struct subchannel *); |
49 | extern void chpid_is_actually_online(int); | 49 | extern void chpid_is_actually_online(int); |
50 | extern int css_get_ssd_info(struct subchannel *); | ||
51 | extern int chsc_process_crw(void); | ||
52 | extern int chp_process_crw(int, int); | ||
50 | 53 | ||
51 | struct css_general_char { | 54 | struct css_general_char { |
52 | u64 : 41; | 55 | u64 : 41; |
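Marking the chsc/cmg control blocks __attribute__ ((packed)) guarantees the byte layout the channel subsystem architecture defines, independent of any padding the compiler would otherwise insert. A standalone illustration with a deliberately padding-prone layout (the field widths are made up, not the real chsc_header):

#include <stddef.h>
#include <stdio.h>

struct unpacked_hdr { unsigned short length; unsigned int code; };
struct packed_hdr   { unsigned short length; unsigned int code; }
        __attribute__ ((packed));

int main(void)
{
    /* typically 8 vs 6 bytes: the unpacked variant gets 2 bytes of
     * padding before 'code', which a hardware-defined layout cannot have */
    printf("sizeof: %zu vs %zu\n",
           sizeof(struct unpacked_hdr), sizeof(struct packed_hdr));
    printf("offsetof(code): %zu vs %zu\n",
           offsetof(struct unpacked_hdr, code),
           offsetof(struct packed_hdr, code));
    return 0;
}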
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index ae1bf231d089..b3a56dc5f68a 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -122,7 +122,7 @@ cio_get_options (struct subchannel *sch) | |||
122 | * Use tpi to get a pending interrupt, call the interrupt handler and | 122 | * Use tpi to get a pending interrupt, call the interrupt handler and |
123 | * return a pointer to the subchannel structure. | 123 | * return a pointer to the subchannel structure. |
124 | */ | 124 | */ |
125 | static inline int | 125 | static int |
126 | cio_tpi(void) | 126 | cio_tpi(void) |
127 | { | 127 | { |
128 | struct tpi_info *tpi_info; | 128 | struct tpi_info *tpi_info; |
@@ -152,7 +152,7 @@ cio_tpi(void) | |||
152 | return 1; | 152 | return 1; |
153 | } | 153 | } |
154 | 154 | ||
155 | static inline int | 155 | static int |
156 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | 156 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) |
157 | { | 157 | { |
158 | char dbf_text[15]; | 158 | char dbf_text[15]; |
@@ -585,7 +585,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
585 | * This device must not be known to Linux. So we simply | 585 | * This device must not be known to Linux. So we simply |
586 | * say that there is no device and return ENODEV. | 586 | * say that there is no device and return ENODEV. |
587 | */ | 587 | */ |
588 | CIO_MSG_EVENT(0, "Blacklisted device detected " | 588 | CIO_MSG_EVENT(4, "Blacklisted device detected " |
589 | "at devno %04X, subchannel set %x\n", | 589 | "at devno %04X, subchannel set %x\n", |
590 | sch->schib.pmcw.dev, sch->schid.ssid); | 590 | sch->schib.pmcw.dev, sch->schid.ssid); |
591 | err = -ENODEV; | 591 | err = -ENODEV; |
@@ -646,7 +646,7 @@ do_IRQ (struct pt_regs *regs) | |||
646 | * Make sure that the i/o interrupt did not "overtake" | 646 | * Make sure that the i/o interrupt did not "overtake" |
647 | * the last HZ timer interrupt. | 647 | * the last HZ timer interrupt. |
648 | */ | 648 | */ |
649 | account_ticks(); | 649 | account_ticks(S390_lowcore.int_clock); |
650 | /* | 650 | /* |
651 | * Get interrupt information from lowcore | 651 | * Get interrupt information from lowcore |
652 | */ | 652 | */ |
@@ -832,7 +832,7 @@ cio_get_console_subchannel(void) | |||
832 | } | 832 | } |
833 | 833 | ||
834 | #endif | 834 | #endif |
835 | static inline int | 835 | static int |
836 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | 836 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) |
837 | { | 837 | { |
838 | int retry, cc; | 838 | int retry, cc; |
@@ -850,7 +850,20 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
850 | return -EBUSY; /* uhm... */ | 850 | return -EBUSY; /* uhm... */ |
851 | } | 851 | } |
852 | 852 | ||
853 | static inline int | 853 | /* we can't use the normal udelay here, since it enables external interrupts */ |
854 | |||
855 | static void udelay_reset(unsigned long usecs) | ||
856 | { | ||
857 | uint64_t start_cc, end_cc; | ||
858 | |||
859 | asm volatile ("STCK %0" : "=m" (start_cc)); | ||
860 | do { | ||
861 | cpu_relax(); | ||
862 | asm volatile ("STCK %0" : "=m" (end_cc)); | ||
863 | } while (((end_cc - start_cc)/4096) < usecs); | ||
864 | } | ||
865 | |||
866 | static int | ||
854 | __clear_subchannel_easy(struct subchannel_id schid) | 867 | __clear_subchannel_easy(struct subchannel_id schid) |
855 | { | 868 | { |
856 | int retry; | 869 | int retry; |
@@ -865,7 +878,7 @@ __clear_subchannel_easy(struct subchannel_id schid) | |||
865 | if (schid_equal(&ti.schid, &schid)) | 878 | if (schid_equal(&ti.schid, &schid)) |
866 | return 0; | 879 | return 0; |
867 | } | 880 | } |
868 | udelay(100); | 881 | udelay_reset(100); |
869 | } | 882 | } |
870 | return -EBUSY; | 883 | return -EBUSY; |
871 | } | 884 | } |
@@ -882,11 +895,11 @@ static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr) | |||
882 | int rc; | 895 | int rc; |
883 | 896 | ||
884 | pgm_check_occured = 0; | 897 | pgm_check_occured = 0; |
885 | s390_reset_pgm_handler = cio_reset_pgm_check_handler; | 898 | s390_base_pgm_handler_fn = cio_reset_pgm_check_handler; |
886 | rc = stsch(schid, addr); | 899 | rc = stsch(schid, addr); |
887 | s390_reset_pgm_handler = NULL; | 900 | s390_base_pgm_handler_fn = NULL; |
888 | 901 | ||
889 | /* The program check handler could have changed pgm_check_occured */ | 902 | /* The program check handler could have changed pgm_check_occured. */ |
890 | barrier(); | 903 | barrier(); |
891 | 904 | ||
892 | if (pgm_check_occured) | 905 | if (pgm_check_occured) |
@@ -944,7 +957,7 @@ static void css_reset(void) | |||
944 | /* Reset subchannels. */ | 957 | /* Reset subchannels. */ |
945 | for_each_subchannel(__shutdown_subchannel_easy, NULL); | 958 | for_each_subchannel(__shutdown_subchannel_easy, NULL); |
946 | /* Reset channel paths. */ | 959 | /* Reset channel paths. */ |
947 | s390_reset_mcck_handler = s390_reset_chpids_mcck_handler; | 960 | s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler; |
948 | /* Enable channel report machine checks. */ | 961 | /* Enable channel report machine checks. */ |
949 | __ctl_set_bit(14, 28); | 962 | __ctl_set_bit(14, 28); |
950 | /* Temporarily reenable machine checks. */ | 963 | /* Temporarily reenable machine checks. */ |
@@ -969,7 +982,7 @@ static void css_reset(void) | |||
969 | local_mcck_disable(); | 982 | local_mcck_disable(); |
970 | /* Disable channel report machine checks. */ | 983 | /* Disable channel report machine checks. */ |
971 | __ctl_clear_bit(14, 28); | 984 | __ctl_clear_bit(14, 28); |
972 | s390_reset_mcck_handler = NULL; | 985 | s390_base_mcck_handler_fn = NULL; |
973 | } | 986 | } |
974 | 987 | ||
975 | static struct reset_call css_reset_call = { | 988 | static struct reset_call css_reset_call = { |
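udelay_reset() works because of how the s390 TOD clock is scaled: bit 51 of the STCK value corresponds to one microsecond, so the 64-bit value advances by 4096 (2^12) units per microsecond and dividing a delta by 4096 yields elapsed microseconds. The same busy-wait written out as a sketch (s390-only; the real loop also calls cpu_relax()):

static void demo_udelay(unsigned long usecs)
{
    unsigned long long start, now;

    asm volatile ("STCK %0" : "=m" (start) : : "cc");
    do {
        asm volatile ("STCK %0" : "=m" (now) : : "cc");
    } while (((now - start) >> 12) < usecs);    /* >> 12 == / 4096 */
}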
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 828b2d334f0a..90b22faabbf7 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -519,8 +519,8 @@ struct cmb { | |||
519 | /* insert a single device into the cmb_area list | 519 | /* insert a single device into the cmb_area list |
520 | * called with cmb_area.lock held from alloc_cmb | 520 | * called with cmb_area.lock held from alloc_cmb |
521 | */ | 521 | */ |
522 | static inline int alloc_cmb_single (struct ccw_device *cdev, | 522 | static int alloc_cmb_single(struct ccw_device *cdev, |
523 | struct cmb_data *cmb_data) | 523 | struct cmb_data *cmb_data) |
524 | { | 524 | { |
525 | struct cmb *cmb; | 525 | struct cmb *cmb; |
526 | struct ccw_device_private *node; | 526 | struct ccw_device_private *node; |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 9d6c02446863..fe0ace7aece8 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -30,7 +30,7 @@ struct channel_subsystem *css[__MAX_CSSID + 1]; | |||
30 | 30 | ||
31 | int css_characteristics_avail = 0; | 31 | int css_characteristics_avail = 0; |
32 | 32 | ||
33 | inline int | 33 | int |
34 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) | 34 | for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data) |
35 | { | 35 | { |
36 | struct subchannel_id schid; | 36 | struct subchannel_id schid; |
@@ -108,9 +108,6 @@ css_subchannel_release(struct device *dev) | |||
108 | } | 108 | } |
109 | } | 109 | } |
110 | 110 | ||
111 | extern int css_get_ssd_info(struct subchannel *sch); | ||
112 | |||
113 | |||
114 | int css_sch_device_register(struct subchannel *sch) | 111 | int css_sch_device_register(struct subchannel *sch) |
115 | { | 112 | { |
116 | int ret; | 113 | int ret; |
@@ -187,7 +184,7 @@ get_subchannel_by_schid(struct subchannel_id schid) | |||
187 | return dev ? to_subchannel(dev) : NULL; | 184 | return dev ? to_subchannel(dev) : NULL; |
188 | } | 185 | } |
189 | 186 | ||
190 | static inline int css_get_subchannel_status(struct subchannel *sch) | 187 | static int css_get_subchannel_status(struct subchannel *sch) |
191 | { | 188 | { |
192 | struct schib schib; | 189 | struct schib schib; |
193 | 190 | ||
@@ -299,7 +296,7 @@ static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow) | |||
299 | /* Will be done on the slow path. */ | 296 | /* Will be done on the slow path. */ |
300 | return -EAGAIN; | 297 | return -EAGAIN; |
301 | } | 298 | } |
302 | if (stsch(schid, &schib) || !schib.pmcw.dnv) { | 299 | if (stsch_err(schid, &schib) || !schib.pmcw.dnv) { |
303 | /* Unusable - ignore. */ | 300 | /* Unusable - ignore. */ |
304 | return 0; | 301 | return 0; |
305 | } | 302 | } |
@@ -417,7 +414,7 @@ static void reprobe_all(struct work_struct *unused) | |||
417 | need_reprobe); | 414 | need_reprobe); |
418 | } | 415 | } |
419 | 416 | ||
420 | DECLARE_WORK(css_reprobe_work, reprobe_all); | 417 | static DECLARE_WORK(css_reprobe_work, reprobe_all); |
421 | 418 | ||
422 | /* Schedule reprobing of all unregistered subchannels. */ | 419 | /* Schedule reprobing of all unregistered subchannels. */ |
423 | void css_schedule_reprobe(void) | 420 | void css_schedule_reprobe(void) |
@@ -578,7 +575,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, | |||
578 | 575 | ||
579 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); | 576 | static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); |
580 | 577 | ||
581 | static inline int __init setup_css(int nr) | 578 | static int __init setup_css(int nr) |
582 | { | 579 | { |
583 | u32 tod_high; | 580 | u32 tod_high; |
584 | int ret; | 581 | int ret; |
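css_reprobe_work becomes file-local; together with the reprobe_all() signature it shows the 2.6.20 workqueue convention where the handler receives the work_struct itself. Sketch of the declare/schedule pair (demo names, not the css code):

#include <linux/workqueue.h>

static void demo_reprobe(struct work_struct *unused)
{
    /* walk the subchannels and register the ones that appeared */
}

static DECLARE_WORK(demo_reprobe_work, demo_reprobe);

static void demo_schedule_reprobe(void)
{
    schedule_work(&demo_reprobe_work);  /* safe from interrupt context */
}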
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 3464c5b875c4..ca2bab932a8a 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -143,6 +143,8 @@ extern void css_sch_device_unregister(struct subchannel *); | |||
143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 143 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
144 | extern int css_init_done; | 144 | extern int css_init_done; |
145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 145 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
146 | extern int css_process_crw(int, int); | ||
147 | extern void css_reiterate_subchannels(void); | ||
146 | 148 | ||
147 | #define __MAX_SUBCHANNEL 65535 | 149 | #define __MAX_SUBCHANNEL 65535 |
148 | #define __MAX_SSID 3 | 150 | #define __MAX_SSID 3 |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 803579053c2f..e322111fb369 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -138,7 +138,6 @@ struct bus_type ccw_bus_type; | |||
138 | 138 | ||
139 | static int io_subchannel_probe (struct subchannel *); | 139 | static int io_subchannel_probe (struct subchannel *); |
140 | static int io_subchannel_remove (struct subchannel *); | 140 | static int io_subchannel_remove (struct subchannel *); |
141 | void io_subchannel_irq (struct device *); | ||
142 | static int io_subchannel_notify(struct device *, int); | 141 | static int io_subchannel_notify(struct device *, int); |
143 | static void io_subchannel_verify(struct device *); | 142 | static void io_subchannel_verify(struct device *); |
144 | static void io_subchannel_ioterm(struct device *); | 143 | static void io_subchannel_ioterm(struct device *); |
@@ -235,11 +234,8 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf) | |||
235 | ssize_t ret = 0; | 234 | ssize_t ret = 0; |
236 | int chp; | 235 | int chp; |
237 | 236 | ||
238 | if (ssd) | 237 | for (chp = 0; chp < 8; chp++) |
239 | for (chp = 0; chp < 8; chp++) | 238 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); |
240 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); | ||
241 | else | ||
242 | ret += sprintf (buf, "n/a"); | ||
243 | ret += sprintf (buf+ret, "\n"); | 239 | ret += sprintf (buf+ret, "\n"); |
244 | return min((ssize_t)PAGE_SIZE, ret); | 240 | return min((ssize_t)PAGE_SIZE, ret); |
245 | } | 241 | } |
@@ -552,13 +548,13 @@ static struct attribute_group ccwdev_attr_group = { | |||
552 | .attrs = ccwdev_attrs, | 548 | .attrs = ccwdev_attrs, |
553 | }; | 549 | }; |
554 | 550 | ||
555 | static inline int | 551 | static int |
556 | device_add_files (struct device *dev) | 552 | device_add_files (struct device *dev) |
557 | { | 553 | { |
558 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); | 554 | return sysfs_create_group(&dev->kobj, &ccwdev_attr_group); |
559 | } | 555 | } |
560 | 556 | ||
561 | static inline void | 557 | static void |
562 | device_remove_files(struct device *dev) | 558 | device_remove_files(struct device *dev) |
563 | { | 559 | { |
564 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); | 560 | sysfs_remove_group(&dev->kobj, &ccwdev_attr_group); |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 29db6341d632..b66338b76579 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -74,6 +74,7 @@ extern struct workqueue_struct *ccw_device_notify_work; | |||
74 | extern wait_queue_head_t ccw_device_init_wq; | 74 | extern wait_queue_head_t ccw_device_init_wq; |
75 | extern atomic_t ccw_device_init_count; | 75 | extern atomic_t ccw_device_init_count; |
76 | 76 | ||
77 | void io_subchannel_irq (struct device *pdev); | ||
77 | void io_subchannel_recog_done(struct ccw_device *cdev); | 78 | void io_subchannel_recog_done(struct ccw_device *cdev); |
78 | 79 | ||
79 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 80 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
@@ -118,6 +119,7 @@ int ccw_device_stlck(struct ccw_device *); | |||
118 | /* qdio needs this. */ | 119 | /* qdio needs this. */ |
119 | void ccw_device_set_timeout(struct ccw_device *, int); | 120 | void ccw_device_set_timeout(struct ccw_device *, int); |
120 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); | 121 | extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); |
122 | extern struct bus_type ccw_bus_type; | ||
121 | 123 | ||
122 | /* Channel measurement facility related */ | 124 | /* Channel measurement facility related */ |
123 | void retry_set_schib(struct ccw_device *cdev); | 125 | void retry_set_schib(struct ccw_device *cdev); |
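Aside: the device.c and device.h hunks above (and the matching qdio.c hunk further down) share one theme — a prototype or extern declaration that used to be repeated locally in a .c file (io_subchannel_irq, ccw_bus_type) now lives once in the subsystem header. A minimal sketch of that pattern follows; all names in it are hypothetical, not taken from the cio code, and the "header" and ".c" parts are collapsed into one file so it compiles stand-alone.

/* Sketch of the "declare it once, in the header" clean-up (hypothetical names). */

/* ---- the part that would sit in the shared header ---- */
struct example_bus { const char *name; };
extern struct example_bus example_bus_type;   /* one authoritative extern */
void example_irq(void *dev);                  /* one authoritative prototype */

/* ---- the part that would sit in the owning .c file ---- */
struct example_bus example_bus_type = { .name = "example" };
void example_irq(void *dev) { (void)dev; /* would dispatch the interrupt */ }

/* ---- a consumer: no private "extern struct example_bus ..." copy ---- */
int main(void)
{
	example_irq(0);
	return example_bus_type.name ? 0 : 1;
}

Keeping the single authoritative declaration in the header lets the compiler check that prototype and definition agree, which scattered per-file externs cannot.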
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index eed14572fc3b..51238e7555bb 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -206,7 +206,7 @@ ccw_device_handle_oper(struct ccw_device *cdev) | |||
206 | * been varied online on the SE so we have to find out by magic (i. e. driving | 206 | * been varied online on the SE so we have to find out by magic (i. e. driving |
207 | * the channel subsystem to device selection and updating our path masks). | 207 | * the channel subsystem to device selection and updating our path masks). |
208 | */ | 208 | */ |
209 | static inline void | 209 | static void |
210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) | 210 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) |
211 | { | 211 | { |
212 | int mask, i; | 212 | int mask, i; |
@@ -387,7 +387,7 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
387 | put_device (&cdev->dev); | 387 | put_device (&cdev->dev); |
388 | } | 388 | } |
389 | 389 | ||
390 | static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) | 390 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) |
391 | { | 391 | { |
392 | char *c1; | 392 | char *c1; |
393 | char *c2; | 393 | char *c2; |
@@ -842,6 +842,8 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
842 | call_handler_unsol: | 842 | call_handler_unsol: |
843 | if (cdev->handler) | 843 | if (cdev->handler) |
844 | cdev->handler (cdev, 0, irb); | 844 | cdev->handler (cdev, 0, irb); |
845 | if (cdev->private->flags.doverify) | ||
846 | ccw_device_online_verify(cdev, 0); | ||
845 | return; | 847 | return; |
846 | } | 848 | } |
847 | /* Accumulate status and find out if a basic sense is needed. */ | 849 | /* Accumulate status and find out if a basic sense is needed. */ |
@@ -892,7 +894,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event) | |||
892 | /* | 894 | /* |
893 | * Got an interrupt for a basic sense. | 895 | * Got an interrupt for a basic sense. |
894 | */ | 896 | */ |
895 | void | 897 | static void |
896 | ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) | 898 | ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event) |
897 | { | 899 | { |
898 | struct irb *irb; | 900 | struct irb *irb; |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index d269607336ec..d7b25b8f71d2 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -302,7 +302,7 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) | |||
302 | wake_up(&cdev->private->wait_q); | 302 | wake_up(&cdev->private->wait_q); |
303 | } | 303 | } |
304 | 304 | ||
305 | static inline int | 305 | static int |
306 | __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) | 306 | __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm) |
307 | { | 307 | { |
308 | int ret; | 308 | int ret; |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index bdcf930f7beb..6b1caea622ea 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -25,7 +25,7 @@ | |||
25 | * Check for any kind of channel or interface control check but don't | 25 | * Check for any kind of channel or interface control check but don't |
26 | * issue the message for the console device | 26 | * issue the message for the console device |
27 | */ | 27 | */ |
28 | static inline void | 28 | static void |
29 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 29 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
30 | { | 30 | { |
31 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | 31 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | |
@@ -72,7 +72,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
72 | /* | 72 | /* |
73 | * Copy valid bits from the extended control word to device irb. | 73 | * Copy valid bits from the extended control word to device irb. |
74 | */ | 74 | */ |
75 | static inline void | 75 | static void |
76 | ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | 76 | ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) |
77 | { | 77 | { |
78 | /* | 78 | /* |
@@ -94,7 +94,7 @@ ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | |||
94 | /* | 94 | /* |
95 | * Check if extended status word is valid. | 95 | * Check if extended status word is valid. |
96 | */ | 96 | */ |
97 | static inline int | 97 | static int |
98 | ccw_device_accumulate_esw_valid(struct irb *irb) | 98 | ccw_device_accumulate_esw_valid(struct irb *irb) |
99 | { | 99 | { |
100 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) | 100 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) |
@@ -109,7 +109,7 @@ ccw_device_accumulate_esw_valid(struct irb *irb) | |||
109 | /* | 109 | /* |
110 | * Copy valid bits from the extended status word to device irb. | 110 | * Copy valid bits from the extended status word to device irb. |
111 | */ | 111 | */ |
112 | static inline void | 112 | static void |
113 | ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | 113 | ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) |
114 | { | 114 | { |
115 | struct irb *cdev_irb; | 115 | struct irb *cdev_irb; |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 6fd1940842eb..d726cd5777de 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -66,7 +66,6 @@ MODULE_LICENSE("GPL"); | |||
66 | /******************** HERE WE GO ***********************************/ | 66 | /******************** HERE WE GO ***********************************/ |
67 | 67 | ||
68 | static const char version[] = "QDIO base support version 2"; | 68 | static const char version[] = "QDIO base support version 2"; |
69 | extern struct bus_type ccw_bus_type; | ||
70 | 69 | ||
71 | static int qdio_performance_stats = 0; | 70 | static int qdio_performance_stats = 0; |
72 | static int proc_perf_file_registration; | 71 | static int proc_perf_file_registration; |
@@ -138,7 +137,7 @@ qdio_release_q(struct qdio_q *q) | |||
138 | } | 137 | } |
139 | 138 | ||
140 | /*check ccq */ | 139 | /*check ccq */ |
141 | static inline int | 140 | static int |
142 | qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | 141 | qdio_check_ccq(struct qdio_q *q, unsigned int ccq) |
143 | { | 142 | { |
144 | char dbf_text[15]; | 143 | char dbf_text[15]; |
@@ -153,7 +152,7 @@ qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
153 | return -EIO; | 152 | return -EIO; |
154 | } | 153 | } |
155 | /* EQBS: extract buffer states */ | 154 | /* EQBS: extract buffer states */ |
156 | static inline int | 155 | static int |
157 | qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | 156 | qdio_do_eqbs(struct qdio_q *q, unsigned char *state, |
158 | unsigned int *start, unsigned int *cnt) | 157 | unsigned int *start, unsigned int *cnt) |
159 | { | 158 | { |
@@ -188,7 +187,7 @@ again: | |||
188 | } | 187 | } |
189 | 188 | ||
190 | /* SQBS: set buffer states */ | 189 | /* SQBS: set buffer states */ |
191 | static inline int | 190 | static int |
192 | qdio_do_sqbs(struct qdio_q *q, unsigned char state, | 191 | qdio_do_sqbs(struct qdio_q *q, unsigned char state, |
193 | unsigned int *start, unsigned int *cnt) | 192 | unsigned int *start, unsigned int *cnt) |
194 | { | 193 | { |
@@ -315,7 +314,7 @@ __do_siga_output(struct qdio_q *q, unsigned int *busy_bit) | |||
315 | * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns | 314 | * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns |
316 | * an access exception | 315 | * an access exception |
317 | */ | 316 | */ |
318 | static inline int | 317 | static int |
319 | qdio_siga_output(struct qdio_q *q) | 318 | qdio_siga_output(struct qdio_q *q) |
320 | { | 319 | { |
321 | int cc; | 320 | int cc; |
@@ -349,7 +348,7 @@ qdio_siga_output(struct qdio_q *q) | |||
349 | return cc; | 348 | return cc; |
350 | } | 349 | } |
351 | 350 | ||
352 | static inline int | 351 | static int |
353 | qdio_siga_input(struct qdio_q *q) | 352 | qdio_siga_input(struct qdio_q *q) |
354 | { | 353 | { |
355 | int cc; | 354 | int cc; |
@@ -421,7 +420,7 @@ tiqdio_sched_tl(void) | |||
421 | tasklet_hi_schedule(&tiqdio_tasklet); | 420 | tasklet_hi_schedule(&tiqdio_tasklet); |
422 | } | 421 | } |
423 | 422 | ||
424 | static inline void | 423 | static void |
425 | qdio_mark_tiq(struct qdio_q *q) | 424 | qdio_mark_tiq(struct qdio_q *q) |
426 | { | 425 | { |
427 | unsigned long flags; | 426 | unsigned long flags; |
@@ -471,7 +470,7 @@ qdio_mark_q(struct qdio_q *q) | |||
471 | tasklet_schedule(&q->tasklet); | 470 | tasklet_schedule(&q->tasklet); |
472 | } | 471 | } |
473 | 472 | ||
474 | static inline int | 473 | static int |
475 | qdio_stop_polling(struct qdio_q *q) | 474 | qdio_stop_polling(struct qdio_q *q) |
476 | { | 475 | { |
477 | #ifdef QDIO_USE_PROCESSING_STATE | 476 | #ifdef QDIO_USE_PROCESSING_STATE |
@@ -525,7 +524,7 @@ qdio_stop_polling(struct qdio_q *q) | |||
525 | * sophisticated locking outside of unmark_q, so that we don't need to | 524 | * sophisticated locking outside of unmark_q, so that we don't need to |
526 | * disable the interrupts :-) | 525 | * disable the interrupts :-) |
527 | */ | 526 | */ |
528 | static inline void | 527 | static void |
529 | qdio_unmark_q(struct qdio_q *q) | 528 | qdio_unmark_q(struct qdio_q *q) |
530 | { | 529 | { |
531 | unsigned long flags; | 530 | unsigned long flags; |
@@ -691,7 +690,7 @@ qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q) | |||
691 | return q->first_to_check; | 690 | return q->first_to_check; |
692 | } | 691 | } |
693 | 692 | ||
694 | static inline int | 693 | static int |
695 | qdio_get_outbound_buffer_frontier(struct qdio_q *q) | 694 | qdio_get_outbound_buffer_frontier(struct qdio_q *q) |
696 | { | 695 | { |
697 | struct qdio_irq *irq; | 696 | struct qdio_irq *irq; |
@@ -774,7 +773,7 @@ out: | |||
774 | } | 773 | } |
775 | 774 | ||
776 | /* all buffers are processed */ | 775 | /* all buffers are processed */ |
777 | static inline int | 776 | static int |
778 | qdio_is_outbound_q_done(struct qdio_q *q) | 777 | qdio_is_outbound_q_done(struct qdio_q *q) |
779 | { | 778 | { |
780 | int no_used; | 779 | int no_used; |
@@ -796,7 +795,7 @@ qdio_is_outbound_q_done(struct qdio_q *q) | |||
796 | return (no_used==0); | 795 | return (no_used==0); |
797 | } | 796 | } |
798 | 797 | ||
799 | static inline int | 798 | static int |
800 | qdio_has_outbound_q_moved(struct qdio_q *q) | 799 | qdio_has_outbound_q_moved(struct qdio_q *q) |
801 | { | 800 | { |
802 | int i; | 801 | int i; |
@@ -816,7 +815,7 @@ qdio_has_outbound_q_moved(struct qdio_q *q) | |||
816 | } | 815 | } |
817 | } | 816 | } |
818 | 817 | ||
819 | static inline void | 818 | static void |
820 | qdio_kick_outbound_q(struct qdio_q *q) | 819 | qdio_kick_outbound_q(struct qdio_q *q) |
821 | { | 820 | { |
822 | int result; | 821 | int result; |
@@ -905,7 +904,7 @@ qdio_kick_outbound_q(struct qdio_q *q) | |||
905 | } | 904 | } |
906 | } | 905 | } |
907 | 906 | ||
908 | static inline void | 907 | static void |
909 | qdio_kick_outbound_handler(struct qdio_q *q) | 908 | qdio_kick_outbound_handler(struct qdio_q *q) |
910 | { | 909 | { |
911 | int start, end, real_end, count; | 910 | int start, end, real_end, count; |
@@ -942,7 +941,7 @@ qdio_kick_outbound_handler(struct qdio_q *q) | |||
942 | q->error_status_flags=0; | 941 | q->error_status_flags=0; |
943 | } | 942 | } |
944 | 943 | ||
945 | static inline void | 944 | static void |
946 | __qdio_outbound_processing(struct qdio_q *q) | 945 | __qdio_outbound_processing(struct qdio_q *q) |
947 | { | 946 | { |
948 | int siga_attempts; | 947 | int siga_attempts; |
@@ -1002,7 +1001,7 @@ qdio_outbound_processing(struct qdio_q *q) | |||
1002 | /************************* INBOUND ROUTINES *******************************/ | 1001 | /************************* INBOUND ROUTINES *******************************/ |
1003 | 1002 | ||
1004 | 1003 | ||
1005 | static inline int | 1004 | static int |
1006 | qdio_get_inbound_buffer_frontier(struct qdio_q *q) | 1005 | qdio_get_inbound_buffer_frontier(struct qdio_q *q) |
1007 | { | 1006 | { |
1008 | struct qdio_irq *irq; | 1007 | struct qdio_irq *irq; |
@@ -1133,7 +1132,7 @@ out: | |||
1133 | return q->first_to_check; | 1132 | return q->first_to_check; |
1134 | } | 1133 | } |
1135 | 1134 | ||
1136 | static inline int | 1135 | static int |
1137 | qdio_has_inbound_q_moved(struct qdio_q *q) | 1136 | qdio_has_inbound_q_moved(struct qdio_q *q) |
1138 | { | 1137 | { |
1139 | int i; | 1138 | int i; |
@@ -1167,7 +1166,7 @@ qdio_has_inbound_q_moved(struct qdio_q *q) | |||
1167 | } | 1166 | } |
1168 | 1167 | ||
1169 | /* means, no more buffers to be filled */ | 1168 | /* means, no more buffers to be filled */ |
1170 | static inline int | 1169 | static int |
1171 | tiqdio_is_inbound_q_done(struct qdio_q *q) | 1170 | tiqdio_is_inbound_q_done(struct qdio_q *q) |
1172 | { | 1171 | { |
1173 | int no_used; | 1172 | int no_used; |
@@ -1228,7 +1227,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) | |||
1228 | return 0; | 1227 | return 0; |
1229 | } | 1228 | } |
1230 | 1229 | ||
1231 | static inline int | 1230 | static int |
1232 | qdio_is_inbound_q_done(struct qdio_q *q) | 1231 | qdio_is_inbound_q_done(struct qdio_q *q) |
1233 | { | 1232 | { |
1234 | int no_used; | 1233 | int no_used; |
@@ -1296,7 +1295,7 @@ qdio_is_inbound_q_done(struct qdio_q *q) | |||
1296 | } | 1295 | } |
1297 | } | 1296 | } |
1298 | 1297 | ||
1299 | static inline void | 1298 | static void |
1300 | qdio_kick_inbound_handler(struct qdio_q *q) | 1299 | qdio_kick_inbound_handler(struct qdio_q *q) |
1301 | { | 1300 | { |
1302 | int count, start, end, real_end, i; | 1301 | int count, start, end, real_end, i; |
@@ -1343,7 +1342,7 @@ qdio_kick_inbound_handler(struct qdio_q *q) | |||
1343 | } | 1342 | } |
1344 | } | 1343 | } |
1345 | 1344 | ||
1346 | static inline void | 1345 | static void |
1347 | __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) | 1346 | __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set) |
1348 | { | 1347 | { |
1349 | struct qdio_irq *irq_ptr; | 1348 | struct qdio_irq *irq_ptr; |
@@ -1442,7 +1441,7 @@ tiqdio_inbound_processing(struct qdio_q *q) | |||
1442 | __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); | 1441 | __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount)); |
1443 | } | 1442 | } |
1444 | 1443 | ||
1445 | static inline void | 1444 | static void |
1446 | __qdio_inbound_processing(struct qdio_q *q) | 1445 | __qdio_inbound_processing(struct qdio_q *q) |
1447 | { | 1446 | { |
1448 | int q_laps=0; | 1447 | int q_laps=0; |
@@ -1493,7 +1492,7 @@ qdio_inbound_processing(struct qdio_q *q) | |||
1493 | /************************* MAIN ROUTINES *******************************/ | 1492 | /************************* MAIN ROUTINES *******************************/ |
1494 | 1493 | ||
1495 | #ifdef QDIO_USE_PROCESSING_STATE | 1494 | #ifdef QDIO_USE_PROCESSING_STATE |
1496 | static inline int | 1495 | static int |
1497 | tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) | 1496 | tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) |
1498 | { | 1497 | { |
1499 | if (!q) { | 1498 | if (!q) { |
@@ -1545,7 +1544,7 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps) | |||
1545 | } | 1544 | } |
1546 | #endif /* QDIO_USE_PROCESSING_STATE */ | 1545 | #endif /* QDIO_USE_PROCESSING_STATE */ |
1547 | 1546 | ||
1548 | static inline void | 1547 | static void |
1549 | tiqdio_inbound_checks(void) | 1548 | tiqdio_inbound_checks(void) |
1550 | { | 1549 | { |
1551 | struct qdio_q *q; | 1550 | struct qdio_q *q; |
@@ -1949,7 +1948,7 @@ qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) | |||
1949 | mb(); | 1948 | mb(); |
1950 | } | 1949 | } |
1951 | 1950 | ||
1952 | static inline void | 1951 | static void |
1953 | qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | 1952 | qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) |
1954 | { | 1953 | { |
1955 | char dbf_text[15]; | 1954 | char dbf_text[15]; |
@@ -1966,7 +1965,7 @@ qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | |||
1966 | 1965 | ||
1967 | } | 1966 | } |
1968 | 1967 | ||
1969 | static inline void | 1968 | static void |
1970 | qdio_handle_pci(struct qdio_irq *irq_ptr) | 1969 | qdio_handle_pci(struct qdio_irq *irq_ptr) |
1971 | { | 1970 | { |
1972 | int i; | 1971 | int i; |
@@ -2002,7 +2001,7 @@ qdio_handle_pci(struct qdio_irq *irq_ptr) | |||
2002 | 2001 | ||
2003 | static void qdio_establish_handle_irq(struct ccw_device*, int, int); | 2002 | static void qdio_establish_handle_irq(struct ccw_device*, int, int); |
2004 | 2003 | ||
2005 | static inline void | 2004 | static void |
2006 | qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, | 2005 | qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm, |
2007 | int cstat, int dstat) | 2006 | int cstat, int dstat) |
2008 | { | 2007 | { |
@@ -2229,7 +2228,7 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags, | |||
2229 | return cc; | 2228 | return cc; |
2230 | } | 2229 | } |
2231 | 2230 | ||
2232 | static inline void | 2231 | static void |
2233 | qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, | 2232 | qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac, |
2234 | unsigned long token) | 2233 | unsigned long token) |
2235 | { | 2234 | { |
@@ -2740,7 +2739,7 @@ qdio_free(struct ccw_device *cdev) | |||
2740 | return 0; | 2739 | return 0; |
2741 | } | 2740 | } |
2742 | 2741 | ||
2743 | static inline void | 2742 | static void |
2744 | qdio_allocate_do_dbf(struct qdio_initialize *init_data) | 2743 | qdio_allocate_do_dbf(struct qdio_initialize *init_data) |
2745 | { | 2744 | { |
2746 | char dbf_text[20]; /* if a printf printed out more than 8 chars */ | 2745 | char dbf_text[20]; /* if a printf printed out more than 8 chars */ |
@@ -2773,7 +2772,7 @@ qdio_allocate_do_dbf(struct qdio_initialize *init_data) | |||
2773 | QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); | 2772 | QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*)); |
2774 | } | 2773 | } |
2775 | 2774 | ||
2776 | static inline void | 2775 | static void |
2777 | qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) | 2776 | qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) |
2778 | { | 2777 | { |
2779 | irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; | 2778 | irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; |
@@ -2792,7 +2791,7 @@ qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) | |||
2792 | irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; | 2791 | irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; |
2793 | } | 2792 | } |
2794 | 2793 | ||
2795 | static inline void | 2794 | static void |
2796 | qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, | 2795 | qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, |
2797 | int j, int iqfmt) | 2796 | int j, int iqfmt) |
2798 | { | 2797 | { |
@@ -2813,7 +2812,7 @@ qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, | |||
2813 | } | 2812 | } |
2814 | 2813 | ||
2815 | 2814 | ||
2816 | static inline void | 2815 | static void |
2817 | qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) | 2816 | qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) |
2818 | { | 2817 | { |
2819 | int i; | 2818 | int i; |
@@ -2839,7 +2838,7 @@ qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) | |||
2839 | } | 2838 | } |
2840 | } | 2839 | } |
2841 | 2840 | ||
2842 | static inline void | 2841 | static void |
2843 | qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) | 2842 | qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) |
2844 | { | 2843 | { |
2845 | int i; | 2844 | int i; |
@@ -2865,7 +2864,7 @@ qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) | |||
2865 | } | 2864 | } |
2866 | } | 2865 | } |
2867 | 2866 | ||
2868 | static inline int | 2867 | static int |
2869 | qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, | 2868 | qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat, |
2870 | int dstat) | 2869 | int dstat) |
2871 | { | 2870 | { |
@@ -3014,7 +3013,7 @@ qdio_allocate(struct qdio_initialize *init_data) | |||
3014 | return 0; | 3013 | return 0; |
3015 | } | 3014 | } |
3016 | 3015 | ||
3017 | int qdio_fill_irq(struct qdio_initialize *init_data) | 3016 | static int qdio_fill_irq(struct qdio_initialize *init_data) |
3018 | { | 3017 | { |
3019 | int i; | 3018 | int i; |
3020 | char dbf_text[15]; | 3019 | char dbf_text[15]; |
@@ -3367,7 +3366,7 @@ qdio_activate(struct ccw_device *cdev, int flags) | |||
3367 | } | 3366 | } |
3368 | 3367 | ||
3369 | /* buffers filled forwards again to make Rick happy */ | 3368 | /* buffers filled forwards again to make Rick happy */ |
3370 | static inline void | 3369 | static void |
3371 | qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, | 3370 | qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, |
3372 | unsigned int count, struct qdio_buffer *buffers) | 3371 | unsigned int count, struct qdio_buffer *buffers) |
3373 | { | 3372 | { |
@@ -3386,7 +3385,7 @@ qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, | |||
3386 | } | 3385 | } |
3387 | } | 3386 | } |
3388 | 3387 | ||
3389 | static inline void | 3388 | static void |
3390 | qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, | 3389 | qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, |
3391 | unsigned int count, struct qdio_buffer *buffers) | 3390 | unsigned int count, struct qdio_buffer *buffers) |
3392 | { | 3391 | { |
@@ -3407,7 +3406,7 @@ qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, | |||
3407 | } | 3406 | } |
3408 | } | 3407 | } |
3409 | 3408 | ||
3410 | static inline void | 3409 | static void |
3411 | do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, | 3410 | do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, |
3412 | unsigned int qidx, unsigned int count, | 3411 | unsigned int qidx, unsigned int count, |
3413 | struct qdio_buffer *buffers) | 3412 | struct qdio_buffer *buffers) |
@@ -3443,7 +3442,7 @@ do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags, | |||
3443 | qdio_mark_q(q); | 3442 | qdio_mark_q(q); |
3444 | } | 3443 | } |
3445 | 3444 | ||
3446 | static inline void | 3445 | static void |
3447 | do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, | 3446 | do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags, |
3448 | unsigned int qidx, unsigned int count, | 3447 | unsigned int qidx, unsigned int count, |
3449 | struct qdio_buffer *buffers) | 3448 | struct qdio_buffer *buffers) |
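Aside: most of the qdio.c churn above is the same one-line change repeated — static inline helpers become plain static, so the decision to inline moves from the author to the optimizer. A small stand-alone sketch of what that means in practice; this is hypothetical code, not taken from the driver.

#include <stdio.h>

/* Before (hypothetical):
 *	static inline int add_one(int x) { return x + 1; }
 * forces the inlining hint on every build of the file. */

/* After: plain static — the optimizer may (and for something this small
 * almost certainly will) still inline it, but nothing obliges it to. */
static int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", add_one(41));	/* prints 42 */
	return 0;
}

Trivial helpers like this usually get inlined at -O2 anyway; the gain is that the large helpers no longer have to be, so each call site stops paying the code-size cost.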
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 81b5899f4010..c7d1355237b6 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -465,7 +465,7 @@ static int ap_device_probe(struct device *dev) | |||
465 | * Flush all requests from the request/pending queue of an AP device. | 465 | * Flush all requests from the request/pending queue of an AP device. |
466 | * @ap_dev: pointer to the AP device. | 466 | * @ap_dev: pointer to the AP device. |
467 | */ | 467 | */ |
468 | static inline void __ap_flush_queue(struct ap_device *ap_dev) | 468 | static void __ap_flush_queue(struct ap_device *ap_dev) |
469 | { | 469 | { |
470 | struct ap_message *ap_msg, *next; | 470 | struct ap_message *ap_msg, *next; |
471 | 471 | ||
@@ -587,7 +587,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { | |||
587 | /** | 587 | /** |
588 | * Pick one of the 16 ap domains. | 588 | * Pick one of the 16 ap domains. |
589 | */ | 589 | */ |
590 | static inline int ap_select_domain(void) | 590 | static int ap_select_domain(void) |
591 | { | 591 | { |
592 | int queue_depth, device_type, count, max_count, best_domain; | 592 | int queue_depth, device_type, count, max_count, best_domain; |
593 | int rc, i, j; | 593 | int rc, i, j; |
@@ -825,7 +825,7 @@ static inline void ap_schedule_poll_timer(void) | |||
825 | * required, bit 2^1 is set if the poll timer needs to get armed | 825 | * required, bit 2^1 is set if the poll timer needs to get armed |
826 | * Returns 0 if the device is still present, -ENODEV if not. | 826 | * Returns 0 if the device is still present, -ENODEV if not. |
827 | */ | 827 | */ |
828 | static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) | 828 | static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) |
829 | { | 829 | { |
830 | struct ap_queue_status status; | 830 | struct ap_queue_status status; |
831 | struct ap_message *ap_msg; | 831 | struct ap_message *ap_msg; |
@@ -872,7 +872,7 @@ static inline int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) | |||
872 | * required, bit 2^1 is set if the poll timer needs to get armed | 872 | * required, bit 2^1 is set if the poll timer needs to get armed |
873 | * Returns 0 if the device is still present, -ENODEV if not. | 873 | * Returns 0 if the device is still present, -ENODEV if not. |
874 | */ | 874 | */ |
875 | static inline int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) | 875 | static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) |
876 | { | 876 | { |
877 | struct ap_queue_status status; | 877 | struct ap_queue_status status; |
878 | struct ap_message *ap_msg; | 878 | struct ap_message *ap_msg; |
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 1edc10a7a6f2..b9e59bc9435a 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c | |||
@@ -791,7 +791,7 @@ static long trans_xcRB32(struct file *filp, unsigned int cmd, | |||
791 | return rc; | 791 | return rc; |
792 | } | 792 | } |
793 | 793 | ||
794 | long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, | 794 | static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, |
795 | unsigned long arg) | 795 | unsigned long arg) |
796 | { | 796 | { |
797 | if (cmd == ICARSAMODEXPO) | 797 | if (cmd == ICARSAMODEXPO) |
@@ -833,8 +833,8 @@ static struct miscdevice zcrypt_misc_device = { | |||
833 | */ | 833 | */ |
834 | static struct proc_dir_entry *zcrypt_entry; | 834 | static struct proc_dir_entry *zcrypt_entry; |
835 | 835 | ||
836 | static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, | 836 | static int sprintcl(unsigned char *outaddr, unsigned char *addr, |
837 | unsigned int len) | 837 | unsigned int len) |
838 | { | 838 | { |
839 | int hl, i; | 839 | int hl, i; |
840 | 840 | ||
@@ -845,8 +845,8 @@ static inline int sprintcl(unsigned char *outaddr, unsigned char *addr, | |||
845 | return hl; | 845 | return hl; |
846 | } | 846 | } |
847 | 847 | ||
848 | static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, | 848 | static int sprintrw(unsigned char *outaddr, unsigned char *addr, |
849 | unsigned int len) | 849 | unsigned int len) |
850 | { | 850 | { |
851 | int hl, inl, c, cx; | 851 | int hl, inl, c, cx; |
852 | 852 | ||
@@ -865,8 +865,8 @@ static inline int sprintrw(unsigned char *outaddr, unsigned char *addr, | |||
865 | return hl; | 865 | return hl; |
866 | } | 866 | } |
867 | 867 | ||
868 | static inline int sprinthx(unsigned char *title, unsigned char *outaddr, | 868 | static int sprinthx(unsigned char *title, unsigned char *outaddr, |
869 | unsigned char *addr, unsigned int len) | 869 | unsigned char *addr, unsigned int len) |
870 | { | 870 | { |
871 | int hl, inl, r, rx; | 871 | int hl, inl, r, rx; |
872 | 872 | ||
@@ -885,8 +885,8 @@ static inline int sprinthx(unsigned char *title, unsigned char *outaddr, | |||
885 | return hl; | 885 | return hl; |
886 | } | 886 | } |
887 | 887 | ||
888 | static inline int sprinthx4(unsigned char *title, unsigned char *outaddr, | 888 | static int sprinthx4(unsigned char *title, unsigned char *outaddr, |
889 | unsigned int *array, unsigned int len) | 889 | unsigned int *array, unsigned int len) |
890 | { | 890 | { |
891 | int hl, r; | 891 | int hl, r; |
892 | 892 | ||
@@ -943,7 +943,7 @@ static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, | |||
943 | zcrypt_qdepth_mask(workarea); | 943 | zcrypt_qdepth_mask(workarea); |
944 | len += sprinthx("Waiting work element counts", | 944 | len += sprinthx("Waiting work element counts", |
945 | resp_buff+len, workarea, AP_DEVICES); | 945 | resp_buff+len, workarea, AP_DEVICES); |
946 | zcrypt_perdev_reqcnt((unsigned int *) workarea); | 946 | zcrypt_perdev_reqcnt((int *) workarea); |
947 | len += sprinthx4("Per-device successfully completed request counts", | 947 | len += sprinthx4("Per-device successfully completed request counts", |
948 | resp_buff+len,(unsigned int *) workarea, AP_DEVICES); | 948 | resp_buff+len,(unsigned int *) workarea, AP_DEVICES); |
949 | *eof = 1; | 949 | *eof = 1; |
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 32e37014345c..818ffe05ac00 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
@@ -191,10 +191,10 @@ static int ICACRT_msg_to_type4CRT_msg(struct zcrypt_device *zdev, | |||
191 | * | 191 | * |
192 | * Returns 0 on success or -EFAULT. | 192 | * Returns 0 on success or -EFAULT. |
193 | */ | 193 | */ |
194 | static inline int convert_type84(struct zcrypt_device *zdev, | 194 | static int convert_type84(struct zcrypt_device *zdev, |
195 | struct ap_message *reply, | 195 | struct ap_message *reply, |
196 | char __user *outputdata, | 196 | char __user *outputdata, |
197 | unsigned int outputdatalength) | 197 | unsigned int outputdatalength) |
198 | { | 198 | { |
199 | struct type84_hdr *t84h = reply->message; | 199 | struct type84_hdr *t84h = reply->message; |
200 | char *data; | 200 | char *data; |
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index b7153c1e15cd..252443b6bd1b 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -709,7 +709,8 @@ out_free: | |||
709 | * PCIXCC/CEX2C device to the request distributor | 709 | * PCIXCC/CEX2C device to the request distributor |
710 | * @xcRB: pointer to the send_cprb request buffer | 710 | * @xcRB: pointer to the send_cprb request buffer |
711 | */ | 711 | */ |
712 | long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, struct ica_xcRB *xcRB) | 712 | static long zcrypt_pcixcc_send_cprb(struct zcrypt_device *zdev, |
713 | struct ica_xcRB *xcRB) | ||
713 | { | 714 | { |
714 | struct ap_message ap_msg; | 715 | struct ap_message ap_msg; |
715 | struct response_type resp_type = { | 716 | struct response_type resp_type = { |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 95f4e105cb96..7809a79feec7 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -121,7 +121,7 @@ MODULE_LICENSE("GPL"); | |||
121 | #define DEBUG | 121 | #define DEBUG |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | char debug_buffer[255]; | 124 | static char debug_buffer[255]; |
125 | /** | 125 | /** |
126 | * Debug Facility Stuff | 126 | * Debug Facility Stuff |
127 | */ | 127 | */ |
@@ -223,16 +223,14 @@ static void claw_timer ( struct chbk * p_ch ); | |||
223 | /* Functions */ | 223 | /* Functions */ |
224 | static int add_claw_reads(struct net_device *dev, | 224 | static int add_claw_reads(struct net_device *dev, |
225 | struct ccwbk* p_first, struct ccwbk* p_last); | 225 | struct ccwbk* p_first, struct ccwbk* p_last); |
226 | static void inline ccw_check_return_code (struct ccw_device *cdev, | 226 | static void ccw_check_return_code (struct ccw_device *cdev, int return_code); |
227 | int return_code); | 227 | static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense ); |
228 | static void inline ccw_check_unit_check (struct chbk * p_ch, | ||
229 | unsigned char sense ); | ||
230 | static int find_link(struct net_device *dev, char *host_name, char *ws_name ); | 228 | static int find_link(struct net_device *dev, char *host_name, char *ws_name ); |
231 | static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); | 229 | static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid); |
232 | static int init_ccw_bk(struct net_device *dev); | 230 | static int init_ccw_bk(struct net_device *dev); |
233 | static void probe_error( struct ccwgroup_device *cgdev); | 231 | static void probe_error( struct ccwgroup_device *cgdev); |
234 | static struct net_device_stats *claw_stats(struct net_device *dev); | 232 | static struct net_device_stats *claw_stats(struct net_device *dev); |
235 | static int inline pages_to_order_of_mag(int num_of_pages); | 233 | static int pages_to_order_of_mag(int num_of_pages); |
236 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); | 234 | static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr); |
237 | #ifdef DEBUG | 235 | #ifdef DEBUG |
238 | static void dumpit (char *buf, int len); | 236 | static void dumpit (char *buf, int len); |
@@ -1310,7 +1308,7 @@ claw_timer ( struct chbk * p_ch ) | |||
1310 | * of magnitude get_free_pages() has an upper order of 9 * | 1308 | * of magnitude get_free_pages() has an upper order of 9 * |
1311 | *--------------------------------------------------------------------*/ | 1309 | *--------------------------------------------------------------------*/ |
1312 | 1310 | ||
1313 | static int inline | 1311 | static int |
1314 | pages_to_order_of_mag(int num_of_pages) | 1312 | pages_to_order_of_mag(int num_of_pages) |
1315 | { | 1313 | { |
1316 | int order_of_mag=1; /* assume 2 pages */ | 1314 | int order_of_mag=1; /* assume 2 pages */ |
@@ -1482,7 +1480,7 @@ add_claw_reads(struct net_device *dev, struct ccwbk* p_first, | |||
1482 | * * | 1480 | * * |
1483 | *-------------------------------------------------------------------*/ | 1481 | *-------------------------------------------------------------------*/ |
1484 | 1482 | ||
1485 | static void inline | 1483 | static void |
1486 | ccw_check_return_code(struct ccw_device *cdev, int return_code) | 1484 | ccw_check_return_code(struct ccw_device *cdev, int return_code) |
1487 | { | 1485 | { |
1488 | #ifdef FUNCTRACE | 1486 | #ifdef FUNCTRACE |
@@ -1529,7 +1527,7 @@ ccw_check_return_code(struct ccw_device *cdev, int return_code) | |||
1529 | * ccw_check_unit_check * | 1527 | * ccw_check_unit_check * |
1530 | *--------------------------------------------------------------------*/ | 1528 | *--------------------------------------------------------------------*/ |
1531 | 1529 | ||
1532 | static void inline | 1530 | static void |
1533 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) | 1531 | ccw_check_unit_check(struct chbk * p_ch, unsigned char sense ) |
1534 | { | 1532 | { |
1535 | struct net_device *dev = p_ch->ndev; | 1533 | struct net_device *dev = p_ch->ndev; |
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 03cc263fe0da..5a84fbbc6611 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c | |||
@@ -369,7 +369,7 @@ ctc_dump_skb(struct sk_buff *skb, int offset) | |||
369 | * @param ch The channel where this skb has been received. | 369 | * @param ch The channel where this skb has been received. |
370 | * @param pskb The received skb. | 370 | * @param pskb The received skb. |
371 | */ | 371 | */ |
372 | static __inline__ void | 372 | static void |
373 | ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | 373 | ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) |
374 | { | 374 | { |
375 | struct net_device *dev = ch->netdev; | 375 | struct net_device *dev = ch->netdev; |
@@ -512,7 +512,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb) | |||
512 | * @param ch The channel, the error belongs to. | 512 | * @param ch The channel, the error belongs to. |
513 | * @param return_code The error code to inspect. | 513 | * @param return_code The error code to inspect. |
514 | */ | 514 | */ |
515 | static void inline | 515 | static void |
516 | ccw_check_return_code(struct channel *ch, int return_code, char *msg) | 516 | ccw_check_return_code(struct channel *ch, int return_code, char *msg) |
517 | { | 517 | { |
518 | DBF_TEXT(trace, 5, __FUNCTION__); | 518 | DBF_TEXT(trace, 5, __FUNCTION__); |
@@ -547,7 +547,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg) | |||
547 | * @param ch The channel, the sense code belongs to. | 547 | * @param ch The channel, the sense code belongs to. |
548 | * @param sense The sense code to inspect. | 548 | * @param sense The sense code to inspect. |
549 | */ | 549 | */ |
550 | static void inline | 550 | static void |
551 | ccw_unit_check(struct channel *ch, unsigned char sense) | 551 | ccw_unit_check(struct channel *ch, unsigned char sense) |
552 | { | 552 | { |
553 | DBF_TEXT(trace, 5, __FUNCTION__); | 553 | DBF_TEXT(trace, 5, __FUNCTION__); |
@@ -603,7 +603,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q) | |||
603 | } | 603 | } |
604 | } | 604 | } |
605 | 605 | ||
606 | static __inline__ int | 606 | static int |
607 | ctc_checkalloc_buffer(struct channel *ch, int warn) | 607 | ctc_checkalloc_buffer(struct channel *ch, int warn) |
608 | { | 608 | { |
609 | DBF_TEXT(trace, 5, __FUNCTION__); | 609 | DBF_TEXT(trace, 5, __FUNCTION__); |
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index e965f03a7291..76728ae4b843 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c | |||
@@ -57,7 +57,7 @@ static struct ccw_device_id cu3088_ids[] = { | |||
57 | 57 | ||
58 | static struct ccw_driver cu3088_driver; | 58 | static struct ccw_driver cu3088_driver; |
59 | 59 | ||
60 | struct device *cu3088_root_dev; | 60 | static struct device *cu3088_root_dev; |
61 | 61 | ||
62 | static ssize_t | 62 | static ssize_t |
63 | group_write(struct device_driver *drv, const char *buf, size_t count) | 63 | group_write(struct device_driver *drv, const char *buf, size_t count) |
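Aside: cu3088_root_dev above, like debug_buffer in claw.c earlier and the attribute declarations in lcs.c and netiucv.c below, gains a static qualifier because nothing outside its own file uses it. A compilable sketch of the idea, again with made-up names rather than the drivers' own:

#include <stdio.h>

/* Before (hypothetical): external linkage, one kernel-wide name
 *	char debug_buffer[255];
 * After: internal linkage, private to this translation unit. */
static char debug_buffer[255];

static int helper(int x)
{
	return x + 1;
}

int main(void)
{
	snprintf(debug_buffer, sizeof(debug_buffer), "helper(41) = %d", helper(41));
	puts(debug_buffer);
	return 0;
}

Internal linkage keeps the symbol out of the image-wide namespace, so two drivers can each have their own debug_buffer without the linker complaining, and the compiler can warn when such a symbol is never used.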
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index e5665b6743a1..b97dd15bdb9a 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -828,7 +828,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) | |||
828 | /** | 828 | /** |
829 | * Emit buffer of a lan comand. | 829 | * Emit buffer of a lan comand. |
830 | */ | 830 | */ |
831 | void | 831 | static void |
832 | lcs_lancmd_timeout(unsigned long data) | 832 | lcs_lancmd_timeout(unsigned long data) |
833 | { | 833 | { |
834 | struct lcs_reply *reply, *list_reply, *r; | 834 | struct lcs_reply *reply, *list_reply, *r; |
@@ -1360,7 +1360,7 @@ lcs_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
1360 | return 0; | 1360 | return 0; |
1361 | } | 1361 | } |
1362 | 1362 | ||
1363 | void | 1363 | static void |
1364 | lcs_schedule_recovery(struct lcs_card *card) | 1364 | lcs_schedule_recovery(struct lcs_card *card) |
1365 | { | 1365 | { |
1366 | LCS_DBF_TEXT(2, trace, "startrec"); | 1366 | LCS_DBF_TEXT(2, trace, "startrec"); |
@@ -1990,7 +1990,7 @@ lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char | |||
1990 | 1990 | ||
1991 | } | 1991 | } |
1992 | 1992 | ||
1993 | DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); | 1993 | static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); |
1994 | 1994 | ||
1995 | static ssize_t | 1995 | static ssize_t |
1996 | lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, | 1996 | lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index d7d1cc0a5c8e..3346088f47e0 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -2053,7 +2053,7 @@ out_free_ndev: | |||
2053 | return ret; | 2053 | return ret; |
2054 | } | 2054 | } |
2055 | 2055 | ||
2056 | DRIVER_ATTR(connection, 0200, NULL, conn_write); | 2056 | static DRIVER_ATTR(connection, 0200, NULL, conn_write); |
2057 | 2057 | ||
2058 | static ssize_t | 2058 | static ssize_t |
2059 | remove_write (struct device_driver *drv, const char *buf, size_t count) | 2059 | remove_write (struct device_driver *drv, const char *buf, size_t count) |
@@ -2112,7 +2112,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2112 | return -EINVAL; | 2112 | return -EINVAL; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | DRIVER_ATTR(remove, 0200, NULL, remove_write); | 2115 | static DRIVER_ATTR(remove, 0200, NULL, remove_write); |
2116 | 2116 | ||
2117 | static void | 2117 | static void |
2118 | netiucv_banner(void) | 2118 | netiucv_banner(void) |
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 6bb558a9a032..7c735e1fe063 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -49,7 +49,7 @@ qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue, | |||
49 | return buffers_needed; | 49 | return buffers_needed; |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline void | 52 | static void |
53 | qeth_eddp_free_context(struct qeth_eddp_context *ctx) | 53 | qeth_eddp_free_context(struct qeth_eddp_context *ctx) |
54 | { | 54 | { |
55 | int i; | 55 | int i; |
@@ -91,7 +91,7 @@ qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf) | |||
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | static inline int | 94 | static int |
95 | qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, | 95 | qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf, |
96 | struct qeth_eddp_context *ctx) | 96 | struct qeth_eddp_context *ctx) |
97 | { | 97 | { |
@@ -196,7 +196,7 @@ out: | |||
196 | return flush_cnt; | 196 | return flush_cnt; |
197 | } | 197 | } |
198 | 198 | ||
199 | static inline void | 199 | static void |
200 | qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, | 200 | qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, |
201 | struct qeth_eddp_data *eddp, int data_len) | 201 | struct qeth_eddp_data *eddp, int data_len) |
202 | { | 202 | { |
@@ -256,7 +256,7 @@ qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx, | |||
256 | ctx->offset += eddp->thl; | 256 | ctx->offset += eddp->thl; |
257 | } | 257 | } |
258 | 258 | ||
259 | static inline void | 259 | static void |
260 | qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, | 260 | qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, |
261 | __wsum *hcsum) | 261 | __wsum *hcsum) |
262 | { | 262 | { |
@@ -302,7 +302,7 @@ qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len, | |||
302 | } | 302 | } |
303 | } | 303 | } |
304 | 304 | ||
305 | static inline void | 305 | static void |
306 | qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, | 306 | qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, |
307 | struct qeth_eddp_data *eddp, int data_len, | 307 | struct qeth_eddp_data *eddp, int data_len, |
308 | __wsum hcsum) | 308 | __wsum hcsum) |
@@ -349,7 +349,7 @@ qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx, | |||
349 | ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); | 349 | ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum); |
350 | } | 350 | } |
351 | 351 | ||
352 | static inline __wsum | 352 | static __wsum |
353 | qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) | 353 | qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) |
354 | { | 354 | { |
355 | __wsum phcsum; /* pseudo header checksum */ | 355 | __wsum phcsum; /* pseudo header checksum */ |
@@ -363,7 +363,7 @@ qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len) | |||
363 | return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); | 363 | return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum); |
364 | } | 364 | } |
365 | 365 | ||
366 | static inline __wsum | 366 | static __wsum |
367 | qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) | 367 | qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) |
368 | { | 368 | { |
369 | __be32 proto; | 369 | __be32 proto; |
@@ -381,7 +381,7 @@ qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len) | |||
381 | return phcsum; | 381 | return phcsum; |
382 | } | 382 | } |
383 | 383 | ||
384 | static inline struct qeth_eddp_data * | 384 | static struct qeth_eddp_data * |
385 | qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) | 385 | qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) |
386 | { | 386 | { |
387 | struct qeth_eddp_data *eddp; | 387 | struct qeth_eddp_data *eddp; |
@@ -399,7 +399,7 @@ qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl) | |||
399 | return eddp; | 399 | return eddp; |
400 | } | 400 | } |
401 | 401 | ||
402 | static inline void | 402 | static void |
403 | __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | 403 | __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, |
404 | struct qeth_eddp_data *eddp) | 404 | struct qeth_eddp_data *eddp) |
405 | { | 405 | { |
@@ -464,7 +464,7 @@ __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
464 | } | 464 | } |
465 | } | 465 | } |
466 | 466 | ||
467 | static inline int | 467 | static int |
468 | qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | 468 | qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, |
469 | struct sk_buff *skb, struct qeth_hdr *qhdr) | 469 | struct sk_buff *skb, struct qeth_hdr *qhdr) |
470 | { | 470 | { |
@@ -505,7 +505,7 @@ qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx, | |||
505 | return 0; | 505 | return 0; |
506 | } | 506 | } |
507 | 507 | ||
508 | static inline void | 508 | static void |
509 | qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, | 509 | qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, |
510 | int hdr_len) | 510 | int hdr_len) |
511 | { | 511 | { |
@@ -529,7 +529,7 @@ qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb, | |||
529 | (skb_shinfo(skb)->gso_segs + 1); | 529 | (skb_shinfo(skb)->gso_segs + 1); |
530 | } | 530 | } |
531 | 531 | ||
532 | static inline struct qeth_eddp_context * | 532 | static struct qeth_eddp_context * |
533 | qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | 533 | qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, |
534 | int hdr_len) | 534 | int hdr_len) |
535 | { | 535 | { |
@@ -581,7 +581,7 @@ qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb, | |||
581 | return ctx; | 581 | return ctx; |
582 | } | 582 | } |
583 | 583 | ||
584 | static inline struct qeth_eddp_context * | 584 | static struct qeth_eddp_context * |
585 | qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, | 585 | qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb, |
586 | struct qeth_hdr *qhdr) | 586 | struct qeth_hdr *qhdr) |
587 | { | 587 | { |
@@ -625,5 +625,3 @@ qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb, | |||
625 | } | 625 | } |
626 | return NULL; | 626 | return NULL; |
627 | } | 627 | } |
628 | |||
629 | |||
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index d2efa5ff125d..2257e45594b3 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -651,7 +651,7 @@ __qeth_ref_ip_on_card(struct qeth_card *card, struct qeth_ipaddr *todo, | |||
651 | return 0; | 651 | return 0; |
652 | } | 652 | } |
653 | 653 | ||
654 | static inline int | 654 | static int |
655 | __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, | 655 | __qeth_address_exists_in_list(struct list_head *list, struct qeth_ipaddr *addr, |
656 | int same_type) | 656 | int same_type) |
657 | { | 657 | { |
@@ -795,7 +795,7 @@ qeth_add_ip(struct qeth_card *card, struct qeth_ipaddr *addr) | |||
795 | return rc; | 795 | return rc; |
796 | } | 796 | } |
797 | 797 | ||
798 | static inline void | 798 | static void |
799 | __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) | 799 | __qeth_delete_all_mc(struct qeth_card *card, unsigned long *flags) |
800 | { | 800 | { |
801 | struct qeth_ipaddr *addr, *tmp; | 801 | struct qeth_ipaddr *addr, *tmp; |
@@ -882,7 +882,7 @@ static void qeth_layer2_add_multicast(struct qeth_card *); | |||
882 | static void qeth_add_multicast_ipv6(struct qeth_card *); | 882 | static void qeth_add_multicast_ipv6(struct qeth_card *); |
883 | #endif | 883 | #endif |
884 | 884 | ||
885 | static inline int | 885 | static int |
886 | qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) | 886 | qeth_set_thread_start_bit(struct qeth_card *card, unsigned long thread) |
887 | { | 887 | { |
888 | unsigned long flags; | 888 | unsigned long flags; |
@@ -920,7 +920,7 @@ qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) | |||
920 | wake_up(&card->wait_q); | 920 | wake_up(&card->wait_q); |
921 | } | 921 | } |
922 | 922 | ||
923 | static inline int | 923 | static int |
924 | __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) | 924 | __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) |
925 | { | 925 | { |
926 | unsigned long flags; | 926 | unsigned long flags; |
@@ -1764,9 +1764,9 @@ out: | |||
1764 | qeth_release_buffer(channel,iob); | 1764 | qeth_release_buffer(channel,iob); |
1765 | } | 1765 | } |
1766 | 1766 | ||
1767 | static inline void | 1767 | static void |
1768 | qeth_prepare_control_data(struct qeth_card *card, int len, | 1768 | qeth_prepare_control_data(struct qeth_card *card, int len, |
1769 | struct qeth_cmd_buffer *iob) | 1769 | struct qeth_cmd_buffer *iob) |
1770 | { | 1770 | { |
1771 | qeth_setup_ccw(&card->write,iob->data,len); | 1771 | qeth_setup_ccw(&card->write,iob->data,len); |
1772 | iob->callback = qeth_release_buffer; | 1772 | iob->callback = qeth_release_buffer; |
@@ -2160,7 +2160,7 @@ qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error, | |||
2160 | return 0; | 2160 | return 0; |
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | static inline struct sk_buff * | 2163 | static struct sk_buff * |
2164 | qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) | 2164 | qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) |
2165 | { | 2165 | { |
2166 | struct sk_buff* skb; | 2166 | struct sk_buff* skb; |
@@ -2179,7 +2179,7 @@ qeth_get_skb(unsigned int length, struct qeth_hdr *hdr) | |||
2179 | return skb; | 2179 | return skb; |
2180 | } | 2180 | } |
2181 | 2181 | ||
2182 | static inline struct sk_buff * | 2182 | static struct sk_buff * |
2183 | qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, | 2183 | qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, |
2184 | struct qdio_buffer_element **__element, int *__offset, | 2184 | struct qdio_buffer_element **__element, int *__offset, |
2185 | struct qeth_hdr **hdr) | 2185 | struct qeth_hdr **hdr) |
@@ -2264,7 +2264,7 @@ no_mem: | |||
2264 | return NULL; | 2264 | return NULL; |
2265 | } | 2265 | } |
2266 | 2266 | ||
2267 | static inline __be16 | 2267 | static __be16 |
2268 | qeth_type_trans(struct sk_buff *skb, struct net_device *dev) | 2268 | qeth_type_trans(struct sk_buff *skb, struct net_device *dev) |
2269 | { | 2269 | { |
2270 | struct qeth_card *card; | 2270 | struct qeth_card *card; |
@@ -2297,7 +2297,7 @@ qeth_type_trans(struct sk_buff *skb, struct net_device *dev) | |||
2297 | return htons(ETH_P_802_2); | 2297 | return htons(ETH_P_802_2); |
2298 | } | 2298 | } |
2299 | 2299 | ||
2300 | static inline void | 2300 | static void |
2301 | qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, | 2301 | qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, |
2302 | struct qeth_hdr *hdr) | 2302 | struct qeth_hdr *hdr) |
2303 | { | 2303 | { |
@@ -2351,7 +2351,7 @@ qeth_rebuild_skb_fake_ll_tr(struct qeth_card *card, struct sk_buff *skb, | |||
2351 | fake_llc->ethertype = ETH_P_IP; | 2351 | fake_llc->ethertype = ETH_P_IP; |
2352 | } | 2352 | } |
2353 | 2353 | ||
2354 | static inline void | 2354 | static void |
2355 | qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, | 2355 | qeth_rebuild_skb_fake_ll_eth(struct qeth_card *card, struct sk_buff *skb, |
2356 | struct qeth_hdr *hdr) | 2356 | struct qeth_hdr *hdr) |
2357 | { | 2357 | { |
@@ -2420,7 +2420,7 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2420 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; | 2420 | *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; |
2421 | } | 2421 | } |
2422 | 2422 | ||
2423 | static inline __u16 | 2423 | static __u16 |
2424 | qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | 2424 | qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, |
2425 | struct qeth_hdr *hdr) | 2425 | struct qeth_hdr *hdr) |
2426 | { | 2426 | { |
@@ -2476,7 +2476,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2476 | return vlan_id; | 2476 | return vlan_id; |
2477 | } | 2477 | } |
2478 | 2478 | ||
2479 | static inline void | 2479 | static void |
2480 | qeth_process_inbound_buffer(struct qeth_card *card, | 2480 | qeth_process_inbound_buffer(struct qeth_card *card, |
2481 | struct qeth_qdio_buffer *buf, int index) | 2481 | struct qeth_qdio_buffer *buf, int index) |
2482 | { | 2482 | { |
@@ -2528,7 +2528,7 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2528 | } | 2528 | } |
2529 | } | 2529 | } |
2530 | 2530 | ||
2531 | static inline struct qeth_buffer_pool_entry * | 2531 | static struct qeth_buffer_pool_entry * |
2532 | qeth_get_buffer_pool_entry(struct qeth_card *card) | 2532 | qeth_get_buffer_pool_entry(struct qeth_card *card) |
2533 | { | 2533 | { |
2534 | struct qeth_buffer_pool_entry *entry; | 2534 | struct qeth_buffer_pool_entry *entry; |
@@ -2543,7 +2543,7 @@ qeth_get_buffer_pool_entry(struct qeth_card *card) | |||
2543 | return NULL; | 2543 | return NULL; |
2544 | } | 2544 | } |
2545 | 2545 | ||
2546 | static inline void | 2546 | static void |
2547 | qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) | 2547 | qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) |
2548 | { | 2548 | { |
2549 | struct qeth_buffer_pool_entry *pool_entry; | 2549 | struct qeth_buffer_pool_entry *pool_entry; |
@@ -2570,7 +2570,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) | |||
2570 | buf->state = QETH_QDIO_BUF_EMPTY; | 2570 | buf->state = QETH_QDIO_BUF_EMPTY; |
2571 | } | 2571 | } |
2572 | 2572 | ||
2573 | static inline void | 2573 | static void |
2574 | qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | 2574 | qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, |
2575 | struct qeth_qdio_out_buffer *buf) | 2575 | struct qeth_qdio_out_buffer *buf) |
2576 | { | 2576 | { |
@@ -2595,7 +2595,7 @@ qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, | |||
2595 | atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); | 2595 | atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); |
2596 | } | 2596 | } |
2597 | 2597 | ||
2598 | static inline void | 2598 | static void |
2599 | qeth_queue_input_buffer(struct qeth_card *card, int index) | 2599 | qeth_queue_input_buffer(struct qeth_card *card, int index) |
2600 | { | 2600 | { |
2601 | struct qeth_qdio_q *queue = card->qdio.in_q; | 2601 | struct qeth_qdio_q *queue = card->qdio.in_q; |
@@ -2699,7 +2699,7 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, | |||
2699 | card->perf_stats.inbound_start_time; | 2699 | card->perf_stats.inbound_start_time; |
2700 | } | 2700 | } |
2701 | 2701 | ||
2702 | static inline int | 2702 | static int |
2703 | qeth_handle_send_error(struct qeth_card *card, | 2703 | qeth_handle_send_error(struct qeth_card *card, |
2704 | struct qeth_qdio_out_buffer *buffer, | 2704 | struct qeth_qdio_out_buffer *buffer, |
2705 | unsigned int qdio_err, unsigned int siga_err) | 2705 | unsigned int qdio_err, unsigned int siga_err) |
@@ -2821,7 +2821,7 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2821 | * Switched to packing state if the number of used buffers on a queue | 2821 | * Switched to packing state if the number of used buffers on a queue |
2822 | * reaches a certain limit. | 2822 | * reaches a certain limit. |
2823 | */ | 2823 | */ |
2824 | static inline void | 2824 | static void |
2825 | qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | 2825 | qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) |
2826 | { | 2826 | { |
2827 | if (!queue->do_pack) { | 2827 | if (!queue->do_pack) { |
@@ -2842,7 +2842,7 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | |||
2842 | * In that case 1 is returned to inform the caller. If no buffer | 2842 | * In that case 1 is returned to inform the caller. If no buffer |
2843 | * has to be flushed, zero is returned. | 2843 | * has to be flushed, zero is returned. |
2844 | */ | 2844 | */ |
2845 | static inline int | 2845 | static int |
2846 | qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | 2846 | qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) |
2847 | { | 2847 | { |
2848 | struct qeth_qdio_out_buffer *buffer; | 2848 | struct qeth_qdio_out_buffer *buffer; |
@@ -2877,7 +2877,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | |||
2877 | * Checks if there is a packing buffer and prepares it to be flushed. | 2877 | * Checks if there is a packing buffer and prepares it to be flushed. |
2878 | * In that case returns 1, otherwise zero. | 2878 | * In that case returns 1, otherwise zero. |
2879 | */ | 2879 | */ |
2880 | static inline int | 2880 | static int |
2881 | qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) | 2881 | qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) |
2882 | { | 2882 | { |
2883 | struct qeth_qdio_out_buffer *buffer; | 2883 | struct qeth_qdio_out_buffer *buffer; |
@@ -2894,7 +2894,7 @@ qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) | |||
2894 | return 0; | 2894 | return 0; |
2895 | } | 2895 | } |
2896 | 2896 | ||
2897 | static inline void | 2897 | static void |
2898 | qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) | 2898 | qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) |
2899 | { | 2899 | { |
2900 | int index; | 2900 | int index; |
@@ -3594,7 +3594,7 @@ qeth_fake_header(struct sk_buff *skb, struct net_device *dev, | |||
3594 | } | 3594 | } |
3595 | } | 3595 | } |
3596 | 3596 | ||
3597 | static inline int | 3597 | static int |
3598 | qeth_send_packet(struct qeth_card *, struct sk_buff *); | 3598 | qeth_send_packet(struct qeth_card *, struct sk_buff *); |
3599 | 3599 | ||
3600 | static int | 3600 | static int |
@@ -3759,7 +3759,7 @@ qeth_stop(struct net_device *dev) | |||
3759 | return 0; | 3759 | return 0; |
3760 | } | 3760 | } |
3761 | 3761 | ||
3762 | static inline int | 3762 | static int |
3763 | qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | 3763 | qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) |
3764 | { | 3764 | { |
3765 | int cast_type = RTN_UNSPEC; | 3765 | int cast_type = RTN_UNSPEC; |
@@ -3806,7 +3806,7 @@ qeth_get_cast_type(struct qeth_card *card, struct sk_buff *skb) | |||
3806 | return cast_type; | 3806 | return cast_type; |
3807 | } | 3807 | } |
3808 | 3808 | ||
3809 | static inline int | 3809 | static int |
3810 | qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | 3810 | qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, |
3811 | int ipv, int cast_type) | 3811 | int ipv, int cast_type) |
3812 | { | 3812 | { |
@@ -3853,7 +3853,7 @@ qeth_get_ip_version(struct sk_buff *skb) | |||
3853 | } | 3853 | } |
3854 | } | 3854 | } |
3855 | 3855 | ||
3856 | static inline struct qeth_hdr * | 3856 | static struct qeth_hdr * |
3857 | __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) | 3857 | __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) |
3858 | { | 3858 | { |
3859 | #ifdef CONFIG_QETH_VLAN | 3859 | #ifdef CONFIG_QETH_VLAN |
@@ -3882,14 +3882,14 @@ __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) | |||
3882 | qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); | 3882 | qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); |
3883 | } | 3883 | } |
3884 | 3884 | ||
3885 | static inline void | 3885 | static void |
3886 | __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) | 3886 | __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) |
3887 | { | 3887 | { |
3888 | if (orig_skb != new_skb) | 3888 | if (orig_skb != new_skb) |
3889 | dev_kfree_skb_any(new_skb); | 3889 | dev_kfree_skb_any(new_skb); |
3890 | } | 3890 | } |
3891 | 3891 | ||
3892 | static inline struct sk_buff * | 3892 | static struct sk_buff * |
3893 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, | 3893 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, |
3894 | struct qeth_hdr **hdr, int ipv) | 3894 | struct qeth_hdr **hdr, int ipv) |
3895 | { | 3895 | { |
@@ -3940,7 +3940,7 @@ qeth_get_qeth_hdr_flags6(int cast_type) | |||
3940 | return ct | QETH_CAST_UNICAST; | 3940 | return ct | QETH_CAST_UNICAST; |
3941 | } | 3941 | } |
3942 | 3942 | ||
3943 | static inline void | 3943 | static void |
3944 | qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, | 3944 | qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, |
3945 | struct sk_buff *skb) | 3945 | struct sk_buff *skb) |
3946 | { | 3946 | { |
@@ -3977,7 +3977,7 @@ qeth_layer2_get_packet_type(struct qeth_card *card, struct qeth_hdr *hdr, | |||
3977 | } | 3977 | } |
3978 | } | 3978 | } |
3979 | 3979 | ||
3980 | static inline void | 3980 | static void |
3981 | qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | 3981 | qeth_layer2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, |
3982 | struct sk_buff *skb, int cast_type) | 3982 | struct sk_buff *skb, int cast_type) |
3983 | { | 3983 | { |
@@ -4068,7 +4068,7 @@ qeth_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
4068 | } | 4068 | } |
4069 | } | 4069 | } |
4070 | 4070 | ||
4071 | static inline void | 4071 | static void |
4072 | __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, | 4072 | __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, |
4073 | int is_tso, int *next_element_to_fill) | 4073 | int is_tso, int *next_element_to_fill) |
4074 | { | 4074 | { |
@@ -4112,7 +4112,7 @@ __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, | |||
4112 | *next_element_to_fill = element; | 4112 | *next_element_to_fill = element; |
4113 | } | 4113 | } |
4114 | 4114 | ||
4115 | static inline int | 4115 | static int |
4116 | qeth_fill_buffer(struct qeth_qdio_out_q *queue, | 4116 | qeth_fill_buffer(struct qeth_qdio_out_q *queue, |
4117 | struct qeth_qdio_out_buffer *buf, | 4117 | struct qeth_qdio_out_buffer *buf, |
4118 | struct sk_buff *skb) | 4118 | struct sk_buff *skb) |
@@ -4171,7 +4171,7 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue, | |||
4171 | return flush_cnt; | 4171 | return flush_cnt; |
4172 | } | 4172 | } |
4173 | 4173 | ||
4174 | static inline int | 4174 | static int |
4175 | qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, | 4175 | qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, |
4176 | struct sk_buff *skb, struct qeth_hdr *hdr, | 4176 | struct sk_buff *skb, struct qeth_hdr *hdr, |
4177 | int elements_needed, | 4177 | int elements_needed, |
@@ -4222,7 +4222,7 @@ out: | |||
4222 | return -EBUSY; | 4222 | return -EBUSY; |
4223 | } | 4223 | } |
4224 | 4224 | ||
4225 | static inline int | 4225 | static int |
4226 | qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | 4226 | qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, |
4227 | struct sk_buff *skb, struct qeth_hdr *hdr, | 4227 | struct sk_buff *skb, struct qeth_hdr *hdr, |
4228 | int elements_needed, struct qeth_eddp_context *ctx) | 4228 | int elements_needed, struct qeth_eddp_context *ctx) |
@@ -4328,7 +4328,7 @@ out: | |||
4328 | return rc; | 4328 | return rc; |
4329 | } | 4329 | } |
4330 | 4330 | ||
4331 | static inline int | 4331 | static int |
4332 | qeth_get_elements_no(struct qeth_card *card, void *hdr, | 4332 | qeth_get_elements_no(struct qeth_card *card, void *hdr, |
4333 | struct sk_buff *skb, int elems) | 4333 | struct sk_buff *skb, int elems) |
4334 | { | 4334 | { |
@@ -4349,7 +4349,7 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
4349 | } | 4349 | } |
4350 | 4350 | ||
4351 | 4351 | ||
4352 | static inline int | 4352 | static int |
4353 | qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | 4353 | qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) |
4354 | { | 4354 | { |
4355 | int ipv = 0; | 4355 | int ipv = 0; |
@@ -4536,7 +4536,7 @@ qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) | |||
4536 | } | 4536 | } |
4537 | 4537 | ||
4538 | 4538 | ||
4539 | static inline const char * | 4539 | static const char * |
4540 | qeth_arp_get_error_cause(int *rc) | 4540 | qeth_arp_get_error_cause(int *rc) |
4541 | { | 4541 | { |
4542 | switch (*rc) { | 4542 | switch (*rc) { |
@@ -4597,7 +4597,7 @@ qeth_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
4597 | return rc; | 4597 | return rc; |
4598 | } | 4598 | } |
4599 | 4599 | ||
4600 | static inline void | 4600 | static void |
4601 | qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, | 4601 | qeth_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo, |
4602 | struct qeth_arp_query_data *qdata, | 4602 | struct qeth_arp_query_data *qdata, |
4603 | int entry_size, int uentry_size) | 4603 | int entry_size, int uentry_size) |
@@ -5214,7 +5214,7 @@ qeth_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | |||
5214 | spin_unlock_irqrestore(&card->vlanlock, flags); | 5214 | spin_unlock_irqrestore(&card->vlanlock, flags); |
5215 | } | 5215 | } |
5216 | 5216 | ||
5217 | static inline void | 5217 | static void |
5218 | qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, | 5218 | qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, |
5219 | unsigned short vid) | 5219 | unsigned short vid) |
5220 | { | 5220 | { |
@@ -5625,7 +5625,7 @@ qeth_delete_mc_addresses(struct qeth_card *card) | |||
5625 | spin_unlock_irqrestore(&card->ip_lock, flags); | 5625 | spin_unlock_irqrestore(&card->ip_lock, flags); |
5626 | } | 5626 | } |
5627 | 5627 | ||
5628 | static inline void | 5628 | static void |
5629 | qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) | 5629 | qeth_add_mc(struct qeth_card *card, struct in_device *in4_dev) |
5630 | { | 5630 | { |
5631 | struct qeth_ipaddr *ipm; | 5631 | struct qeth_ipaddr *ipm; |
@@ -5711,7 +5711,7 @@ qeth_layer2_add_multicast(struct qeth_card *card) | |||
5711 | } | 5711 | } |
5712 | 5712 | ||
5713 | #ifdef CONFIG_QETH_IPV6 | 5713 | #ifdef CONFIG_QETH_IPV6 |
5714 | static inline void | 5714 | static void |
5715 | qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) | 5715 | qeth_add_mc6(struct qeth_card *card, struct inet6_dev *in6_dev) |
5716 | { | 5716 | { |
5717 | struct qeth_ipaddr *ipm; | 5717 | struct qeth_ipaddr *ipm; |
@@ -6022,7 +6022,7 @@ qeth_send_setdelmc(struct qeth_card *card, struct qeth_ipaddr *addr, int ipacmd) | |||
6022 | 6022 | ||
6023 | return rc; | 6023 | return rc; |
6024 | } | 6024 | } |
6025 | static inline void | 6025 | static void |
6026 | qeth_fill_netmask(u8 *netmask, unsigned int len) | 6026 | qeth_fill_netmask(u8 *netmask, unsigned int len) |
6027 | { | 6027 | { |
6028 | int i,j; | 6028 | int i,j; |
@@ -6626,7 +6626,7 @@ qeth_send_setadp_mode(struct qeth_card *card, __u32 command, __u32 mode) | |||
6626 | return rc; | 6626 | return rc; |
6627 | } | 6627 | } |
6628 | 6628 | ||
6629 | static inline int | 6629 | static int |
6630 | qeth_setadapter_hstr(struct qeth_card *card) | 6630 | qeth_setadapter_hstr(struct qeth_card *card) |
6631 | { | 6631 | { |
6632 | int rc; | 6632 | int rc; |
@@ -6889,7 +6889,7 @@ qeth_send_simple_setassparms(struct qeth_card *card, | |||
6889 | return rc; | 6889 | return rc; |
6890 | } | 6890 | } |
6891 | 6891 | ||
6892 | static inline int | 6892 | static int |
6893 | qeth_start_ipa_arp_processing(struct qeth_card *card) | 6893 | qeth_start_ipa_arp_processing(struct qeth_card *card) |
6894 | { | 6894 | { |
6895 | int rc; | 6895 | int rc; |
@@ -7529,7 +7529,7 @@ qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, | |||
7529 | wake_up(&card->wait_q); | 7529 | wake_up(&card->wait_q); |
7530 | } | 7530 | } |
7531 | 7531 | ||
7532 | static inline int | 7532 | static int |
7533 | qeth_threads_running(struct qeth_card *card, unsigned long threads) | 7533 | qeth_threads_running(struct qeth_card *card, unsigned long threads) |
7534 | { | 7534 | { |
7535 | unsigned long flags; | 7535 | unsigned long flags; |
@@ -8118,7 +8118,7 @@ qeth_del_ipato_entry(struct qeth_card *card, enum qeth_prot_versions proto, | |||
8118 | spin_unlock_irqrestore(&card->ip_lock, flags); | 8118 | spin_unlock_irqrestore(&card->ip_lock, flags); |
8119 | } | 8119 | } |
8120 | 8120 | ||
8121 | static inline void | 8121 | static void |
8122 | qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | 8122 | qeth_convert_addr_to_bits(u8 *addr, u8 *bits, int len) |
8123 | { | 8123 | { |
8124 | int i, j; | 8124 | int i, j; |
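The qeth_main.c hunks above all apply one treatment: file-local helpers drop the inline keyword and become plain static, leaving the inlining decision to the compiler's own size/benefit heuristics instead of hinting it on every definition. A minimal, self-contained sketch of the pattern (helper names are made up for illustration, not taken from the driver):

    /* Before: every definition carries an inline hint, whether or not the
     * body is small enough to be worth expanding at each call site. */
    static inline int qeth_scale_demo_old(int x)
    {
    	return x * 2;
    }

    /* After: plain static keeps internal linkage; gcc applies its own
     * heuristics and may still inline the call where it pays off. */
    static int qeth_scale_demo_new(int x)
    {
    	return x * 2;
    }

The qeth_sys.c and zfcp hunks that follow repeat the same substitution; where a function additionally gains static (for example qeth_check_layer2 below), it presumably had external linkage with no callers outside the file.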
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index 5836737ac58f..d518419cd0c6 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c | |||
@@ -328,7 +328,7 @@ qeth_dev_bufcnt_store(struct device *dev, struct device_attribute *attr, const c | |||
328 | static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, | 328 | static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, |
329 | qeth_dev_bufcnt_store); | 329 | qeth_dev_bufcnt_store); |
330 | 330 | ||
331 | static inline ssize_t | 331 | static ssize_t |
332 | qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, | 332 | qeth_dev_route_show(struct qeth_card *card, struct qeth_routing_info *route, |
333 | char *buf) | 333 | char *buf) |
334 | { | 334 | { |
@@ -368,7 +368,7 @@ qeth_dev_route4_show(struct device *dev, struct device_attribute *attr, char *bu | |||
368 | return qeth_dev_route_show(card, &card->options.route4, buf); | 368 | return qeth_dev_route_show(card, &card->options.route4, buf); |
369 | } | 369 | } |
370 | 370 | ||
371 | static inline ssize_t | 371 | static ssize_t |
372 | qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, | 372 | qeth_dev_route_store(struct qeth_card *card, struct qeth_routing_info *route, |
373 | enum qeth_prot_versions prot, const char *buf, size_t count) | 373 | enum qeth_prot_versions prot, const char *buf, size_t count) |
374 | { | 374 | { |
@@ -998,7 +998,7 @@ struct device_attribute dev_attr_##_id = { \ | |||
998 | .store = _store, \ | 998 | .store = _store, \ |
999 | }; | 999 | }; |
1000 | 1000 | ||
1001 | int | 1001 | static int |
1002 | qeth_check_layer2(struct qeth_card *card) | 1002 | qeth_check_layer2(struct qeth_card *card) |
1003 | { | 1003 | { |
1004 | if (card->options.layer2) | 1004 | if (card->options.layer2) |
@@ -1100,7 +1100,7 @@ static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, | |||
1100 | qeth_dev_ipato_invert4_show, | 1100 | qeth_dev_ipato_invert4_show, |
1101 | qeth_dev_ipato_invert4_store); | 1101 | qeth_dev_ipato_invert4_store); |
1102 | 1102 | ||
1103 | static inline ssize_t | 1103 | static ssize_t |
1104 | qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, | 1104 | qeth_dev_ipato_add_show(char *buf, struct qeth_card *card, |
1105 | enum qeth_prot_versions proto) | 1105 | enum qeth_prot_versions proto) |
1106 | { | 1106 | { |
@@ -1146,7 +1146,7 @@ qeth_dev_ipato_add4_show(struct device *dev, struct device_attribute *attr, char | |||
1146 | return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); | 1146 | return qeth_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); |
1147 | } | 1147 | } |
1148 | 1148 | ||
1149 | static inline int | 1149 | static int |
1150 | qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, | 1150 | qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, |
1151 | u8 *addr, int *mask_bits) | 1151 | u8 *addr, int *mask_bits) |
1152 | { | 1152 | { |
@@ -1178,7 +1178,7 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, | |||
1178 | return 0; | 1178 | return 0; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | static inline ssize_t | 1181 | static ssize_t |
1182 | qeth_dev_ipato_add_store(const char *buf, size_t count, | 1182 | qeth_dev_ipato_add_store(const char *buf, size_t count, |
1183 | struct qeth_card *card, enum qeth_prot_versions proto) | 1183 | struct qeth_card *card, enum qeth_prot_versions proto) |
1184 | { | 1184 | { |
@@ -1223,7 +1223,7 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, | |||
1223 | qeth_dev_ipato_add4_show, | 1223 | qeth_dev_ipato_add4_show, |
1224 | qeth_dev_ipato_add4_store); | 1224 | qeth_dev_ipato_add4_store); |
1225 | 1225 | ||
1226 | static inline ssize_t | 1226 | static ssize_t |
1227 | qeth_dev_ipato_del_store(const char *buf, size_t count, | 1227 | qeth_dev_ipato_del_store(const char *buf, size_t count, |
1228 | struct qeth_card *card, enum qeth_prot_versions proto) | 1228 | struct qeth_card *card, enum qeth_prot_versions proto) |
1229 | { | 1229 | { |
@@ -1361,7 +1361,7 @@ static struct attribute_group qeth_device_ipato_group = { | |||
1361 | .attrs = (struct attribute **)qeth_ipato_device_attrs, | 1361 | .attrs = (struct attribute **)qeth_ipato_device_attrs, |
1362 | }; | 1362 | }; |
1363 | 1363 | ||
1364 | static inline ssize_t | 1364 | static ssize_t |
1365 | qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, | 1365 | qeth_dev_vipa_add_show(char *buf, struct qeth_card *card, |
1366 | enum qeth_prot_versions proto) | 1366 | enum qeth_prot_versions proto) |
1367 | { | 1367 | { |
@@ -1407,7 +1407,7 @@ qeth_dev_vipa_add4_show(struct device *dev, struct device_attribute *attr, char | |||
1407 | return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); | 1407 | return qeth_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); |
1408 | } | 1408 | } |
1409 | 1409 | ||
1410 | static inline int | 1410 | static int |
1411 | qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, | 1411 | qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, |
1412 | u8 *addr) | 1412 | u8 *addr) |
1413 | { | 1413 | { |
@@ -1418,7 +1418,7 @@ qeth_parse_vipae(const char* buf, enum qeth_prot_versions proto, | |||
1418 | return 0; | 1418 | return 0; |
1419 | } | 1419 | } |
1420 | 1420 | ||
1421 | static inline ssize_t | 1421 | static ssize_t |
1422 | qeth_dev_vipa_add_store(const char *buf, size_t count, | 1422 | qeth_dev_vipa_add_store(const char *buf, size_t count, |
1423 | struct qeth_card *card, enum qeth_prot_versions proto) | 1423 | struct qeth_card *card, enum qeth_prot_versions proto) |
1424 | { | 1424 | { |
@@ -1451,7 +1451,7 @@ static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, | |||
1451 | qeth_dev_vipa_add4_show, | 1451 | qeth_dev_vipa_add4_show, |
1452 | qeth_dev_vipa_add4_store); | 1452 | qeth_dev_vipa_add4_store); |
1453 | 1453 | ||
1454 | static inline ssize_t | 1454 | static ssize_t |
1455 | qeth_dev_vipa_del_store(const char *buf, size_t count, | 1455 | qeth_dev_vipa_del_store(const char *buf, size_t count, |
1456 | struct qeth_card *card, enum qeth_prot_versions proto) | 1456 | struct qeth_card *card, enum qeth_prot_versions proto) |
1457 | { | 1457 | { |
@@ -1542,7 +1542,7 @@ static struct attribute_group qeth_device_vipa_group = { | |||
1542 | .attrs = (struct attribute **)qeth_vipa_device_attrs, | 1542 | .attrs = (struct attribute **)qeth_vipa_device_attrs, |
1543 | }; | 1543 | }; |
1544 | 1544 | ||
1545 | static inline ssize_t | 1545 | static ssize_t |
1546 | qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, | 1546 | qeth_dev_rxip_add_show(char *buf, struct qeth_card *card, |
1547 | enum qeth_prot_versions proto) | 1547 | enum qeth_prot_versions proto) |
1548 | { | 1548 | { |
@@ -1588,7 +1588,7 @@ qeth_dev_rxip_add4_show(struct device *dev, struct device_attribute *attr, char | |||
1588 | return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); | 1588 | return qeth_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); |
1589 | } | 1589 | } |
1590 | 1590 | ||
1591 | static inline int | 1591 | static int |
1592 | qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, | 1592 | qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, |
1593 | u8 *addr) | 1593 | u8 *addr) |
1594 | { | 1594 | { |
@@ -1599,7 +1599,7 @@ qeth_parse_rxipe(const char* buf, enum qeth_prot_versions proto, | |||
1599 | return 0; | 1599 | return 0; |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | static inline ssize_t | 1602 | static ssize_t |
1603 | qeth_dev_rxip_add_store(const char *buf, size_t count, | 1603 | qeth_dev_rxip_add_store(const char *buf, size_t count, |
1604 | struct qeth_card *card, enum qeth_prot_versions proto) | 1604 | struct qeth_card *card, enum qeth_prot_versions proto) |
1605 | { | 1605 | { |
@@ -1632,7 +1632,7 @@ static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, | |||
1632 | qeth_dev_rxip_add4_show, | 1632 | qeth_dev_rxip_add4_show, |
1633 | qeth_dev_rxip_add4_store); | 1633 | qeth_dev_rxip_add4_store); |
1634 | 1634 | ||
1635 | static inline ssize_t | 1635 | static ssize_t |
1636 | qeth_dev_rxip_del_store(const char *buf, size_t count, | 1636 | qeth_dev_rxip_del_store(const char *buf, size_t count, |
1637 | struct qeth_card *card, enum qeth_prot_versions proto) | 1637 | struct qeth_card *card, enum qeth_prot_versions proto) |
1638 | { | 1638 | { |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index e088b5e28711..806bb1a921eb 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -13,22 +13,18 @@ | |||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/workqueue.h> | 14 | #include <linux/workqueue.h> |
15 | #include <linux/time.h> | 15 | #include <linux/time.h> |
16 | #include <linux/device.h> | ||
16 | #include <linux/kthread.h> | 17 | #include <linux/kthread.h> |
17 | 18 | #include <asm/etr.h> | |
18 | #include <asm/lowcore.h> | 19 | #include <asm/lowcore.h> |
19 | 20 | #include <asm/cio.h> | |
21 | #include "cio/cio.h" | ||
22 | #include "cio/chsc.h" | ||
23 | #include "cio/css.h" | ||
20 | #include "s390mach.h" | 24 | #include "s390mach.h" |
21 | 25 | ||
22 | static struct semaphore m_sem; | 26 | static struct semaphore m_sem; |
23 | 27 | ||
24 | extern int css_process_crw(int, int); | ||
25 | extern int chsc_process_crw(void); | ||
26 | extern int chp_process_crw(int, int); | ||
27 | extern void css_reiterate_subchannels(void); | ||
28 | |||
29 | extern struct workqueue_struct *slow_path_wq; | ||
30 | extern struct work_struct slow_path_work; | ||
31 | |||
32 | static NORET_TYPE void | 28 | static NORET_TYPE void |
33 | s390_handle_damage(char *msg) | 29 | s390_handle_damage(char *msg) |
34 | { | 30 | { |
@@ -470,6 +466,19 @@ s390_do_machine_check(struct pt_regs *regs) | |||
470 | s390_handle_damage("unable to revalidate registers."); | 466 | s390_handle_damage("unable to revalidate registers."); |
471 | } | 467 | } |
472 | 468 | ||
469 | if (mci->cd) { | ||
470 | /* Timing facility damage */ | ||
471 | s390_handle_damage("TOD clock damaged"); | ||
472 | } | ||
473 | |||
474 | if (mci->ed && mci->ec) { | ||
475 | /* External damage */ | ||
476 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) | ||
477 | etr_sync_check(); | ||
478 | if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) | ||
479 | etr_switch_to_local(); | ||
480 | } | ||
481 | |||
473 | if (mci->se) | 482 | if (mci->se) |
474 | /* Storage error uncorrected */ | 483 | /* Storage error uncorrected */ |
475 | s390_handle_damage("received storage error uncorrected " | 484 | s390_handle_damage("received storage error uncorrected " |
@@ -508,7 +517,7 @@ static int | |||
508 | machine_check_init(void) | 517 | machine_check_init(void) |
509 | { | 518 | { |
510 | init_MUTEX_LOCKED(&m_sem); | 519 | init_MUTEX_LOCKED(&m_sem); |
511 | ctl_clear_bit(14, 25); /* disable external damage MCH */ | 520 | ctl_set_bit(14, 25); /* enable external damage MCH */ |
512 | ctl_set_bit(14, 27); /* enable system recovery MCH */ | 521 | ctl_set_bit(14, 27); /* enable system recovery MCH */ |
513 | #ifdef CONFIG_MACHCHK_WARNING | 522 | #ifdef CONFIG_MACHCHK_WARNING |
514 | ctl_set_bit(14, 24); /* enable warning MCH */ | 523 | ctl_set_bit(14, 24); /* enable warning MCH */ |
@@ -529,7 +538,11 @@ arch_initcall(machine_check_init); | |||
529 | static int __init | 538 | static int __init |
530 | machine_check_crw_init (void) | 539 | machine_check_crw_init (void) |
531 | { | 540 | { |
532 | kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); | 541 | struct task_struct *task; |
542 | |||
543 | task = kthread_run(s390_collect_crw_info, &m_sem, "kmcheck"); | ||
544 | if (IS_ERR(task)) | ||
545 | return PTR_ERR(task); | ||
533 | ctl_set_bit(14, 28); /* enable channel report MCH */ | 546 | ctl_set_bit(14, 28); /* enable channel report MCH */ |
534 | return 0; | 547 | return 0; |
535 | } | 548 | } |
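The last hunk above stops ignoring the return value of kthread_run(): on failure it yields an ERR_PTR-encoded errno rather than NULL, so IS_ERR()/PTR_ERR() are the right way to test and propagate it. A generic sketch of the idiom with a hypothetical worker standing in for the real thread function:

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    /* Hypothetical worker; stands in for s390_collect_crw_info(). */
    static int demo_collect(void *data)
    {
    	while (!kthread_should_stop())
    		schedule();		/* placeholder for real work */
    	return 0;
    }

    static int __init demo_init(void)
    {
    	struct task_struct *task;

    	task = kthread_run(demo_collect, NULL, "kdemo");
    	if (IS_ERR(task))		/* e.g. -ENOMEM, encoded as a pointer */
    		return PTR_ERR(task);
    	return 0;
    }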
diff --git a/drivers/s390/s390mach.h b/drivers/s390/s390mach.h index 7abb42a09ae2..d3ca4281a494 100644 --- a/drivers/s390/s390mach.h +++ b/drivers/s390/s390mach.h | |||
@@ -102,4 +102,7 @@ static inline int stcrw(struct crw *pcrw ) | |||
102 | return ccode; | 102 | return ccode; |
103 | } | 103 | } |
104 | 104 | ||
105 | #define ED_ETR_SYNC 12 /* External damage ETR sync check */ | ||
106 | #define ED_ETR_SWITCH 13 /* External damage ETR switch to local */ | ||
107 | |||
105 | #endif /* __s390mach */ | 108 | #endif /* __s390mach */ |
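Read alongside the s390mach.c hunk above: external-damage machine checks are only delivered because control register 14 bit 25 is now set, and once one arrives the handler classifies it by testing S390_lowcore.external_damage_code against the two bit numbers defined here. A condensed restatement of that decode step (mci is the decoded machine-check interruption code from s390_do_machine_check() above; this is the two hunks side by side, not new logic):

    if (mci->ed && mci->ec) {	/* external damage with a valid subclass code */
    	unsigned int ed = S390_lowcore.external_damage_code;

    	if (ed & (1U << ED_ETR_SYNC))		/* bit 12: ETR sync check */
    		etr_sync_check();
    	if (ed & (1U << ED_ETR_SWITCH))		/* bit 13: ETR switch to local */
    		etr_switch_to_local();
    }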
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 85093b71f9fa..39a885266790 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -47,13 +47,12 @@ static int __init zfcp_module_init(void); | |||
47 | static void zfcp_ns_gid_pn_handler(unsigned long); | 47 | static void zfcp_ns_gid_pn_handler(unsigned long); |
48 | 48 | ||
49 | /* miscellaneous */ | 49 | /* miscellaneous */ |
50 | static inline int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); | 50 | static int zfcp_sg_list_alloc(struct zfcp_sg_list *, size_t); |
51 | static inline void zfcp_sg_list_free(struct zfcp_sg_list *); | 51 | static void zfcp_sg_list_free(struct zfcp_sg_list *); |
52 | static inline int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, | 52 | static int zfcp_sg_list_copy_from_user(struct zfcp_sg_list *, |
53 | void __user *, size_t); | 53 | void __user *, size_t); |
54 | static inline int zfcp_sg_list_copy_to_user(void __user *, | 54 | static int zfcp_sg_list_copy_to_user(void __user *, |
55 | struct zfcp_sg_list *, size_t); | 55 | struct zfcp_sg_list *, size_t); |
56 | |||
57 | static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); | 56 | static long zfcp_cfdc_dev_ioctl(struct file *, unsigned int, unsigned long); |
58 | 57 | ||
59 | #define ZFCP_CFDC_IOC_MAGIC 0xDD | 58 | #define ZFCP_CFDC_IOC_MAGIC 0xDD |
@@ -605,7 +604,7 @@ zfcp_cfdc_dev_ioctl(struct file *file, unsigned int command, | |||
605 | * elements of the scatter-gather list. The maximum size of a single element | 604 | * elements of the scatter-gather list. The maximum size of a single element |
606 | * in the scatter-gather list is PAGE_SIZE. | 605 | * in the scatter-gather list is PAGE_SIZE. |
607 | */ | 606 | */ |
608 | static inline int | 607 | static int |
609 | zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) | 608 | zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) |
610 | { | 609 | { |
611 | struct scatterlist *sg; | 610 | struct scatterlist *sg; |
@@ -652,7 +651,7 @@ zfcp_sg_list_alloc(struct zfcp_sg_list *sg_list, size_t size) | |||
652 | * Memory for each element in the scatter-gather list is freed. | 651 | * Memory for each element in the scatter-gather list is freed. |
653 | * Finally sg_list->sg is freed itself and sg_list->count is reset. | 652 | * Finally sg_list->sg is freed itself and sg_list->count is reset. |
654 | */ | 653 | */ |
655 | static inline void | 654 | static void |
656 | zfcp_sg_list_free(struct zfcp_sg_list *sg_list) | 655 | zfcp_sg_list_free(struct zfcp_sg_list *sg_list) |
657 | { | 656 | { |
658 | struct scatterlist *sg; | 657 | struct scatterlist *sg; |
@@ -697,7 +696,7 @@ zfcp_sg_size(struct scatterlist *sg, unsigned int sg_count) | |||
697 | * @size: number of bytes to be copied | 696 | * @size: number of bytes to be copied |
698 | * Return: 0 on success, -EFAULT if copy_from_user fails. | 697 | * Return: 0 on success, -EFAULT if copy_from_user fails. |
699 | */ | 698 | */ |
700 | static inline int | 699 | static int |
701 | zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, | 700 | zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, |
702 | void __user *user_buffer, | 701 | void __user *user_buffer, |
703 | size_t size) | 702 | size_t size) |
@@ -735,7 +734,7 @@ zfcp_sg_list_copy_from_user(struct zfcp_sg_list *sg_list, | |||
735 | * @size: number of bytes to be copied | 734 | * @size: number of bytes to be copied |
736 | * Return: 0 on success, -EFAULT if copy_to_user fails | 735 | * Return: 0 on success, -EFAULT if copy_to_user fails |
737 | */ | 736 | */ |
738 | static inline int | 737 | static int |
739 | zfcp_sg_list_copy_to_user(void __user *user_buffer, | 738 | zfcp_sg_list_copy_to_user(void __user *user_buffer, |
740 | struct zfcp_sg_list *sg_list, | 739 | struct zfcp_sg_list *sg_list, |
741 | size_t size) | 740 | size_t size) |
@@ -1799,7 +1798,7 @@ static const struct zfcp_rc_entry zfcp_p_rjt_rc[] = { | |||
1799 | * @code: reason code | 1798 | * @code: reason code |
1800 | * @rc_table: table of reason codes and descriptions | 1799 | * @rc_table: table of reason codes and descriptions |
1801 | */ | 1800 | */ |
1802 | static inline const char * | 1801 | static const char * |
1803 | zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) | 1802 | zfcp_rc_description(u8 code, const struct zfcp_rc_entry *rc_table) |
1804 | { | 1803 | { |
1805 | const char *descr = "unknown reason code"; | 1804 | const char *descr = "unknown reason code"; |
@@ -1847,7 +1846,7 @@ zfcp_check_ct_response(struct ct_hdr *rjt) | |||
1847 | * @rjt_par: reject parameter acc. to FC-PH/FC-FS | 1846 | * @rjt_par: reject parameter acc. to FC-PH/FC-FS |
1848 | * @rc_table: table of reason codes and descriptions | 1847 | * @rc_table: table of reason codes and descriptions |
1849 | */ | 1848 | */ |
1850 | static inline void | 1849 | static void |
1851 | zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, | 1850 | zfcp_print_els_rjt(struct zfcp_ls_rjt_par *rjt_par, |
1852 | const struct zfcp_rc_entry *rc_table) | 1851 | const struct zfcp_rc_entry *rc_table) |
1853 | { | 1852 | { |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 0aa3b1ac76af..d8191d115c14 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -31,7 +31,7 @@ MODULE_PARM_DESC(dbfsize, | |||
31 | 31 | ||
32 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER | 32 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_OTHER |
33 | 33 | ||
34 | static inline int | 34 | static int |
35 | zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) | 35 | zfcp_dbf_stck(char *out_buf, const char *label, unsigned long long stck) |
36 | { | 36 | { |
37 | unsigned long long sec; | 37 | unsigned long long sec; |
@@ -106,7 +106,7 @@ zfcp_dbf_view_dump(char *out_buf, const char *label, | |||
106 | return len; | 106 | return len; |
107 | } | 107 | } |
108 | 108 | ||
109 | static inline int | 109 | static int |
110 | zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, | 110 | zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, |
111 | debug_entry_t * entry, char *out_buf) | 111 | debug_entry_t * entry, char *out_buf) |
112 | { | 112 | { |
@@ -130,7 +130,7 @@ zfcp_dbf_view_header(debug_info_t * id, struct debug_view *view, int area, | |||
130 | return len; | 130 | return len; |
131 | } | 131 | } |
132 | 132 | ||
133 | inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | 133 | void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) |
134 | { | 134 | { |
135 | struct zfcp_adapter *adapter = fsf_req->adapter; | 135 | struct zfcp_adapter *adapter = fsf_req->adapter; |
136 | struct fsf_qtcb *qtcb = fsf_req->qtcb; | 136 | struct fsf_qtcb *qtcb = fsf_req->qtcb; |
@@ -241,7 +241,7 @@ inline void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) | |||
241 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 241 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
242 | } | 242 | } |
243 | 243 | ||
244 | inline void | 244 | void |
245 | zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | 245 | zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, |
246 | struct fsf_status_read_buffer *status_buffer) | 246 | struct fsf_status_read_buffer *status_buffer) |
247 | { | 247 | { |
@@ -295,7 +295,7 @@ zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter, | |||
295 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 295 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
296 | } | 296 | } |
297 | 297 | ||
298 | inline void | 298 | void |
299 | zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, | 299 | zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, |
300 | unsigned int qdio_error, unsigned int siga_error, | 300 | unsigned int qdio_error, unsigned int siga_error, |
301 | int sbal_index, int sbal_count) | 301 | int sbal_index, int sbal_count) |
@@ -316,7 +316,7 @@ zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status, | |||
316 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); | 316 | spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags); |
317 | } | 317 | } |
318 | 318 | ||
319 | static inline int | 319 | static int |
320 | zfcp_hba_dbf_view_response(char *out_buf, | 320 | zfcp_hba_dbf_view_response(char *out_buf, |
321 | struct zfcp_hba_dbf_record_response *rec) | 321 | struct zfcp_hba_dbf_record_response *rec) |
322 | { | 322 | { |
@@ -403,7 +403,7 @@ zfcp_hba_dbf_view_response(char *out_buf, | |||
403 | return len; | 403 | return len; |
404 | } | 404 | } |
405 | 405 | ||
406 | static inline int | 406 | static int |
407 | zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) | 407 | zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) |
408 | { | 408 | { |
409 | int len = 0; | 409 | int len = 0; |
@@ -424,7 +424,7 @@ zfcp_hba_dbf_view_status(char *out_buf, struct zfcp_hba_dbf_record_status *rec) | |||
424 | return len; | 424 | return len; |
425 | } | 425 | } |
426 | 426 | ||
427 | static inline int | 427 | static int |
428 | zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) | 428 | zfcp_hba_dbf_view_qdio(char *out_buf, struct zfcp_hba_dbf_record_qdio *rec) |
429 | { | 429 | { |
430 | int len = 0; | 430 | int len = 0; |
@@ -469,7 +469,7 @@ zfcp_hba_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
469 | return len; | 469 | return len; |
470 | } | 470 | } |
471 | 471 | ||
472 | struct debug_view zfcp_hba_dbf_view = { | 472 | static struct debug_view zfcp_hba_dbf_view = { |
473 | "structured", | 473 | "structured", |
474 | NULL, | 474 | NULL, |
475 | &zfcp_dbf_view_header, | 475 | &zfcp_dbf_view_header, |
@@ -478,7 +478,7 @@ struct debug_view zfcp_hba_dbf_view = { | |||
478 | NULL | 478 | NULL |
479 | }; | 479 | }; |
480 | 480 | ||
481 | inline void | 481 | void |
482 | _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, | 482 | _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, |
483 | u32 s_id, u32 d_id, void *buffer, int buflen) | 483 | u32 s_id, u32 d_id, void *buffer, int buflen) |
484 | { | 484 | { |
@@ -519,7 +519,7 @@ _zfcp_san_dbf_event_common_ct(const char *tag, struct zfcp_fsf_req *fsf_req, | |||
519 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 519 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); |
520 | } | 520 | } |
521 | 521 | ||
522 | inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | 522 | void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) |
523 | { | 523 | { |
524 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 524 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
525 | struct zfcp_port *port = ct->port; | 525 | struct zfcp_port *port = ct->port; |
@@ -531,7 +531,7 @@ inline void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) | |||
531 | ct->req->length); | 531 | ct->req->length); |
532 | } | 532 | } |
533 | 533 | ||
534 | inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | 534 | void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) |
535 | { | 535 | { |
536 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; | 536 | struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data; |
537 | struct zfcp_port *port = ct->port; | 537 | struct zfcp_port *port = ct->port; |
@@ -543,7 +543,7 @@ inline void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) | |||
543 | ct->resp->length); | 543 | ct->resp->length); |
544 | } | 544 | } |
545 | 545 | ||
546 | static inline void | 546 | static void |
547 | _zfcp_san_dbf_event_common_els(const char *tag, int level, | 547 | _zfcp_san_dbf_event_common_els(const char *tag, int level, |
548 | struct zfcp_fsf_req *fsf_req, u32 s_id, | 548 | struct zfcp_fsf_req *fsf_req, u32 s_id, |
549 | u32 d_id, u8 ls_code, void *buffer, int buflen) | 549 | u32 d_id, u8 ls_code, void *buffer, int buflen) |
@@ -585,7 +585,7 @@ _zfcp_san_dbf_event_common_els(const char *tag, int level, | |||
585 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); | 585 | spin_unlock_irqrestore(&adapter->san_dbf_lock, flags); |
586 | } | 586 | } |
587 | 587 | ||
588 | inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) | 588 | void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) |
589 | { | 589 | { |
590 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 590 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
591 | 591 | ||
@@ -597,7 +597,7 @@ inline void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req) | |||
597 | els->req->length); | 597 | els->req->length); |
598 | } | 598 | } |
599 | 599 | ||
600 | inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) | 600 | void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) |
601 | { | 601 | { |
602 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; | 602 | struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data; |
603 | 603 | ||
@@ -608,7 +608,7 @@ inline void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req) | |||
608 | els->resp->length); | 608 | els->resp->length); |
609 | } | 609 | } |
610 | 610 | ||
611 | inline void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) | 611 | void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req) |
612 | { | 612 | { |
613 | struct zfcp_adapter *adapter = fsf_req->adapter; | 613 | struct zfcp_adapter *adapter = fsf_req->adapter; |
614 | struct fsf_status_read_buffer *status_buffer = | 614 | struct fsf_status_read_buffer *status_buffer = |
@@ -693,7 +693,7 @@ zfcp_san_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
693 | return len; | 693 | return len; |
694 | } | 694 | } |
695 | 695 | ||
696 | struct debug_view zfcp_san_dbf_view = { | 696 | static struct debug_view zfcp_san_dbf_view = { |
697 | "structured", | 697 | "structured", |
698 | NULL, | 698 | NULL, |
699 | &zfcp_dbf_view_header, | 699 | &zfcp_dbf_view_header, |
@@ -702,7 +702,7 @@ struct debug_view zfcp_san_dbf_view = { | |||
702 | NULL | 702 | NULL |
703 | }; | 703 | }; |
704 | 704 | ||
705 | static inline void | 705 | static void |
706 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | 706 | _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, |
707 | struct zfcp_adapter *adapter, | 707 | struct zfcp_adapter *adapter, |
708 | struct scsi_cmnd *scsi_cmnd, | 708 | struct scsi_cmnd *scsi_cmnd, |
@@ -786,7 +786,7 @@ _zfcp_scsi_dbf_event_common(const char *tag, const char *tag2, int level, | |||
786 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); | 786 | spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags); |
787 | } | 787 | } |
788 | 788 | ||
789 | inline void | 789 | void |
790 | zfcp_scsi_dbf_event_result(const char *tag, int level, | 790 | zfcp_scsi_dbf_event_result(const char *tag, int level, |
791 | struct zfcp_adapter *adapter, | 791 | struct zfcp_adapter *adapter, |
792 | struct scsi_cmnd *scsi_cmnd, | 792 | struct scsi_cmnd *scsi_cmnd, |
@@ -796,7 +796,7 @@ zfcp_scsi_dbf_event_result(const char *tag, int level, | |||
796 | adapter, scsi_cmnd, fsf_req, 0); | 796 | adapter, scsi_cmnd, fsf_req, 0); |
797 | } | 797 | } |
798 | 798 | ||
799 | inline void | 799 | void |
800 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | 800 | zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, |
801 | struct scsi_cmnd *scsi_cmnd, | 801 | struct scsi_cmnd *scsi_cmnd, |
802 | struct zfcp_fsf_req *new_fsf_req, | 802 | struct zfcp_fsf_req *new_fsf_req, |
@@ -806,7 +806,7 @@ zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter, | |||
806 | adapter, scsi_cmnd, new_fsf_req, old_req_id); | 806 | adapter, scsi_cmnd, new_fsf_req, old_req_id); |
807 | } | 807 | } |
808 | 808 | ||
809 | inline void | 809 | void |
810 | zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, | 810 | zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag, struct zfcp_unit *unit, |
811 | struct scsi_cmnd *scsi_cmnd) | 811 | struct scsi_cmnd *scsi_cmnd) |
812 | { | 812 | { |
@@ -884,7 +884,7 @@ zfcp_scsi_dbf_view_format(debug_info_t * id, struct debug_view *view, | |||
884 | return len; | 884 | return len; |
885 | } | 885 | } |
886 | 886 | ||
887 | struct debug_view zfcp_scsi_dbf_view = { | 887 | static struct debug_view zfcp_scsi_dbf_view = { |
888 | "structured", | 888 | "structured", |
889 | NULL, | 889 | NULL, |
890 | &zfcp_dbf_view_header, | 890 | &zfcp_dbf_view_header, |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c88babce9bca..88642dec080c 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -200,7 +200,7 @@ void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req, unsigned long timeout) | |||
200 | * returns: 0 - initiated action successfully | 200 | * returns: 0 - initiated action successfully |
201 | * <0 - failed to initiate action | 201 | * <0 - failed to initiate action |
202 | */ | 202 | */ |
203 | int | 203 | static int |
204 | zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) | 204 | zfcp_erp_adapter_reopen_internal(struct zfcp_adapter *adapter, int clear_mask) |
205 | { | 205 | { |
206 | int retval; | 206 | int retval; |
@@ -295,7 +295,7 @@ zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear_mask) | |||
295 | * zfcp_erp_adisc - send ADISC ELS command | 295 | * zfcp_erp_adisc - send ADISC ELS command |
296 | * @port: port structure | 296 | * @port: port structure |
297 | */ | 297 | */ |
298 | int | 298 | static int |
299 | zfcp_erp_adisc(struct zfcp_port *port) | 299 | zfcp_erp_adisc(struct zfcp_port *port) |
300 | { | 300 | { |
301 | struct zfcp_adapter *adapter = port->adapter; | 301 | struct zfcp_adapter *adapter = port->adapter; |
@@ -380,7 +380,7 @@ zfcp_erp_adisc(struct zfcp_port *port) | |||
380 | * | 380 | * |
381 | * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. | 381 | * If ADISC failed (LS_RJT or timed out) forced reopen of the port is triggered. |
382 | */ | 382 | */ |
383 | void | 383 | static void |
384 | zfcp_erp_adisc_handler(unsigned long data) | 384 | zfcp_erp_adisc_handler(unsigned long data) |
385 | { | 385 | { |
386 | struct zfcp_send_els *send_els; | 386 | struct zfcp_send_els *send_els; |
@@ -3141,7 +3141,6 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, | |||
3141 | break; | 3141 | break; |
3142 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 3142 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
3143 | if (result != ZFCP_ERP_SUCCEEDED) { | 3143 | if (result != ZFCP_ERP_SUCCEEDED) { |
3144 | struct zfcp_port *port; | ||
3145 | list_for_each_entry(port, &adapter->port_list_head, list) | 3144 | list_for_each_entry(port, &adapter->port_list_head, list) |
3146 | if (port->rport && | 3145 | if (port->rport && |
3147 | !atomic_test_mask(ZFCP_STATUS_PORT_WKA, | 3146 | !atomic_test_mask(ZFCP_STATUS_PORT_WKA, |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index b8794d77285d..cda0cc095ad1 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -119,8 +119,8 @@ extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); | |||
119 | extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); | 119 | extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); |
120 | extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); | 120 | extern void zfcp_set_fcp_dl(struct fcp_cmnd_iu *, fcp_dl_t); |
121 | extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); | 121 | extern char *zfcp_get_fcp_rsp_info_ptr(struct fcp_rsp_iu *); |
122 | extern void set_host_byte(u32 *, char); | 122 | extern void set_host_byte(int *, char); |
123 | extern void set_driver_byte(u32 *, char); | 123 | extern void set_driver_byte(int *, char); |
124 | extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); | 124 | extern char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *); |
125 | extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); | 125 | extern fcp_dl_t zfcp_get_fcp_dl(struct fcp_cmnd_iu *); |
126 | 126 | ||
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 067f1519eb04..4b3ae3f22e78 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -4563,7 +4563,7 @@ zfcp_fsf_req_sbal_check(unsigned long *flags, | |||
4563 | /* | 4563 | /* |
4564 | * set qtcb pointer in fsf_req and initialize QTCB | 4564 | * set qtcb pointer in fsf_req and initialize QTCB |
4565 | */ | 4565 | */ |
4566 | static inline void | 4566 | static void |
4567 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) | 4567 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) |
4568 | { | 4568 | { |
4569 | if (likely(fsf_req->qtcb != NULL)) { | 4569 | if (likely(fsf_req->qtcb != NULL)) { |
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index dbd9f48e863e..1e12a78e8edd 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -21,22 +21,22 @@ | |||
21 | 21 | ||
22 | #include "zfcp_ext.h" | 22 | #include "zfcp_ext.h" |
23 | 23 | ||
24 | static inline void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); | 24 | static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *, int); |
25 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get | 25 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_get |
26 | (struct zfcp_qdio_queue *, int, int); | 26 | (struct zfcp_qdio_queue *, int, int); |
27 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp | 27 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_resp |
28 | (struct zfcp_fsf_req *, int, int); | 28 | (struct zfcp_fsf_req *, int, int); |
29 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain | 29 | static volatile struct qdio_buffer_element *zfcp_qdio_sbal_chain |
30 | (struct zfcp_fsf_req *, unsigned long); | 30 | (struct zfcp_fsf_req *, unsigned long); |
31 | static inline volatile struct qdio_buffer_element *zfcp_qdio_sbale_next | 31 | static volatile struct qdio_buffer_element *zfcp_qdio_sbale_next |
32 | (struct zfcp_fsf_req *, unsigned long); | 32 | (struct zfcp_fsf_req *, unsigned long); |
33 | static inline int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); | 33 | static int zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *, int, int); |
34 | static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); | 34 | static inline int zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *); |
35 | static inline void zfcp_qdio_sbale_fill | 35 | static void zfcp_qdio_sbale_fill |
36 | (struct zfcp_fsf_req *, unsigned long, void *, int); | 36 | (struct zfcp_fsf_req *, unsigned long, void *, int); |
37 | static inline int zfcp_qdio_sbals_from_segment | 37 | static int zfcp_qdio_sbals_from_segment |
38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); | 38 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long); |
39 | static inline int zfcp_qdio_sbals_from_buffer | 39 | static int zfcp_qdio_sbals_from_buffer |
40 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); | 40 | (struct zfcp_fsf_req *, unsigned long, void *, unsigned long, int); |
41 | 41 | ||
42 | static qdio_handler_t zfcp_qdio_request_handler; | 42 | static qdio_handler_t zfcp_qdio_request_handler; |
@@ -201,7 +201,7 @@ zfcp_qdio_allocate(struct zfcp_adapter *adapter) | |||
201 | * returns: error flag | 201 | * returns: error flag |
202 | * | 202 | * |
203 | */ | 203 | */ |
204 | static inline int | 204 | static int |
205 | zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, | 205 | zfcp_qdio_handler_error_check(struct zfcp_adapter *adapter, unsigned int status, |
206 | unsigned int qdio_error, unsigned int siga_error, | 206 | unsigned int qdio_error, unsigned int siga_error, |
207 | int first_element, int elements_processed) | 207 | int first_element, int elements_processed) |
@@ -462,7 +462,7 @@ zfcp_qdio_sbale_get(struct zfcp_qdio_queue *queue, int sbal, int sbale) | |||
462 | * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for | 462 | * zfcp_qdio_sbale_req - return pointer to SBALE of request_queue for |
463 | * a struct zfcp_fsf_req | 463 | * a struct zfcp_fsf_req |
464 | */ | 464 | */ |
465 | inline volatile struct qdio_buffer_element * | 465 | volatile struct qdio_buffer_element * |
466 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | 466 | zfcp_qdio_sbale_req(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) |
467 | { | 467 | { |
468 | return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, | 468 | return zfcp_qdio_sbale_get(&fsf_req->adapter->request_queue, |
@@ -484,7 +484,7 @@ zfcp_qdio_sbale_resp(struct zfcp_fsf_req *fsf_req, int sbal, int sbale) | |||
484 | * zfcp_qdio_sbale_curr - return current SBALE on request_queue for | 484 | * zfcp_qdio_sbale_curr - return current SBALE on request_queue for |
485 | * a struct zfcp_fsf_req | 485 | * a struct zfcp_fsf_req |
486 | */ | 486 | */ |
487 | inline volatile struct qdio_buffer_element * | 487 | volatile struct qdio_buffer_element * |
488 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) | 488 | zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) |
489 | { | 489 | { |
490 | return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, | 490 | return zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, |
@@ -499,7 +499,7 @@ zfcp_qdio_sbale_curr(struct zfcp_fsf_req *fsf_req) | |||
499 | * | 499 | * |
500 | * Note: We can assume at least one free SBAL in the request_queue when called. | 500 | * Note: We can assume at least one free SBAL in the request_queue when called. |
501 | */ | 501 | */ |
502 | static inline void | 502 | static void |
503 | zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | 503 | zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) |
504 | { | 504 | { |
505 | int count = atomic_read(&fsf_req->adapter->request_queue.free_count); | 505 | int count = atomic_read(&fsf_req->adapter->request_queue.free_count); |
@@ -517,7 +517,7 @@ zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals) | |||
517 | * | 517 | * |
518 | * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. | 518 | * This function changes sbal_curr, sbale_curr, sbal_number of fsf_req. |
519 | */ | 519 | */ |
520 | static inline volatile struct qdio_buffer_element * | 520 | static volatile struct qdio_buffer_element * |
521 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 521 | zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
522 | { | 522 | { |
523 | volatile struct qdio_buffer_element *sbale; | 523 | volatile struct qdio_buffer_element *sbale; |
@@ -554,7 +554,7 @@ zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
554 | /** | 554 | /** |
555 | * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed | 555 | * zfcp_qdio_sbale_next - switch to next SBALE, chain SBALs if needed |
556 | */ | 556 | */ |
557 | static inline volatile struct qdio_buffer_element * | 557 | static volatile struct qdio_buffer_element * |
558 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | 558 | zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) |
559 | { | 559 | { |
560 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) | 560 | if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL) |
@@ -569,7 +569,7 @@ zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype) | |||
569 | * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue | 569 | * zfcp_qdio_sbals_zero - initialize SBALs between first and last in queue |
570 | * with zero from | 570 | * with zero from |
571 | */ | 571 | */ |
572 | static inline int | 572 | static int |
573 | zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) | 573 | zfcp_qdio_sbals_zero(struct zfcp_qdio_queue *queue, int first, int last) |
574 | { | 574 | { |
575 | struct qdio_buffer **buf = queue->buffer; | 575 | struct qdio_buffer **buf = queue->buffer; |
@@ -603,7 +603,7 @@ zfcp_qdio_sbals_wipe(struct zfcp_fsf_req *fsf_req) | |||
603 | * zfcp_qdio_sbale_fill - set address and length in current SBALE | 603 | * zfcp_qdio_sbale_fill - set address and length in current SBALE |
604 | * on request_queue | 604 | * on request_queue |
605 | */ | 605 | */ |
606 | static inline void | 606 | static void |
607 | zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 607 | zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
608 | void *addr, int length) | 608 | void *addr, int length) |
609 | { | 609 | { |
@@ -624,7 +624,7 @@ zfcp_qdio_sbale_fill(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
624 | * Alignment and length of the segment determine how many SBALEs are needed | 624 | * Alignment and length of the segment determine how many SBALEs are needed |
625 | * for the memory segment. | 625 | * for the memory segment. |
626 | */ | 626 | */ |
627 | static inline int | 627 | static int |
628 | zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 628 | zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
629 | void *start_addr, unsigned long total_length) | 629 | void *start_addr, unsigned long total_length) |
630 | { | 630 | { |
@@ -659,7 +659,7 @@ zfcp_qdio_sbals_from_segment(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
659 | * @sg_count: number of elements in scatter-gather list | 659 | * @sg_count: number of elements in scatter-gather list |
660 | * @max_sbals: upper bound for number of SBALs to be used | 660 | * @max_sbals: upper bound for number of SBALs to be used |
661 | */ | 661 | */ |
662 | inline int | 662 | int |
663 | zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 663 | zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
664 | struct scatterlist *sg, int sg_count, int max_sbals) | 664 | struct scatterlist *sg, int sg_count, int max_sbals) |
665 | { | 665 | { |
@@ -707,7 +707,7 @@ out: | |||
707 | * @length: length of buffer | 707 | * @length: length of buffer |
708 | * @max_sbals: upper bound for number of SBALs to be used | 708 | * @max_sbals: upper bound for number of SBALs to be used |
709 | */ | 709 | */ |
710 | static inline int | 710 | static int |
711 | zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | 711 | zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, |
712 | void *buffer, unsigned long length, int max_sbals) | 712 | void *buffer, unsigned long length, int max_sbals) |
713 | { | 713 | { |
@@ -728,7 +728,7 @@ zfcp_qdio_sbals_from_buffer(struct zfcp_fsf_req *fsf_req, unsigned long sbtype, | |||
728 | * @scsi_cmnd: either scatter-gather list or buffer contained herein is used | 728 | * @scsi_cmnd: either scatter-gather list or buffer contained herein is used |
729 | * to fill SBALs | 729 | * to fill SBALs |
730 | */ | 730 | */ |
731 | inline int | 731 | int |
732 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, | 732 | zfcp_qdio_sbals_from_scsicmnd(struct zfcp_fsf_req *fsf_req, |
733 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) | 733 | unsigned long sbtype, struct scsi_cmnd *scsi_cmnd) |
734 | { | 734 | { |
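The kernel-doc comment for zfcp_qdio_sbals_from_segment above notes that alignment and length of a memory segment decide how many SBALEs it needs. As a rough illustration only — assuming each SBALE describes at most one 4 KB-aligned fragment, which is not spelled out in the hunk shown — a stand-alone sketch of that count could look like:

/*
 * Hypothetical stand-alone sketch (not the driver code): estimate how many
 * SBALEs a memory segment occupies, assuming each SBALE may describe at
 * most one 4 KB-aligned fragment.
 */
#include <stdio.h>

#define SBALE_FRAG_SIZE 4096UL

static unsigned long sbales_for_segment(unsigned long start, unsigned long length)
{
	unsigned long first = start & ~(SBALE_FRAG_SIZE - 1);		/* round down */
	unsigned long last  = (start + length - 1) & ~(SBALE_FRAG_SIZE - 1);

	return length ? (last - first) / SBALE_FRAG_SIZE + 1 : 0;
}

int main(void)
{
	/* An 8 KB buffer starting 256 bytes into a page spans three fragments. */
	printf("%lu\n", sbales_for_segment(0x1100, 8192));	/* prints 3 */
	return 0;
}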
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 452d96f92a14..99db02062c3b 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -90,7 +90,7 @@ zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) | |||
90 | return fcp_sns_info_ptr; | 90 | return fcp_sns_info_ptr; |
91 | } | 91 | } |
92 | 92 | ||
93 | fcp_dl_t * | 93 | static fcp_dl_t * |
94 | zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) | 94 | zfcp_get_fcp_dl_ptr(struct fcp_cmnd_iu * fcp_cmd) |
95 | { | 95 | { |
96 | int additional_length = fcp_cmd->add_fcp_cdb_length << 2; | 96 | int additional_length = fcp_cmd->add_fcp_cdb_length << 2; |
@@ -124,19 +124,19 @@ zfcp_set_fcp_dl(struct fcp_cmnd_iu *fcp_cmd, fcp_dl_t fcp_dl) | |||
124 | * regarding the specified byte | 124 | * regarding the specified byte |
125 | */ | 125 | */ |
126 | static inline void | 126 | static inline void |
127 | set_byte(u32 * result, char status, char pos) | 127 | set_byte(int *result, char status, char pos) |
128 | { | 128 | { |
129 | *result |= status << (pos * 8); | 129 | *result |= status << (pos * 8); |
130 | } | 130 | } |
131 | 131 | ||
132 | void | 132 | void |
133 | set_host_byte(u32 * result, char status) | 133 | set_host_byte(int *result, char status) |
134 | { | 134 | { |
135 | set_byte(result, status, 2); | 135 | set_byte(result, status, 2); |
136 | } | 136 | } |
137 | 137 | ||
138 | void | 138 | void |
139 | set_driver_byte(u32 * result, char status) | 139 | set_driver_byte(int *result, char status) |
140 | { | 140 | { |
141 | set_byte(result, status, 3); | 141 | set_byte(result, status, 3); |
142 | } | 142 | } |
@@ -280,7 +280,7 @@ out: | |||
280 | return retval; | 280 | return retval; |
281 | } | 281 | } |
282 | 282 | ||
283 | void | 283 | static void |
284 | zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) | 284 | zfcp_scsi_command_sync_handler(struct scsi_cmnd *scpnt) |
285 | { | 285 | { |
286 | struct completion *wait = (struct completion *) scpnt->SCp.ptr; | 286 | struct completion *wait = (struct completion *) scpnt->SCp.ptr; |
@@ -324,7 +324,7 @@ zfcp_scsi_command_sync(struct zfcp_unit *unit, struct scsi_cmnd *scpnt, | |||
324 | * returns: 0 - success, SCSI command enqueued | 324 | * returns: 0 - success, SCSI command enqueued |
325 | * !0 - failure | 325 | * !0 - failure |
326 | */ | 326 | */ |
327 | int | 327 | static int |
328 | zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, | 328 | zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, |
329 | void (*done) (struct scsi_cmnd *)) | 329 | void (*done) (struct scsi_cmnd *)) |
330 | { | 330 | { |
@@ -380,7 +380,7 @@ zfcp_unit_lookup(struct zfcp_adapter *adapter, int channel, unsigned int id, | |||
380 | * will handle late commands. (Usually, the normal completion of late | 380 | * will handle late commands. (Usually, the normal completion of late |
381 | * commands is ignored with respect to the running abort operation.) | 381 | * commands is ignored with respect to the running abort operation.) |
382 | */ | 382 | */ |
383 | int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | 383 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) |
384 | { | 384 | { |
385 | struct Scsi_Host *scsi_host; | 385 | struct Scsi_Host *scsi_host; |
386 | struct zfcp_adapter *adapter; | 386 | struct zfcp_adapter *adapter; |
@@ -445,7 +445,7 @@ int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) | |||
445 | return retval; | 445 | return retval; |
446 | } | 446 | } |
447 | 447 | ||
448 | int | 448 | static int |
449 | zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) | 449 | zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt) |
450 | { | 450 | { |
451 | int retval; | 451 | int retval; |
@@ -541,7 +541,7 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, | |||
541 | /** | 541 | /** |
542 | * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset | 542 | * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset |
543 | */ | 543 | */ |
544 | int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | 544 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) |
545 | { | 545 | { |
546 | struct zfcp_unit *unit; | 546 | struct zfcp_unit *unit; |
547 | struct zfcp_adapter *adapter; | 547 | struct zfcp_adapter *adapter; |
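The set_byte()/set_host_byte()/set_driver_byte() helpers changed above build the SCSI result word by shifting a status value into byte position 2 (host byte) or 3 (driver byte). A minimal stand-alone sketch of that composition; the status values below are illustrative examples, not taken from the patch:

/*
 * Minimal sketch of how the helpers above compose a SCSI result word:
 * byte 2 carries the host byte and byte 3 the driver byte.
 * Hypothetical stand-alone code, not the driver's.
 */
#include <stdio.h>

static void set_byte(int *result, char status, char pos)
{
	*result |= status << (pos * 8);
}

int main(void)
{
	int result = 0;

	set_byte(&result, 0x07 /* e.g. DID_ERROR */, 2);	/* host byte   */
	set_byte(&result, 0x08 /* e.g. DRIVER_SENSE */, 3);	/* driver byte */
	printf("0x%08x\n", result);				/* 0x08070000  */
	return 0;
}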
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index 1e788e815ce7..090743d2f914 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -9,8 +9,14 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/proc_fs.h> | 10 | #include <linux/proc_fs.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/delay.h> | ||
12 | #include <asm/ebcdic.h> | 13 | #include <asm/ebcdic.h> |
13 | 14 | ||
15 | /* Sigh, math-emu. Don't ask. */ | ||
16 | #include <asm/sfp-util.h> | ||
17 | #include <math-emu/soft-fp.h> | ||
18 | #include <math-emu/single.h> | ||
19 | |||
14 | struct sysinfo_1_1_1 { | 20 | struct sysinfo_1_1_1 { |
15 | char reserved_0[32]; | 21 | char reserved_0[32]; |
16 | char manufacturer[16]; | 22 | char manufacturer[16]; |
@@ -198,7 +204,7 @@ static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len) | |||
198 | * if the higher order 8 bits are not zero. Printing | 204 | * if the higher order 8 bits are not zero. Printing |
199 | * a floating point number in the kernel is a no-no, | 205 | * a floating point number in the kernel is a no-no, |
200 | * always print the number as 32 bit unsigned integer. | 206 | * always print the number as 32 bit unsigned integer. |
201 | * The user-space needs to know about the stange | 207 | * The user-space needs to know about the strange |
202 | * encoding of the alternate cpu capability. | 208 | * encoding of the alternate cpu capability. |
203 | */ | 209 | */ |
204 | len += sprintf(page + len, "Capability: %u %u\n", | 210 | len += sprintf(page + len, "Capability: %u %u\n", |
@@ -351,3 +357,58 @@ static __init int create_proc_sysinfo(void) | |||
351 | 357 | ||
352 | __initcall(create_proc_sysinfo); | 358 | __initcall(create_proc_sysinfo); |
353 | 359 | ||
360 | /* | ||
361 | * CPU capability might have changed. Therefore recalculate loops_per_jiffy. | ||
362 | */ | ||
363 | void s390_adjust_jiffies(void) | ||
364 | { | ||
365 | struct sysinfo_1_2_2 *info; | ||
366 | const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */ | ||
367 | FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); | ||
368 | FP_DECL_EX; | ||
369 | unsigned int capability; | ||
370 | |||
371 | info = (void *) get_zeroed_page(GFP_KERNEL); | ||
372 | if (!info) | ||
373 | return; | ||
374 | |||
375 | if (stsi(info, 1, 2, 2) != -ENOSYS) { | ||
376 | /* | ||
377 | * Major sigh. The cpu capability encoding is "special". | ||
378 | * If the first 9 bits of info->capability are 0 then it | ||
379 | * is a 32 bit unsigned integer in the range 0 .. 2^23. | ||
380 | * If the first 9 bits are != 0 then it is a 32 bit float. | ||
381 | * In addition a lower value indicates a proportionally | ||
382 | * higher cpu capacity. Bogomips are the other way round. | ||
383 | * To get to a halfway suitable number we divide 1e7 | ||
384 | * by the cpu capability number. Yes, that means a floating | ||
385 | * point division .. math-emu here we come :-) | ||
386 | */ | ||
387 | FP_UNPACK_SP(SA, &fmil); | ||
388 | if ((info->capability >> 23) == 0) | ||
389 | FP_FROM_INT_S(SB, info->capability, 32, int); | ||
390 | else | ||
391 | FP_UNPACK_SP(SB, &info->capability); | ||
392 | FP_DIV_S(SR, SA, SB); | ||
393 | FP_TO_INT_S(capability, SR, 32, 0); | ||
394 | } else | ||
395 | /* | ||
396 | * Really old machine without stsi block for basic | ||
397 | * cpu information. Report 42.0 bogomips. | ||
398 | */ | ||
399 | capability = 42; | ||
400 | loops_per_jiffy = capability * (500000/HZ); | ||
401 | free_page((unsigned long) info); | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * calibrate the delay loop | ||
406 | */ | ||
407 | void __init calibrate_delay(void) | ||
408 | { | ||
409 | s390_adjust_jiffies(); | ||
410 | /* Print the good old Bogomips line .. */ | ||
411 | printk(KERN_DEBUG "Calibrating delay loop (skipped)... " | ||
412 | "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ), | ||
413 | (loops_per_jiffy/(5000/HZ)) % 100); | ||
414 | } | ||
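For the integer case of the capability encoding handled in s390_adjust_jiffies() above (first 9 bits of info->capability zero), the arithmetic can be checked without math-emu. A hypothetical user-space sketch, assuming HZ=100 and a capability value of 1000 — neither number appears in the patch; the real code needs soft-float because the capability may also be a 32-bit float:

/*
 * Back-of-the-envelope sketch of the integer encoding only.
 */
#include <stdio.h>

#define HZ 100UL	/* assumed tick rate for the example */

int main(void)
{
	unsigned int stsi_capability = 1000;		/* lower value = faster cpu */
	unsigned int capability = 10000000U / stsi_capability;
	unsigned long loops_per_jiffy = capability * (500000UL / HZ);

	/* Mirrors the "BogoMIPS preset" line: prints 10000.00 for this input. */
	printf("%lu.%02lu BogoMIPS preset\n",
	       loops_per_jiffy / (500000UL / HZ),
	       (loops_per_jiffy / (5000UL / HZ)) % 100);
	return 0;
}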
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h index 356a0b183539..296f4f1a20e1 100644 --- a/include/asm-s390/compat.h +++ b/include/asm-s390/compat.h | |||
@@ -6,6 +6,34 @@ | |||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | 8 | ||
9 | #define PSW32_MASK_PER 0x40000000UL | ||
10 | #define PSW32_MASK_DAT 0x04000000UL | ||
11 | #define PSW32_MASK_IO 0x02000000UL | ||
12 | #define PSW32_MASK_EXT 0x01000000UL | ||
13 | #define PSW32_MASK_KEY 0x00F00000UL | ||
14 | #define PSW32_MASK_MCHECK 0x00040000UL | ||
15 | #define PSW32_MASK_WAIT 0x00020000UL | ||
16 | #define PSW32_MASK_PSTATE 0x00010000UL | ||
17 | #define PSW32_MASK_ASC 0x0000C000UL | ||
18 | #define PSW32_MASK_CC 0x00003000UL | ||
19 | #define PSW32_MASK_PM 0x00000f00UL | ||
20 | |||
21 | #define PSW32_ADDR_AMODE31 0x80000000UL | ||
22 | #define PSW32_ADDR_INSN 0x7FFFFFFFUL | ||
23 | |||
24 | #define PSW32_BASE_BITS 0x00080000UL | ||
25 | |||
26 | #define PSW32_ASC_PRIMARY 0x00000000UL | ||
27 | #define PSW32_ASC_ACCREG 0x00004000UL | ||
28 | #define PSW32_ASC_SECONDARY 0x00008000UL | ||
29 | #define PSW32_ASC_HOME 0x0000C000UL | ||
30 | |||
31 | #define PSW32_MASK_MERGE(CURRENT,NEW) \ | ||
32 | (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \ | ||
33 | ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM))) | ||
34 | |||
35 | extern long psw32_user_bits; | ||
36 | |||
9 | #define COMPAT_USER_HZ 100 | 37 | #define COMPAT_USER_HZ 100 |
10 | 38 | ||
11 | typedef u32 compat_size_t; | 39 | typedef u32 compat_size_t; |
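The new PSW32_MASK_MERGE macro above takes only the condition-code and program-mask bits from the new mask and keeps everything else from the current one. A stand-alone illustration with made-up mask values:

/*
 * Illustration of PSW32_MASK_MERGE: only CC and PM come from the new mask.
 * Hypothetical stand-alone snippet re-using the same #defines.
 */
#include <stdio.h>

#define PSW32_MASK_CC	0x00003000UL
#define PSW32_MASK_PM	0x00000f00UL

#define PSW32_MASK_MERGE(CURRENT,NEW) \
	(((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
	 ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))

int main(void)
{
	unsigned long cur_mask = 0x070D1000UL;	/* some current mask, CC = 1      */
	unsigned long new_mask = 0x00002F00UL;	/* user-supplied CC = 2, PM = 0xf */

	/* Prints 0x070d2f00: CC/PM replaced, all remaining bits untouched. */
	printf("0x%08lx\n", PSW32_MASK_MERGE(cur_mask, new_mask));
	return 0;
}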
diff --git a/include/asm-s390/etr.h b/include/asm-s390/etr.h new file mode 100644 index 000000000000..b498f19bb9a7 --- /dev/null +++ b/include/asm-s390/etr.h | |||
@@ -0,0 +1,219 @@ | |||
1 | /* | ||
2 | * include/asm-s390/etr.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
6 | */ | ||
7 | #ifndef __S390_ETR_H | ||
8 | #define __S390_ETR_H | ||
9 | |||
10 | /* ETR attachment control register */ | ||
11 | struct etr_eacr { | ||
12 | unsigned int e0 : 1; /* port 0 stepping control */ | ||
13 | unsigned int e1 : 1; /* port 1 stepping control */ | ||
14 | unsigned int _pad0 : 5; /* must be 00100 */ | ||
15 | unsigned int dp : 1; /* data port control */ | ||
16 | unsigned int p0 : 1; /* port 0 change recognition control */ | ||
17 | unsigned int p1 : 1; /* port 1 change recognition control */ | ||
18 | unsigned int _pad1 : 3; /* must be 000 */ | ||
19 | unsigned int ea : 1; /* ETR alert control */ | ||
20 | unsigned int es : 1; /* ETR sync check control */ | ||
21 | unsigned int sl : 1; /* switch to local control */ | ||
22 | } __attribute__ ((packed)); | ||
23 | |||
24 | /* Port state returned by steai */ | ||
25 | enum etr_psc { | ||
26 | etr_psc_operational = 0, | ||
27 | etr_psc_semi_operational = 1, | ||
28 | etr_psc_protocol_error = 4, | ||
29 | etr_psc_no_symbols = 8, | ||
30 | etr_psc_no_signal = 12, | ||
31 | etr_psc_pps_mode = 13 | ||
32 | }; | ||
33 | |||
34 | /* Logical port state returned by stetr */ | ||
35 | enum etr_lpsc { | ||
36 | etr_lpsc_operational_step = 0, | ||
37 | etr_lpsc_operational_alt = 1, | ||
38 | etr_lpsc_semi_operational = 2, | ||
39 | etr_lpsc_protocol_error = 4, | ||
40 | etr_lpsc_no_symbol_sync = 8, | ||
41 | etr_lpsc_no_signal = 12, | ||
42 | etr_lpsc_pps_mode = 13 | ||
43 | }; | ||
44 | |||
45 | /* ETR status words */ | ||
46 | struct etr_esw { | ||
47 | struct etr_eacr eacr; /* attachment control register */ | ||
48 | unsigned int y : 1; /* stepping mode */ | ||
49 | unsigned int _pad0 : 5; /* must be 00000 */ | ||
50 | unsigned int p : 1; /* stepping port number */ | ||
51 | unsigned int q : 1; /* data port number */ | ||
52 | unsigned int psc0 : 4; /* port 0 state code */ | ||
53 | unsigned int psc1 : 4; /* port 1 state code */ | ||
54 | } __attribute__ ((packed)); | ||
55 | |||
56 | /* Second level data register status word */ | ||
57 | struct etr_slsw { | ||
58 | unsigned int vv1 : 1; /* copy of validity bit data frame 1 */ | ||
59 | unsigned int vv2 : 1; /* copy of validity bit data frame 2 */ | ||
60 | unsigned int vv3 : 1; /* copy of validity bit data frame 3 */ | ||
61 | unsigned int vv4 : 1; /* copy of validity bit data frame 4 */ | ||
62 | unsigned int _pad0 : 19; /* must be all zeroes */ | ||
63 | unsigned int n : 1; /* EAF port number */ | ||
64 | unsigned int v1 : 1; /* validity bit ETR data frame 1 */ | ||
65 | unsigned int v2 : 1; /* validity bit ETR data frame 2 */ | ||
66 | unsigned int v3 : 1; /* validity bit ETR data frame 3 */ | ||
67 | unsigned int v4 : 1; /* validity bit ETR data frame 4 */ | ||
68 | unsigned int _pad1 : 4; /* must be 0000 */ | ||
69 | } __attribute__ ((packed)); | ||
70 | |||
71 | /* ETR data frames */ | ||
72 | struct etr_edf1 { | ||
73 | unsigned int u : 1; /* untuned bit */ | ||
74 | unsigned int _pad0 : 1; /* must be 0 */ | ||
75 | unsigned int r : 1; /* service request bit */ | ||
76 | unsigned int _pad1 : 4; /* must be 0000 */ | ||
77 | unsigned int a : 1; /* time adjustment bit */ | ||
78 | unsigned int net_id : 8; /* ETR network id */ | ||
79 | unsigned int etr_id : 8; /* id of ETR which sends data frames */ | ||
80 | unsigned int etr_pn : 8; /* port number of ETR output port */ | ||
81 | } __attribute__ ((packed)); | ||
82 | |||
83 | struct etr_edf2 { | ||
84 | unsigned int etv : 32; /* Upper 32 bits of TOD. */ | ||
85 | } __attribute__ ((packed)); | ||
86 | |||
87 | struct etr_edf3 { | ||
88 | unsigned int rc : 8; /* failure reason code */ | ||
89 | unsigned int _pad0 : 3; /* must be 000 */ | ||
90 | unsigned int c : 1; /* ETR coupled bit */ | ||
91 | unsigned int tc : 4; /* ETR type code */ | ||
92 | unsigned int blto : 8; /* biased local time offset */ | ||
93 | /* (blto - 128) * 15 = minutes */ | ||
94 | unsigned int buo : 8; /* biased utc offset */ | ||
95 | /* (buo - 128) = leap seconds */ | ||
96 | } __attribute__ ((packed)); | ||
97 | |||
98 | struct etr_edf4 { | ||
99 | unsigned int ed : 8; /* ETS device dependent data */ | ||
100 | unsigned int _pad0 : 1; /* must be 0 */ | ||
101 | unsigned int buc : 5; /* biased ut1 correction */ | ||
102 | /* (buc - 16) * 0.1 seconds */ | ||
103 | unsigned int em : 6; /* ETS error magnitude */ | ||
104 | unsigned int dc : 6; /* ETS drift code */ | ||
105 | unsigned int sc : 6; /* ETS steering code */ | ||
106 | } __attribute__ ((packed)); | ||
107 | |||
108 | /* | ||
109 | * ETR attachment information block, two formats | ||
110 | * format 1 has 4 reserved words with a size of 64 bytes | ||
111 | * format 2 has 16 reserved words with a size of 96 bytes | ||
112 | */ | ||
113 | struct etr_aib { | ||
114 | struct etr_esw esw; | ||
115 | struct etr_slsw slsw; | ||
116 | unsigned long long tsp; | ||
117 | struct etr_edf1 edf1; | ||
118 | struct etr_edf2 edf2; | ||
119 | struct etr_edf3 edf3; | ||
120 | struct etr_edf4 edf4; | ||
121 | unsigned int reserved[16]; | ||
122 | } __attribute__ ((packed,aligned(8))); | ||
123 | |||
124 | /* ETR interruption parameter */ | ||
125 | struct etr_interruption_parameter { | ||
126 | unsigned int _pad0 : 8; | ||
127 | unsigned int pc0 : 1; /* port 0 state change */ | ||
128 | unsigned int pc1 : 1; /* port 1 state change */ | ||
129 | unsigned int _pad1 : 3; | ||
130 | unsigned int eai : 1; /* ETR alert indication */ | ||
131 | unsigned int _pad2 : 18; | ||
132 | } __attribute__ ((packed)); | ||
133 | |||
134 | /* Query TOD offset result */ | ||
135 | struct etr_ptff_qto { | ||
136 | unsigned long long physical_clock; | ||
137 | unsigned long long tod_offset; | ||
138 | unsigned long long logical_tod_offset; | ||
139 | unsigned long long tod_epoch_difference; | ||
140 | } __attribute__ ((packed)); | ||
141 | |||
142 | /* Inline assembly helper functions */ | ||
143 | static inline int etr_setr(struct etr_eacr *ctrl) | ||
144 | { | ||
145 | int rc = -ENOSYS; | ||
146 | |||
147 | asm volatile( | ||
148 | " .insn s,0xb2160000,0(%2)\n" | ||
149 | "0: la %0,0\n" | ||
150 | "1:\n" | ||
151 | EX_TABLE(0b,1b) | ||
152 | : "+d" (rc) : "m" (*ctrl), "a" (ctrl)); | ||
153 | return rc; | ||
154 | } | ||
155 | |||
156 | /* Stores a format 1 aib with 64 bytes */ | ||
157 | static inline int etr_stetr(struct etr_aib *aib) | ||
158 | { | ||
159 | int rc = -ENOSYS; | ||
160 | |||
161 | asm volatile( | ||
162 | " .insn s,0xb2170000,0(%2)\n" | ||
163 | "0: la %0,0\n" | ||
164 | "1:\n" | ||
165 | EX_TABLE(0b,1b) | ||
166 | : "+d" (rc) : "m" (*aib), "a" (aib)); | ||
167 | return rc; | ||
168 | } | ||
169 | |||
170 | /* Stores a format 2 aib with 96 bytes for specified port */ | ||
171 | static inline int etr_steai(struct etr_aib *aib, unsigned int func) | ||
172 | { | ||
173 | register unsigned int reg0 asm("0") = func; | ||
174 | int rc = -ENOSYS; | ||
175 | |||
176 | asm volatile( | ||
177 | " .insn s,0xb2b30000,0(%2)\n" | ||
178 | "0: la %0,0\n" | ||
179 | "1:\n" | ||
180 | EX_TABLE(0b,1b) | ||
181 | : "+d" (rc) : "m" (*aib), "a" (aib), "d" (reg0)); | ||
182 | return rc; | ||
183 | } | ||
184 | |||
185 | /* Function codes for the steai instruction. */ | ||
186 | #define ETR_STEAI_STEPPING_PORT 0x10 | ||
187 | #define ETR_STEAI_ALTERNATE_PORT 0x11 | ||
188 | #define ETR_STEAI_PORT_0 0x12 | ||
189 | #define ETR_STEAI_PORT_1 0x13 | ||
190 | |||
191 | static inline int etr_ptff(void *ptff_block, unsigned int func) | ||
192 | { | ||
193 | register unsigned int reg0 asm("0") = func; | ||
194 | register unsigned long reg1 asm("1") = (unsigned long) ptff_block; | ||
195 | int rc = -ENOSYS; | ||
196 | |||
197 | asm volatile( | ||
198 | " .word 0x0104\n" | ||
199 | " ipm %0\n" | ||
200 | " srl %0,28\n" | ||
201 | : "=d" (rc), "=m" (ptff_block) | ||
202 | : "d" (reg0), "d" (reg1), "m" (ptff_block) : "cc"); | ||
203 | return rc; | ||
204 | } | ||
205 | |||
206 | /* Function codes for the ptff instruction. */ | ||
207 | #define ETR_PTFF_QAF 0x00 /* query available functions */ | ||
208 | #define ETR_PTFF_QTO 0x01 /* query tod offset */ | ||
209 | #define ETR_PTFF_QSI 0x02 /* query steering information */ | ||
210 | #define ETR_PTFF_ATO 0x40 /* adjust tod offset */ | ||
211 | #define ETR_PTFF_STO 0x41 /* set tod offset */ | ||
212 | #define ETR_PTFF_SFS 0x42 /* set fine steering rate */ | ||
213 | #define ETR_PTFF_SGS 0x43 /* set gross steering rate */ | ||
214 | |||
215 | /* Functions needed by the machine check handler */ | ||
216 | extern void etr_switch_to_local(void); | ||
217 | extern void etr_sync_check(void); | ||
218 | |||
219 | #endif /* __S390_ETR_H */ | ||
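The biased fields in struct etr_edf3 above decode as ((blto - 128) * 15) minutes of local time offset and (buo - 128) leap seconds, per the comments in the header. A tiny sketch with invented sample values:

/*
 * Decoding sketch for the biased edf3 fields; the input values are
 * hypothetical, stand-alone code only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int blto = 132;	/* biased local time offset */
	unsigned int buo  = 150;	/* biased utc offset        */

	int local_offset_minutes = ((int) blto - 128) * 15;	/* 60 minutes */
	int leap_seconds = (int) buo - 128;			/* 22 seconds */

	printf("local offset: %d min, leap seconds: %d\n",
	       local_offset_minutes, leap_seconds);
	return 0;
}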
diff --git a/include/asm-s390/hardirq.h b/include/asm-s390/hardirq.h index c2f6a8782d31..31beb18cb3d1 100644 --- a/include/asm-s390/hardirq.h +++ b/include/asm-s390/hardirq.h | |||
@@ -32,6 +32,6 @@ typedef struct { | |||
32 | 32 | ||
33 | #define HARDIRQ_BITS 8 | 33 | #define HARDIRQ_BITS 8 |
34 | 34 | ||
35 | extern void account_ticks(void); | 35 | extern void account_ticks(u64 time); |
36 | 36 | ||
37 | #endif /* __ASM_HARDIRQ_H */ | 37 | #endif /* __ASM_HARDIRQ_H */ |
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h index efb7de9c1c6b..a4c2d550dad4 100644 --- a/include/asm-s390/io.h +++ b/include/asm-s390/io.h | |||
@@ -28,11 +28,7 @@ static inline unsigned long virt_to_phys(volatile void * address) | |||
28 | { | 28 | { |
29 | unsigned long real_address; | 29 | unsigned long real_address; |
30 | asm volatile( | 30 | asm volatile( |
31 | #ifndef __s390x__ | ||
32 | " lra %0,0(%1)\n" | 31 | " lra %0,0(%1)\n" |
33 | #else /* __s390x__ */ | ||
34 | " lrag %0,0(%1)\n" | ||
35 | #endif /* __s390x__ */ | ||
36 | " jz 0f\n" | 32 | " jz 0f\n" |
37 | " la %0,0\n" | 33 | " la %0,0\n" |
38 | "0:" | 34 | "0:" |
diff --git a/include/asm-s390/kdebug.h b/include/asm-s390/kdebug.h index 40cc68025e01..1b50f89819a4 100644 --- a/include/asm-s390/kdebug.h +++ b/include/asm-s390/kdebug.h | |||
@@ -26,7 +26,6 @@ extern int register_page_fault_notifier(struct notifier_block *); | |||
26 | extern int unregister_page_fault_notifier(struct notifier_block *); | 26 | extern int unregister_page_fault_notifier(struct notifier_block *); |
27 | extern struct atomic_notifier_head s390die_chain; | 27 | extern struct atomic_notifier_head s390die_chain; |
28 | 28 | ||
29 | |||
30 | enum die_val { | 29 | enum die_val { |
31 | DIE_OOPS = 1, | 30 | DIE_OOPS = 1, |
32 | DIE_BPT, | 31 | DIE_BPT, |
@@ -56,4 +55,6 @@ static inline int notify_die(enum die_val val, const char *str, | |||
56 | return atomic_notifier_call_chain(&s390die_chain, val, &args); | 55 | return atomic_notifier_call_chain(&s390die_chain, val, &args); |
57 | } | 56 | } |
58 | 57 | ||
58 | extern void die(const char *, struct pt_regs *, long); | ||
59 | |||
59 | #endif | 60 | #endif |
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h index 74f7389bd3ee..4a31d0a7ee83 100644 --- a/include/asm-s390/lowcore.h +++ b/include/asm-s390/lowcore.h | |||
@@ -220,7 +220,8 @@ struct _lowcore | |||
220 | __u32 kernel_asce; /* 0xc4c */ | 220 | __u32 kernel_asce; /* 0xc4c */ |
221 | __u32 user_asce; /* 0xc50 */ | 221 | __u32 user_asce; /* 0xc50 */ |
222 | __u32 panic_stack; /* 0xc54 */ | 222 | __u32 panic_stack; /* 0xc54 */ |
223 | __u8 pad10[0xc60-0xc58]; /* 0xc58 */ | 223 | __u32 user_exec_asce; /* 0xc58 */ |
224 | __u8 pad10[0xc60-0xc5c]; /* 0xc5c */ | ||
224 | /* entry.S sensitive area start */ | 225 | /* entry.S sensitive area start */ |
225 | struct cpuinfo_S390 cpu_data; /* 0xc60 */ | 226 | struct cpuinfo_S390 cpu_data; /* 0xc60 */ |
226 | __u32 ipl_device; /* 0xc7c */ | 227 | __u32 ipl_device; /* 0xc7c */ |
@@ -310,7 +311,8 @@ struct _lowcore | |||
310 | __u64 kernel_asce; /* 0xd58 */ | 311 | __u64 kernel_asce; /* 0xd58 */ |
311 | __u64 user_asce; /* 0xd60 */ | 312 | __u64 user_asce; /* 0xd60 */ |
312 | __u64 panic_stack; /* 0xd68 */ | 313 | __u64 panic_stack; /* 0xd68 */ |
313 | __u8 pad10[0xd80-0xd70]; /* 0xd70 */ | 314 | __u64 user_exec_asce; /* 0xd70 */ |
315 | __u8 pad10[0xd80-0xd78]; /* 0xd78 */ | ||
314 | /* entry.S sensitive area start */ | 316 | /* entry.S sensitive area start */ |
315 | struct cpuinfo_S390 cpu_data; /* 0xd80 */ | 317 | struct cpuinfo_S390 cpu_data; /* 0xd80 */ |
316 | __u32 ipl_device; /* 0xdb8 */ | 318 | __u32 ipl_device; /* 0xdb8 */ |
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h index bcf24a873874..1d21da220d49 100644 --- a/include/asm-s390/mmu_context.h +++ b/include/asm-s390/mmu_context.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #ifndef __S390_MMU_CONTEXT_H | 9 | #ifndef __S390_MMU_CONTEXT_H |
10 | #define __S390_MMU_CONTEXT_H | 10 | #define __S390_MMU_CONTEXT_H |
11 | 11 | ||
12 | #include <asm/pgalloc.h> | ||
12 | /* | 13 | /* |
13 | * get a new mmu context.. S390 don't know about contexts. | 14 | * get a new mmu context.. S390 don't know about contexts. |
14 | */ | 15 | */ |
@@ -16,29 +17,44 @@ | |||
16 | 17 | ||
17 | #define destroy_context(mm) do { } while (0) | 18 | #define destroy_context(mm) do { } while (0) |
18 | 19 | ||
20 | #ifndef __s390x__ | ||
21 | #define LCTL_OPCODE "lctl" | ||
22 | #define PGTABLE_BITS (_SEGMENT_TABLE|USER_STD_MASK) | ||
23 | #else | ||
24 | #define LCTL_OPCODE "lctlg" | ||
25 | #define PGTABLE_BITS (_REGION_TABLE|USER_STD_MASK) | ||
26 | #endif | ||
27 | |||
19 | static inline void enter_lazy_tlb(struct mm_struct *mm, | 28 | static inline void enter_lazy_tlb(struct mm_struct *mm, |
20 | struct task_struct *tsk) | 29 | struct task_struct *tsk) |
21 | { | 30 | { |
22 | } | 31 | } |
23 | 32 | ||
24 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | 33 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
25 | struct task_struct *tsk) | 34 | struct task_struct *tsk) |
26 | { | 35 | { |
27 | if (prev != next) { | 36 | pgd_t *shadow_pgd = get_shadow_pgd(next->pgd); |
28 | #ifndef __s390x__ | 37 | |
29 | S390_lowcore.user_asce = (__pa(next->pgd)&PAGE_MASK) | | 38 | if (prev != next) { |
30 | (_SEGMENT_TABLE|USER_STD_MASK); | 39 | S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) | |
31 | /* Load home space page table origin. */ | 40 | PGTABLE_BITS; |
32 | asm volatile("lctl 13,13,%0" | 41 | if (shadow_pgd) { |
33 | : : "m" (S390_lowcore.user_asce) ); | 42 | /* Load primary/secondary space page table origin. */ |
34 | #else /* __s390x__ */ | 43 | S390_lowcore.user_exec_asce = |
35 | S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) | | 44 | (__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS; |
36 | (_REGION_TABLE|USER_STD_MASK); | 45 | asm volatile(LCTL_OPCODE" 1,1,%0\n" |
37 | /* Load home space page table origin. */ | 46 | LCTL_OPCODE" 7,7,%1" |
38 | asm volatile("lctlg 13,13,%0" | 47 | : : "m" (S390_lowcore.user_exec_asce), |
39 | : : "m" (S390_lowcore.user_asce) ); | 48 | "m" (S390_lowcore.user_asce) ); |
40 | #endif /* __s390x__ */ | 49 | } else if (switch_amode) { |
41 | } | 50 | /* Load primary space page table origin. */ |
51 | asm volatile(LCTL_OPCODE" 1,1,%0" | ||
52 | : : "m" (S390_lowcore.user_asce) ); | ||
53 | } else | ||
54 | /* Load home space page table origin. */ | ||
55 | asm volatile(LCTL_OPCODE" 13,13,%0" | ||
56 | : : "m" (S390_lowcore.user_asce) ); | ||
57 | } | ||
42 | cpu_set(smp_processor_id(), next->cpu_vm_mask); | 58 | cpu_set(smp_processor_id(), next->cpu_vm_mask); |
43 | } | 59 | } |
44 | 60 | ||
@@ -51,4 +67,4 @@ static inline void activate_mm(struct mm_struct *prev, | |||
51 | set_fs(current->thread.mm_segment); | 67 | set_fs(current->thread.mm_segment); |
52 | } | 68 | } |
53 | 69 | ||
54 | #endif | 70 | #endif /* __S390_MMU_CONTEXT_H */ |
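switch_mm() above now distinguishes three cases: with a shadow pgd it loads the exec ASCE into cr1 and the user ASCE into cr7, with switch_amode it loads the user ASCE into cr1, and otherwise it keeps the classic home-space load into cr13. A toy decision function restating just that case split — names are illustrative, not kernel code:

/*
 * Sketch of the address-space decision in switch_mm(), reduced to which
 * control registers receive the user ASCE.
 */
#include <stdio.h>

enum asce_mode { MODE_HOME, MODE_PRIMARY, MODE_PRIMARY_SECONDARY };

static enum asce_mode pick_mode(int has_shadow_pgd, int switch_amode)
{
	if (has_shadow_pgd)
		return MODE_PRIMARY_SECONDARY;	/* cr1 = exec asce, cr7 = asce */
	if (switch_amode)
		return MODE_PRIMARY;		/* cr1 = asce                  */
	return MODE_HOME;			/* cr13 = asce (classic layout) */
}

int main(void)
{
	printf("%d %d %d\n", pick_mode(1, 0), pick_mode(0, 1), pick_mode(0, 0));
	return 0;
}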
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h index 0707a7e2fc16..56c8a6c80e2e 100644 --- a/include/asm-s390/pgalloc.h +++ b/include/asm-s390/pgalloc.h | |||
@@ -47,6 +47,17 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
47 | 47 | ||
48 | if (!pgd) | 48 | if (!pgd) |
49 | return NULL; | 49 | return NULL; |
50 | if (s390_noexec) { | ||
51 | pgd_t *shadow_pgd = (pgd_t *) | ||
52 | __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER); | ||
53 | struct page *page = virt_to_page(pgd); | ||
54 | |||
55 | if (!shadow_pgd) { | ||
56 | free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); | ||
57 | return NULL; | ||
58 | } | ||
59 | page->lru.next = (void *) shadow_pgd; | ||
60 | } | ||
50 | for (i = 0; i < PTRS_PER_PGD; i++) | 61 | for (i = 0; i < PTRS_PER_PGD; i++) |
51 | #ifndef __s390x__ | 62 | #ifndef __s390x__ |
52 | pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); | 63 | pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE)); |
@@ -58,6 +69,10 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
58 | 69 | ||
59 | static inline void pgd_free(pgd_t *pgd) | 70 | static inline void pgd_free(pgd_t *pgd) |
60 | { | 71 | { |
72 | pgd_t *shadow_pgd = get_shadow_pgd(pgd); | ||
73 | |||
74 | if (shadow_pgd) | ||
75 | free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER); | ||
61 | free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); | 76 | free_pages((unsigned long) pgd, PGD_ALLOC_ORDER); |
62 | } | 77 | } |
63 | 78 | ||
@@ -71,6 +86,7 @@ static inline void pgd_free(pgd_t *pgd) | |||
71 | #define pmd_free(x) do { } while (0) | 86 | #define pmd_free(x) do { } while (0) |
72 | #define __pmd_free_tlb(tlb,x) do { } while (0) | 87 | #define __pmd_free_tlb(tlb,x) do { } while (0) |
73 | #define pgd_populate(mm, pmd, pte) BUG() | 88 | #define pgd_populate(mm, pmd, pte) BUG() |
89 | #define pgd_populate_kernel(mm, pmd, pte) BUG() | ||
74 | #else /* __s390x__ */ | 90 | #else /* __s390x__ */ |
75 | static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) | 91 | static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) |
76 | { | 92 | { |
@@ -79,6 +95,17 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) | |||
79 | 95 | ||
80 | if (!pmd) | 96 | if (!pmd) |
81 | return NULL; | 97 | return NULL; |
98 | if (s390_noexec) { | ||
99 | pmd_t *shadow_pmd = (pmd_t *) | ||
100 | __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER); | ||
101 | struct page *page = virt_to_page(pmd); | ||
102 | |||
103 | if (!shadow_pmd) { | ||
104 | free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); | ||
105 | return NULL; | ||
106 | } | ||
107 | page->lru.next = (void *) shadow_pmd; | ||
108 | } | ||
82 | for (i=0; i < PTRS_PER_PMD; i++) | 109 | for (i=0; i < PTRS_PER_PMD; i++) |
83 | pmd_clear(pmd + i); | 110 | pmd_clear(pmd + i); |
84 | return pmd; | 111 | return pmd; |
@@ -86,6 +113,10 @@ static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) | |||
86 | 113 | ||
87 | static inline void pmd_free (pmd_t *pmd) | 114 | static inline void pmd_free (pmd_t *pmd) |
88 | { | 115 | { |
116 | pmd_t *shadow_pmd = get_shadow_pmd(pmd); | ||
117 | |||
118 | if (shadow_pmd) | ||
119 | free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER); | ||
89 | free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); | 120 | free_pages((unsigned long) pmd, PMD_ALLOC_ORDER); |
90 | } | 121 | } |
91 | 122 | ||
@@ -95,11 +126,22 @@ static inline void pmd_free (pmd_t *pmd) | |||
95 | pmd_free(pmd); \ | 126 | pmd_free(pmd); \ |
96 | } while (0) | 127 | } while (0) |
97 | 128 | ||
98 | static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | 129 | static inline void |
130 | pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | ||
99 | { | 131 | { |
100 | pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); | 132 | pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd); |
101 | } | 133 | } |
102 | 134 | ||
135 | static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | ||
136 | { | ||
137 | pgd_t *shadow_pgd = get_shadow_pgd(pgd); | ||
138 | pmd_t *shadow_pmd = get_shadow_pmd(pmd); | ||
139 | |||
140 | if (shadow_pgd && shadow_pmd) | ||
141 | pgd_populate_kernel(mm, shadow_pgd, shadow_pmd); | ||
142 | pgd_populate_kernel(mm, pgd, pmd); | ||
143 | } | ||
144 | |||
103 | #endif /* __s390x__ */ | 145 | #endif /* __s390x__ */ |
104 | 146 | ||
105 | static inline void | 147 | static inline void |
@@ -119,7 +161,13 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | |||
119 | static inline void | 161 | static inline void |
120 | pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) | 162 | pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) |
121 | { | 163 | { |
122 | pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page)); | 164 | pte_t *pte = (pte_t *)page_to_phys(page); |
165 | pmd_t *shadow_pmd = get_shadow_pmd(pmd); | ||
166 | pte_t *shadow_pte = get_shadow_pte(pte); | ||
167 | |||
168 | pmd_populate_kernel(mm, pmd, pte); | ||
169 | if (shadow_pmd && shadow_pte) | ||
170 | pmd_populate_kernel(mm, shadow_pmd, shadow_pte); | ||
123 | } | 171 | } |
124 | 172 | ||
125 | /* | 173 | /* |
@@ -133,6 +181,17 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr) | |||
133 | 181 | ||
134 | if (!pte) | 182 | if (!pte) |
135 | return NULL; | 183 | return NULL; |
184 | if (s390_noexec) { | ||
185 | pte_t *shadow_pte = (pte_t *) | ||
186 | __get_free_page(GFP_KERNEL|__GFP_REPEAT); | ||
187 | struct page *page = virt_to_page(pte); | ||
188 | |||
189 | if (!shadow_pte) { | ||
190 | free_page((unsigned long) pte); | ||
191 | return NULL; | ||
192 | } | ||
193 | page->lru.next = (void *) shadow_pte; | ||
194 | } | ||
136 | for (i=0; i < PTRS_PER_PTE; i++) { | 195 | for (i=0; i < PTRS_PER_PTE; i++) { |
137 | pte_clear(mm, vmaddr, pte + i); | 196 | pte_clear(mm, vmaddr, pte + i); |
138 | vmaddr += PAGE_SIZE; | 197 | vmaddr += PAGE_SIZE; |
@@ -151,14 +210,30 @@ pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr) | |||
151 | 210 | ||
152 | static inline void pte_free_kernel(pte_t *pte) | 211 | static inline void pte_free_kernel(pte_t *pte) |
153 | { | 212 | { |
154 | free_page((unsigned long) pte); | 213 | pte_t *shadow_pte = get_shadow_pte(pte); |
214 | |||
215 | if (shadow_pte) | ||
216 | free_page((unsigned long) shadow_pte); | ||
217 | free_page((unsigned long) pte); | ||
155 | } | 218 | } |
156 | 219 | ||
157 | static inline void pte_free(struct page *pte) | 220 | static inline void pte_free(struct page *pte) |
158 | { | 221 | { |
159 | __free_page(pte); | 222 | struct page *shadow_page = get_shadow_page(pte); |
223 | |||
224 | if (shadow_page) | ||
225 | __free_page(shadow_page); | ||
226 | __free_page(pte); | ||
160 | } | 227 | } |
161 | 228 | ||
162 | #define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte) | 229 | #define __pte_free_tlb(tlb, pte) \ |
230 | ({ \ | ||
231 | struct mmu_gather *__tlb = (tlb); \ | ||
232 | struct page *__pte = (pte); \ | ||
233 | struct page *shadow_page = get_shadow_page(__pte); \ | ||
234 | if (shadow_page) \ | ||
235 | tlb_remove_page(__tlb, shadow_page); \ | ||
236 | tlb_remove_page(__tlb, __pte); \ | ||
237 | }) | ||
163 | 238 | ||
164 | #endif /* _S390_PGALLOC_H */ | 239 | #endif /* _S390_PGALLOC_H */ |
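pgd_alloc(), pmd_alloc_one() and pte_alloc_one_kernel() above all follow the same pattern when s390_noexec is set: allocate a second (shadow) table of the same size, roll back the primary allocation if that fails, and remember the shadow through a per-page link. A hypothetical user-space sketch of that bookkeeping, with a struct field standing in for page->lru.next:

/*
 * Toy model of the shadow-table bookkeeping; names and the explicit
 * shadow field are stand-ins for the kernel's page->lru.next link.
 */
#include <stdio.h>
#include <stdlib.h>

struct table {
	void *mem;
	void *shadow;	/* stands in for virt_to_page(mem)->lru.next */
};

static int table_alloc(struct table *t, size_t size, int noexec)
{
	t->mem = calloc(1, size);
	t->shadow = NULL;
	if (!t->mem)
		return -1;
	if (noexec) {
		t->shadow = calloc(1, size);
		if (!t->shadow) {		/* roll back like pgd_alloc() */
			free(t->mem);
			t->mem = NULL;
			return -1;
		}
	}
	return 0;
}

static void table_free(struct table *t)
{
	free(t->shadow);	/* free the shadow first, then the table itself */
	free(t->mem);
}

int main(void)
{
	struct table t;

	if (table_alloc(&t, 4096, 1) == 0)
		printf("table %p shadow %p\n", t.mem, t.shadow);
	table_free(&t);
	return 0;
}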
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h index ae61aca5d483..13c16546eff5 100644 --- a/include/asm-s390/pgtable.h +++ b/include/asm-s390/pgtable.h | |||
@@ -40,6 +40,7 @@ struct mm_struct; | |||
40 | 40 | ||
41 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); | 41 | extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); |
42 | extern void paging_init(void); | 42 | extern void paging_init(void); |
43 | extern void vmem_map_init(void); | ||
43 | 44 | ||
44 | /* | 45 | /* |
45 | * The S390 doesn't have any external MMU info: the kernel page | 46 | * The S390 doesn't have any external MMU info: the kernel page |
@@ -223,6 +224,8 @@ extern unsigned long vmalloc_end; | |||
223 | #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ | 224 | #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ |
224 | #define _PAGE_TYPE_RO 0x200 | 225 | #define _PAGE_TYPE_RO 0x200 |
225 | #define _PAGE_TYPE_RW 0x000 | 226 | #define _PAGE_TYPE_RW 0x000 |
227 | #define _PAGE_TYPE_EX_RO 0x202 | ||
228 | #define _PAGE_TYPE_EX_RW 0x002 | ||
226 | 229 | ||
227 | /* | 230 | /* |
228 | * PTE type bits are rather complicated. handle_pte_fault uses pte_present, | 231 | * PTE type bits are rather complicated. handle_pte_fault uses pte_present, |
@@ -243,11 +246,13 @@ extern unsigned long vmalloc_end; | |||
243 | * _PAGE_TYPE_FILE 11?1 -> 11?1 | 246 | * _PAGE_TYPE_FILE 11?1 -> 11?1 |
244 | * _PAGE_TYPE_RO 0100 -> 1100 | 247 | * _PAGE_TYPE_RO 0100 -> 1100 |
245 | * _PAGE_TYPE_RW 0000 -> 1000 | 248 | * _PAGE_TYPE_RW 0000 -> 1000 |
249 | * _PAGE_TYPE_EX_RO 0110 -> 1110 | ||
250 | * _PAGE_TYPE_EX_RW 0010 -> 1010 | ||
246 | * | 251 | * |
247 | * pte_none is true for bits combinations 1000, 1100 | 252 | * pte_none is true for bits combinations 1000, 1010, 1100, 1110 |
248 | * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 | 253 | * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 |
249 | * pte_file is true for bits combinations 1101, 1111 | 254 | * pte_file is true for bits combinations 1101, 1111 |
250 | * swap pte is 1011 and 0001, 0011, 0101, 0111, 1010 and 1110 are invalid. | 255 | * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. |
251 | */ | 256 | */ |
252 | 257 | ||
253 | #ifndef __s390x__ | 258 | #ifndef __s390x__ |
@@ -312,33 +317,100 @@ extern unsigned long vmalloc_end; | |||
312 | #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) | 317 | #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) |
313 | #define PAGE_RO __pgprot(_PAGE_TYPE_RO) | 318 | #define PAGE_RO __pgprot(_PAGE_TYPE_RO) |
314 | #define PAGE_RW __pgprot(_PAGE_TYPE_RW) | 319 | #define PAGE_RW __pgprot(_PAGE_TYPE_RW) |
320 | #define PAGE_EX_RO __pgprot(_PAGE_TYPE_EX_RO) | ||
321 | #define PAGE_EX_RW __pgprot(_PAGE_TYPE_EX_RW) | ||
315 | 322 | ||
316 | #define PAGE_KERNEL PAGE_RW | 323 | #define PAGE_KERNEL PAGE_RW |
317 | #define PAGE_COPY PAGE_RO | 324 | #define PAGE_COPY PAGE_RO |
318 | 325 | ||
319 | /* | 326 | /* |
320 | * The S390 can't do page protection for execute, and considers that the | 327 | * Dependent on the EXEC_PROTECT option s390 can do execute protection. |
321 | * same are read. Also, write permissions imply read permissions. This is | 328 | * Write permission always implies read permission. In theory with a |
322 | * the closest we can get.. | 329 | * primary/secondary page table execute only can be implemented but |
330 | * it would cost an additional bit in the pte to distinguish all the | ||
331 | * different pte types. To avoid that execute permission currently | ||
332 | * implies read permission as well. | ||
323 | */ | 333 | */ |
324 | /*xwr*/ | 334 | /*xwr*/ |
325 | #define __P000 PAGE_NONE | 335 | #define __P000 PAGE_NONE |
326 | #define __P001 PAGE_RO | 336 | #define __P001 PAGE_RO |
327 | #define __P010 PAGE_RO | 337 | #define __P010 PAGE_RO |
328 | #define __P011 PAGE_RO | 338 | #define __P011 PAGE_RO |
329 | #define __P100 PAGE_RO | 339 | #define __P100 PAGE_EX_RO |
330 | #define __P101 PAGE_RO | 340 | #define __P101 PAGE_EX_RO |
331 | #define __P110 PAGE_RO | 341 | #define __P110 PAGE_EX_RO |
332 | #define __P111 PAGE_RO | 342 | #define __P111 PAGE_EX_RO |
333 | 343 | ||
334 | #define __S000 PAGE_NONE | 344 | #define __S000 PAGE_NONE |
335 | #define __S001 PAGE_RO | 345 | #define __S001 PAGE_RO |
336 | #define __S010 PAGE_RW | 346 | #define __S010 PAGE_RW |
337 | #define __S011 PAGE_RW | 347 | #define __S011 PAGE_RW |
338 | #define __S100 PAGE_RO | 348 | #define __S100 PAGE_EX_RO |
339 | #define __S101 PAGE_RO | 349 | #define __S101 PAGE_EX_RO |
340 | #define __S110 PAGE_RW | 350 | #define __S110 PAGE_EX_RW |
341 | #define __S111 PAGE_RW | 351 | #define __S111 PAGE_EX_RW |
352 | |||
353 | #ifndef __s390x__ | ||
354 | # define PMD_SHADOW_SHIFT 1 | ||
355 | # define PGD_SHADOW_SHIFT 1 | ||
356 | #else /* __s390x__ */ | ||
357 | # define PMD_SHADOW_SHIFT 2 | ||
358 | # define PGD_SHADOW_SHIFT 2 | ||
359 | #endif /* __s390x__ */ | ||
360 | |||
361 | static inline struct page *get_shadow_page(struct page *page) | ||
362 | { | ||
363 | if (s390_noexec && !list_empty(&page->lru)) | ||
364 | return virt_to_page(page->lru.next); | ||
365 | return NULL; | ||
366 | } | ||
367 | |||
368 | static inline pte_t *get_shadow_pte(pte_t *ptep) | ||
369 | { | ||
370 | unsigned long pteptr = (unsigned long) (ptep); | ||
371 | |||
372 | if (s390_noexec) { | ||
373 | unsigned long offset = pteptr & (PAGE_SIZE - 1); | ||
374 | void *addr = (void *) (pteptr ^ offset); | ||
375 | struct page *page = virt_to_page(addr); | ||
376 | if (!list_empty(&page->lru)) | ||
377 | return (pte_t *) ((unsigned long) page->lru.next | | ||
378 | offset); | ||
379 | } | ||
380 | return NULL; | ||
381 | } | ||
382 | |||
383 | static inline pmd_t *get_shadow_pmd(pmd_t *pmdp) | ||
384 | { | ||
385 | unsigned long pmdptr = (unsigned long) (pmdp); | ||
386 | |||
387 | if (s390_noexec) { | ||
388 | unsigned long offset = pmdptr & | ||
389 | ((PAGE_SIZE << PMD_SHADOW_SHIFT) - 1); | ||
390 | void *addr = (void *) (pmdptr ^ offset); | ||
391 | struct page *page = virt_to_page(addr); | ||
392 | if (!list_empty(&page->lru)) | ||
393 | return (pmd_t *) ((unsigned long) page->lru.next | | ||
394 | offset); | ||
395 | } | ||
396 | return NULL; | ||
397 | } | ||
398 | |||
399 | static inline pgd_t *get_shadow_pgd(pgd_t *pgdp) | ||
400 | { | ||
401 | unsigned long pgdptr = (unsigned long) (pgdp); | ||
402 | |||
403 | if (s390_noexec) { | ||
404 | unsigned long offset = pgdptr & | ||
405 | ((PAGE_SIZE << PGD_SHADOW_SHIFT) - 1); | ||
406 | void *addr = (void *) (pgdptr ^ offset); | ||
407 | struct page *page = virt_to_page(addr); | ||
408 | if (!list_empty(&page->lru)) | ||
409 | return (pgd_t *) ((unsigned long) page->lru.next | | ||
410 | offset); | ||
411 | } | ||
412 | return NULL; | ||
413 | } | ||
342 | 414 | ||
343 | /* | 415 | /* |
344 | * Certain architectures need to do special things when PTEs | 416 | * Certain architectures need to do special things when PTEs |
@@ -347,7 +419,16 @@ extern unsigned long vmalloc_end; | |||
347 | */ | 419 | */ |
348 | static inline void set_pte(pte_t *pteptr, pte_t pteval) | 420 | static inline void set_pte(pte_t *pteptr, pte_t pteval) |
349 | { | 421 | { |
422 | pte_t *shadow_pte = get_shadow_pte(pteptr); | ||
423 | |||
350 | *pteptr = pteval; | 424 | *pteptr = pteval; |
425 | if (shadow_pte) { | ||
426 | if (!(pte_val(pteval) & _PAGE_INVALID) && | ||
427 | (pte_val(pteval) & _PAGE_SWX)) | ||
428 | pte_val(*shadow_pte) = pte_val(pteval) | _PAGE_RO; | ||
429 | else | ||
430 | pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; | ||
431 | } | ||
351 | } | 432 | } |
352 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 433 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
353 | 434 | ||
@@ -465,7 +546,7 @@ static inline int pte_read(pte_t pte) | |||
465 | 546 | ||
466 | static inline void pgd_clear(pgd_t * pgdp) { } | 547 | static inline void pgd_clear(pgd_t * pgdp) { } |
467 | 548 | ||
468 | static inline void pmd_clear(pmd_t * pmdp) | 549 | static inline void pmd_clear_kernel(pmd_t * pmdp) |
469 | { | 550 | { |
470 | pmd_val(pmdp[0]) = _PAGE_TABLE_INV; | 551 | pmd_val(pmdp[0]) = _PAGE_TABLE_INV; |
471 | pmd_val(pmdp[1]) = _PAGE_TABLE_INV; | 552 | pmd_val(pmdp[1]) = _PAGE_TABLE_INV; |
@@ -473,24 +554,55 @@ static inline void pmd_clear(pmd_t * pmdp) | |||
473 | pmd_val(pmdp[3]) = _PAGE_TABLE_INV; | 554 | pmd_val(pmdp[3]) = _PAGE_TABLE_INV; |
474 | } | 555 | } |
475 | 556 | ||
557 | static inline void pmd_clear(pmd_t * pmdp) | ||
558 | { | ||
559 | pmd_t *shadow_pmd = get_shadow_pmd(pmdp); | ||
560 | |||
561 | pmd_clear_kernel(pmdp); | ||
562 | if (shadow_pmd) | ||
563 | pmd_clear_kernel(shadow_pmd); | ||
564 | } | ||
565 | |||
476 | #else /* __s390x__ */ | 566 | #else /* __s390x__ */ |
477 | 567 | ||
478 | static inline void pgd_clear(pgd_t * pgdp) | 568 | static inline void pgd_clear_kernel(pgd_t * pgdp) |
479 | { | 569 | { |
480 | pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; | 570 | pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY; |
481 | } | 571 | } |
482 | 572 | ||
483 | static inline void pmd_clear(pmd_t * pmdp) | 573 | static inline void pgd_clear(pgd_t * pgdp) |
574 | { | ||
575 | pgd_t *shadow_pgd = get_shadow_pgd(pgdp); | ||
576 | |||
577 | pgd_clear_kernel(pgdp); | ||
578 | if (shadow_pgd) | ||
579 | pgd_clear_kernel(shadow_pgd); | ||
580 | } | ||
581 | |||
582 | static inline void pmd_clear_kernel(pmd_t * pmdp) | ||
484 | { | 583 | { |
485 | pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; | 584 | pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; |
486 | pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; | 585 | pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY; |
487 | } | 586 | } |
488 | 587 | ||
588 | static inline void pmd_clear(pmd_t * pmdp) | ||
589 | { | ||
590 | pmd_t *shadow_pmd = get_shadow_pmd(pmdp); | ||
591 | |||
592 | pmd_clear_kernel(pmdp); | ||
593 | if (shadow_pmd) | ||
594 | pmd_clear_kernel(shadow_pmd); | ||
595 | } | ||
596 | |||
489 | #endif /* __s390x__ */ | 597 | #endif /* __s390x__ */ |
490 | 598 | ||
491 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 599 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
492 | { | 600 | { |
601 | pte_t *shadow_pte = get_shadow_pte(ptep); | ||
602 | |||
493 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 603 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; |
604 | if (shadow_pte) | ||
605 | pte_val(*shadow_pte) = _PAGE_TYPE_EMPTY; | ||
494 | } | 606 | } |
495 | 607 | ||
496 | /* | 608 | /* |
@@ -608,8 +720,11 @@ ptep_clear_flush(struct vm_area_struct *vma, | |||
608 | unsigned long address, pte_t *ptep) | 720 | unsigned long address, pte_t *ptep) |
609 | { | 721 | { |
610 | pte_t pte = *ptep; | 722 | pte_t pte = *ptep; |
723 | pte_t *shadow_pte = get_shadow_pte(ptep); | ||
611 | 724 | ||
612 | __ptep_ipte(address, ptep); | 725 | __ptep_ipte(address, ptep); |
726 | if (shadow_pte) | ||
727 | __ptep_ipte(address, shadow_pte); | ||
613 | return pte; | 728 | return pte; |
614 | } | 729 | } |
615 | 730 | ||
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index cbbedc63ba25..4c1b73940351 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h | |||
@@ -50,6 +50,7 @@ struct cpuinfo_S390 | |||
50 | unsigned long pgtable_cache_sz; | 50 | unsigned long pgtable_cache_sz; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | extern void s390_adjust_jiffies(void); | ||
53 | extern void print_cpu_info(struct cpuinfo_S390 *); | 54 | extern void print_cpu_info(struct cpuinfo_S390 *); |
54 | 55 | ||
55 | /* Lazy FPU handling on uni-processor */ | 56 | /* Lazy FPU handling on uni-processor */ |
@@ -144,7 +145,8 @@ struct stack_frame { | |||
144 | #ifndef __s390x__ | 145 | #ifndef __s390x__ |
145 | 146 | ||
146 | #define start_thread(regs, new_psw, new_stackp) do { \ | 147 | #define start_thread(regs, new_psw, new_stackp) do { \ |
147 | regs->psw.mask = PSW_USER_BITS; \ | 148 | set_fs(USER_DS); \ |
149 | regs->psw.mask = psw_user_bits; \ | ||
148 | regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ | 150 | regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ |
149 | regs->gprs[15] = new_stackp ; \ | 151 | regs->gprs[15] = new_stackp ; \ |
150 | } while (0) | 152 | } while (0) |
@@ -152,13 +154,15 @@ struct stack_frame { | |||
152 | #else /* __s390x__ */ | 154 | #else /* __s390x__ */ |
153 | 155 | ||
154 | #define start_thread(regs, new_psw, new_stackp) do { \ | 156 | #define start_thread(regs, new_psw, new_stackp) do { \ |
155 | regs->psw.mask = PSW_USER_BITS; \ | 157 | set_fs(USER_DS); \ |
158 | regs->psw.mask = psw_user_bits; \ | ||
156 | regs->psw.addr = new_psw; \ | 159 | regs->psw.addr = new_psw; \ |
157 | regs->gprs[15] = new_stackp; \ | 160 | regs->gprs[15] = new_stackp; \ |
158 | } while (0) | 161 | } while (0) |
159 | 162 | ||
160 | #define start_thread31(regs, new_psw, new_stackp) do { \ | 163 | #define start_thread31(regs, new_psw, new_stackp) do { \ |
161 | regs->psw.mask = PSW_USER32_BITS; \ | 164 | set_fs(USER_DS); \ |
165 | regs->psw.mask = psw_user32_bits; \ | ||
162 | regs->psw.addr = new_psw; \ | 166 | regs->psw.addr = new_psw; \ |
163 | regs->gprs[15] = new_stackp; \ | 167 | regs->gprs[15] = new_stackp; \ |
164 | } while (0) | 168 | } while (0) |
@@ -201,9 +205,8 @@ unsigned long get_wchan(struct task_struct *p); | |||
201 | static inline void cpu_relax(void) | 205 | static inline void cpu_relax(void) |
202 | { | 206 | { |
203 | if (MACHINE_HAS_DIAG44) | 207 | if (MACHINE_HAS_DIAG44) |
204 | asm volatile("diag 0,0,68" : : : "memory"); | 208 | asm volatile("diag 0,0,68"); |
205 | else | 209 | barrier(); |
206 | barrier(); | ||
207 | } | 210 | } |
208 | 211 | ||
209 | /* | 212 | /* |
@@ -328,6 +331,18 @@ static inline void disabled_wait(unsigned long code) | |||
328 | } | 331 | } |
329 | 332 | ||
330 | /* | 333 | /* |
334 | * Basic Machine Check/Program Check Handler. | ||
335 | */ | ||
336 | |||
337 | extern void s390_base_mcck_handler(void); | ||
338 | extern void s390_base_pgm_handler(void); | ||
339 | extern void s390_base_ext_handler(void); | ||
340 | |||
341 | extern void (*s390_base_mcck_handler_fn)(void); | ||
342 | extern void (*s390_base_pgm_handler_fn)(void); | ||
343 | extern void (*s390_base_ext_handler_fn)(void); | ||
344 | |||
345 | /* | ||
331 | * CPU idle notifier chain. | 346 | * CPU idle notifier chain. |
332 | */ | 347 | */ |
333 | #define CPU_IDLE 0 | 348 | #define CPU_IDLE 0 |
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index 7b768c5c68a8..fa6ca87080e8 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h | |||
@@ -266,17 +266,12 @@ typedef struct | |||
266 | #define PSW_ASC_SECONDARY 0x0000800000000000UL | 266 | #define PSW_ASC_SECONDARY 0x0000800000000000UL |
267 | #define PSW_ASC_HOME 0x0000C00000000000UL | 267 | #define PSW_ASC_HOME 0x0000C00000000000UL |
268 | 268 | ||
269 | #define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ | 269 | extern long psw_user32_bits; |
270 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ | ||
271 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY) | ||
272 | 270 | ||
273 | #endif /* __s390x__ */ | 271 | #endif /* __s390x__ */ |
274 | 272 | ||
275 | #define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ | 273 | extern long psw_kernel_bits; |
276 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY) | 274 | extern long psw_user_bits; |
277 | #define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ | ||
278 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ | ||
279 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY) | ||
280 | 275 | ||
281 | /* This macro merges a NEW PSW mask specified by the user into | 276 | /* This macro merges a NEW PSW mask specified by the user into |
282 | the currently active PSW mask CURRENT, modifying only those | 277 | the currently active PSW mask CURRENT, modifying only those |
diff --git a/include/asm-s390/reset.h b/include/asm-s390/reset.h index 532e65a2aafc..f584f4a52581 100644 --- a/include/asm-s390/reset.h +++ b/include/asm-s390/reset.h | |||
@@ -18,7 +18,4 @@ struct reset_call { | |||
18 | extern void register_reset_call(struct reset_call *reset); | 18 | extern void register_reset_call(struct reset_call *reset); |
19 | extern void unregister_reset_call(struct reset_call *reset); | 19 | extern void unregister_reset_call(struct reset_call *reset); |
20 | extern void s390_reset_system(void); | 20 | extern void s390_reset_system(void); |
21 | extern void (*s390_reset_mcck_handler)(void); | ||
22 | extern void (*s390_reset_pgm_handler)(void); | ||
23 | |||
24 | #endif /* _ASM_S390_RESET_H */ | 21 | #endif /* _ASM_S390_RESET_H */ |
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h new file mode 100644 index 000000000000..468b97018405 --- /dev/null +++ b/include/asm-s390/sclp.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * include/asm-s390/sclp.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef _ASM_S390_SCLP_H | ||
9 | #define _ASM_S390_SCLP_H | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | |||
13 | struct sccb_header { | ||
14 | u16 length; | ||
15 | u8 function_code; | ||
16 | u8 control_mask[3]; | ||
17 | u16 response_code; | ||
18 | } __attribute__((packed)); | ||
19 | |||
20 | #define LOADPARM_LEN 8 | ||
21 | |||
22 | struct sclp_readinfo_sccb { | ||
23 | struct sccb_header header; /* 0-7 */ | ||
24 | u16 rnmax; /* 8-9 */ | ||
25 | u8 rnsize; /* 10 */ | ||
26 | u8 _reserved0[24 - 11]; /* 11-23 */ | ||
27 | u8 loadparm[LOADPARM_LEN]; /* 24-31 */ | ||
28 | u8 _reserved1[91 - 32]; /* 32-90 */ | ||
29 | u8 flags; /* 91 */ | ||
30 | u8 _reserved2[100 - 92]; /* 92-99 */ | ||
31 | u32 rnsize2; /* 100-103 */ | ||
32 | u64 rnmax2; /* 104-111 */ | ||
33 | u8 _reserved3[4096 - 112]; /* 112-4095 */ | ||
34 | } __attribute__((packed, aligned(4096))); | ||
35 | |||
36 | extern struct sclp_readinfo_sccb s390_readinfo_sccb; | ||
37 | extern void sclp_readinfo_early(void); | ||
38 | |||
39 | #endif /* _ASM_S390_SCLP_H */ | ||
diff --git a/include/asm-s390/sections.h b/include/asm-s390/sections.h index 3a0b8ffeab7a..1c5a2c4ccdad 100644 --- a/include/asm-s390/sections.h +++ b/include/asm-s390/sections.h | |||
@@ -3,4 +3,6 @@ | |||
3 | 3 | ||
4 | #include <asm-generic/sections.h> | 4 | #include <asm-generic/sections.h> |
5 | 5 | ||
6 | extern char _eshared[]; | ||
7 | |||
6 | #endif | 8 | #endif |
diff --git a/include/asm-s390/setup.h b/include/asm-s390/setup.h index 9574fe80a046..3388bb52597c 100644 --- a/include/asm-s390/setup.h +++ b/include/asm-s390/setup.h | |||
@@ -42,6 +42,18 @@ struct mem_chunk { | |||
42 | 42 | ||
43 | extern struct mem_chunk memory_chunk[]; | 43 | extern struct mem_chunk memory_chunk[]; |
44 | 44 | ||
45 | #ifdef CONFIG_S390_SWITCH_AMODE | ||
46 | extern unsigned int switch_amode; | ||
47 | #else | ||
48 | #define switch_amode (0) | ||
49 | #endif | ||
50 | |||
51 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
52 | extern unsigned int s390_noexec; | ||
53 | #else | ||
54 | #define s390_noexec (0) | ||
55 | #endif | ||
56 | |||
45 | /* | 57 | /* |
46 | * Machine features detected in head.S | 58 | * Machine features detected in head.S |
47 | */ | 59 | */ |
@@ -74,6 +86,9 @@ extern unsigned int console_mode; | |||
74 | extern unsigned int console_devno; | 86 | extern unsigned int console_devno; |
75 | extern unsigned int console_irq; | 87 | extern unsigned int console_irq; |
76 | 88 | ||
89 | extern char vmhalt_cmd[]; | ||
90 | extern char vmpoff_cmd[]; | ||
91 | |||
77 | #define CONSOLE_IS_UNDEFINED (console_mode == 0) | 92 | #define CONSOLE_IS_UNDEFINED (console_mode == 0) |
78 | #define CONSOLE_IS_SCLP (console_mode == 1) | 93 | #define CONSOLE_IS_SCLP (console_mode == 1) |
79 | #define CONSOLE_IS_3215 (console_mode == 2) | 94 | #define CONSOLE_IS_3215 (console_mode == 2) |
@@ -141,13 +156,19 @@ struct ipl_parameter_block { | |||
141 | extern u32 ipl_flags; | 156 | extern u32 ipl_flags; |
142 | extern u16 ipl_devno; | 157 | extern u16 ipl_devno; |
143 | 158 | ||
144 | void do_reipl(void); | 159 | extern void do_reipl(void); |
160 | extern void ipl_save_parameters(void); | ||
145 | 161 | ||
146 | enum { | 162 | enum { |
147 | IPL_DEVNO_VALID = 1, | 163 | IPL_DEVNO_VALID = 1, |
148 | IPL_PARMBLOCK_VALID = 2, | 164 | IPL_PARMBLOCK_VALID = 2, |
165 | IPL_NSS_VALID = 4, | ||
149 | }; | 166 | }; |
150 | 167 | ||
168 | #define NSS_NAME_SIZE 8 | ||
169 | |||
170 | extern char kernel_nss_name[]; | ||
171 | |||
151 | #define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ | 172 | #define IPL_PARMBLOCK_START ((struct ipl_parameter_block *) \ |
152 | IPL_PARMBLOCK_ORIGIN) | 173 | IPL_PARMBLOCK_ORIGIN) |
153 | #define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) | 174 | #define IPL_PARMBLOCK_SIZE (IPL_PARMBLOCK_START->hdr.len) |
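The ifdef/define-to-(0) pattern used for switch_amode and s390_noexec above lets common code test the flag unconditionally: with the config option disabled the flag is a compile-time constant and the guarded branch is discarded. A stand-alone illustration of that pattern; all example_ names and the EXAMPLE_EXEC_PROTECT macro are hypothetical stand-ins.

    #include <stdio.h>

    #ifdef EXAMPLE_EXEC_PROTECT
    unsigned int example_noexec = 1;    /* would normally be set up during boot */
    #else
    #define example_noexec (0)          /* constant: the branch below is compiled away */
    #endif

    int main(void)
    {
        if (example_noexec)
            printf("execute protection active\n");
        else
            printf("execute protection not compiled in\n");
        return 0;
    }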
diff --git a/arch/s390/math-emu/sfp-util.h b/include/asm-s390/sfp-util.h index 5b6ca4570ea4..8cabcd23d976 100644 --- a/arch/s390/math-emu/sfp-util.h +++ b/include/asm-s390/sfp-util.h | |||
@@ -52,12 +52,12 @@ | |||
52 | }) | 52 | }) |
53 | 53 | ||
54 | #define udiv_qrnnd(q, r, n1, n0, d) \ | 54 | #define udiv_qrnnd(q, r, n1, n0, d) \ |
55 | do { unsigned long __r; \ | 55 | do { unsigned int __r; \ |
56 | (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ | 56 | (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ |
57 | (r) = __r; \ | 57 | (r) = __r; \ |
58 | } while (0) | 58 | } while (0) |
59 | extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long, | 59 | extern unsigned long __udiv_qrnnd (unsigned int *, unsigned int, |
60 | unsigned long , unsigned long); | 60 | unsigned int , unsigned int); |
61 | 61 | ||
62 | #define UDIV_NEEDS_NORMALIZATION 0 | 62 | #define UDIV_NEEDS_NORMALIZATION 0 |
63 | 63 | ||
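udiv_qrnnd() divides the double-word value (n1:n0) by d and, after this change, works on 32-bit words with a 32-bit remainder. The portable reference model below only illustrates that contract; the real helper is s390 assembler, the _model name is hypothetical, and, as with the GMP-style originals, the quotient only fits when n1 < d.

    #include <stdint.h>
    #include <stdio.h>

    /* Reference model only: divide (n1 << 32 | n0) by d, 32-bit q and r. */
    static void udiv_qrnnd_model(uint32_t *q, uint32_t *r,
                                 uint32_t n1, uint32_t n0, uint32_t d)
    {
        uint64_t n = ((uint64_t)n1 << 32) | n0;

        *q = (uint32_t)(n / d);
        *r = (uint32_t)(n % d);
    }

    int main(void)
    {
        uint32_t q, r;

        udiv_qrnnd_model(&q, &r, 1, 0, 3);  /* 2^32 / 3 */
        printf("q=%u r=%u\n", q, r);        /* prints q=1431655765 r=1 */
        return 0;
    }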
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h index 7097c96ed026..b957e4cda464 100644 --- a/include/asm-s390/smp.h +++ b/include/asm-s390/smp.h | |||
@@ -31,6 +31,10 @@ typedef struct | |||
31 | __u16 cpu; | 31 | __u16 cpu; |
32 | } sigp_info; | 32 | } sigp_info; |
33 | 33 | ||
34 | extern void machine_restart_smp(char *); | ||
35 | extern void machine_halt_smp(void); | ||
36 | extern void machine_power_off_smp(void); | ||
37 | |||
34 | extern void smp_setup_cpu_possible_map(void); | 38 | extern void smp_setup_cpu_possible_map(void); |
35 | extern int smp_call_function_on(void (*func) (void *info), void *info, | 39 | extern int smp_call_function_on(void (*func) (void *info), void *info, |
36 | int nonatomic, int wait, int cpu); | 40 | int nonatomic, int wait, int cpu); |
@@ -106,7 +110,7 @@ smp_call_function_on(void (*func) (void *info), void *info, | |||
106 | static inline void smp_send_stop(void) | 110 | static inline void smp_send_stop(void) |
107 | { | 111 | { |
108 | /* Disable all interrupts/machine checks */ | 112 | /* Disable all interrupts/machine checks */ |
109 | __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); | 113 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); |
110 | } | 114 | } |
111 | 115 | ||
112 | #define smp_cpu_not_running(cpu) 1 | 116 | #define smp_cpu_not_running(cpu) 1 |
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h index bd0b05ae87d2..bbe137c3ed69 100644 --- a/include/asm-s390/system.h +++ b/include/asm-s390/system.h | |||
@@ -373,8 +373,8 @@ __set_psw_mask(unsigned long mask) | |||
373 | __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); | 373 | __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8))); |
374 | } | 374 | } |
375 | 375 | ||
376 | #define local_mcck_enable() __set_psw_mask(PSW_KERNEL_BITS) | 376 | #define local_mcck_enable() __set_psw_mask(psw_kernel_bits) |
377 | #define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK) | 377 | #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK) |
378 | 378 | ||
379 | #ifdef CONFIG_SMP | 379 | #ifdef CONFIG_SMP |
380 | 380 | ||
diff --git a/include/asm-s390/tape390.h b/include/asm-s390/tape390.h index f1d66ba0deef..884fba48f1ff 100644 --- a/include/asm-s390/tape390.h +++ b/include/asm-s390/tape390.h | |||
@@ -1,11 +1,11 @@ | |||
1 | /************************************************************************* | 1 | /************************************************************************* |
2 | * | 2 | * |
3 | * tape390.h | 3 | * tape390.h |
4 | * enables user programs to display messages on the tape device | 4 | * enables user programs to display messages and control encryption |
5 | * on s390 tape devices | ||
5 | * | 6 | * |
6 | * S390 and zSeries version | 7 | * Copyright IBM Corp. 2001,2006 |
7 | * Copyright (C) 2001 IBM Corporation | 8 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> |
8 | * Author(s): Despina Papadopoulou <despina_p@de.ibm.com> | ||
9 | * | 9 | * |
10 | *************************************************************************/ | 10 | *************************************************************************/ |
11 | 11 | ||
@@ -36,4 +36,68 @@ typedef struct display_struct { | |||
36 | char message2[8]; | 36 | char message2[8]; |
37 | } display_struct; | 37 | } display_struct; |
38 | 38 | ||
39 | /* | ||
40 | * Tape encryption support | ||
41 | */ | ||
42 | |||
43 | struct tape390_crypt_info { | ||
44 | char capability; | ||
45 | char status; | ||
46 | char medium_status; | ||
47 | } __attribute__ ((packed)); | ||
48 | |||
49 | |||
50 | /* Macros for "capability" field */ | ||

51 | #define TAPE390_CRYPT_SUPPORTED_MASK 0x01 | ||
52 | #define TAPE390_CRYPT_SUPPORTED(x) \ | ||
53 | ((x.capability & TAPE390_CRYPT_SUPPORTED_MASK)) | ||
54 | |||
55 | /* Macros for "status" field */ | ||
56 | #define TAPE390_CRYPT_ON_MASK 0x01 | ||
57 | #define TAPE390_CRYPT_ON(x) (((x.status) & TAPE390_CRYPT_ON_MASK)) | ||
58 | |||
59 | /* Macros for "medium status" field */ | ||
60 | #define TAPE390_MEDIUM_LOADED_MASK 0x01 | ||
61 | #define TAPE390_MEDIUM_ENCRYPTED_MASK 0x02 | ||
62 | #define TAPE390_MEDIUM_ENCRYPTED(x) \ | ||
63 | (((x.medium_status) & TAPE390_MEDIUM_ENCRYPTED_MASK)) | ||
64 | #define TAPE390_MEDIUM_LOADED(x) \ | ||
65 | (((x.medium_status) & TAPE390_MEDIUM_LOADED_MASK)) | ||
66 | |||
67 | /* | ||
68 | * The TAPE390_CRYPT_SET ioctl is used to switch on/off encryption. | ||
69 | * The "encryption_capable" and "tape_status" fields are ignored for this ioctl! | ||
70 | */ | ||
71 | #define TAPE390_CRYPT_SET _IOW('d', 2, struct tape390_crypt_info) | ||
72 | |||
73 | /* | ||
74 | * The TAPE390_CRYPT_QUERY ioctl is used to query the encryption state. | ||
75 | */ | ||
76 | #define TAPE390_CRYPT_QUERY _IOR('d', 3, struct tape390_crypt_info) | ||
77 | |||
78 | /* Values for "kekl1/2_type" and "kekl1/2_type_on_tape" fields */ | ||
79 | #define TAPE390_KEKL_TYPE_NONE 0 | ||
80 | #define TAPE390_KEKL_TYPE_LABEL 1 | ||
81 | #define TAPE390_KEKL_TYPE_HASH 2 | ||
82 | |||
83 | struct tape390_kekl { | ||
84 | unsigned char type; | ||
85 | unsigned char type_on_tape; | ||
86 | char label[65]; | ||
87 | } __attribute__ ((packed)); | ||
88 | |||
89 | struct tape390_kekl_pair { | ||
90 | struct tape390_kekl kekl[2]; | ||
91 | } __attribute__ ((packed)); | ||
92 | |||
93 | /* | ||
94 | * The TAPE390_KEKL_SET ioctl is used to set Key Encrypting Key labels. | ||
95 | */ | ||
96 | #define TAPE390_KEKL_SET _IOW('d', 4, struct tape390_kekl_pair) | ||
97 | |||
98 | /* | ||
99 | * The TAPE390_KEKL_QUERY ioctl is used to query Key Encrypting Key labels. | ||
100 | */ | ||
101 | #define TAPE390_KEKL_QUERY _IOR('d', 5, struct tape390_kekl_pair) | ||
102 | |||
39 | #endif | 103 | #endif |
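The new ioctls are issued against an open channel-attached tape device node. A hedged user-space sketch querying the encryption state; the /dev/ntibm0 path and the <asm/tape390.h> include location are assumptions about how the device and header are installed, not part of this commit.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/tape390.h>    /* assumed install location of the header above */

    int main(void)
    {
        struct tape390_crypt_info info;
        int fd = open("/dev/ntibm0", O_RDONLY);     /* illustrative device node */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, TAPE390_CRYPT_QUERY, &info) < 0) {
            perror("TAPE390_CRYPT_QUERY");
            close(fd);
            return 1;
        }
        printf("supported=%d on=%d medium encrypted=%d\n",
               TAPE390_CRYPT_SUPPORTED(info), TAPE390_CRYPT_ON(info),
               TAPE390_MEDIUM_ENCRYPTED(info));
        close(fd);
        return 0;
    }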
diff --git a/include/asm-s390/timer.h b/include/asm-s390/timer.h index 30e5cbe570f2..adb34860a543 100644 --- a/include/asm-s390/timer.h +++ b/include/asm-s390/timer.h | |||
@@ -45,6 +45,9 @@ extern void add_virt_timer_periodic(void *new); | |||
45 | extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); | 45 | extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires); |
46 | extern int del_virt_timer(struct vtimer_list *timer); | 46 | extern int del_virt_timer(struct vtimer_list *timer); |
47 | 47 | ||
48 | extern void init_cpu_vtimer(void); | ||
49 | extern void vtime_init(void); | ||
50 | |||
48 | #endif /* __KERNEL__ */ | 51 | #endif /* __KERNEL__ */ |
49 | 52 | ||
50 | #endif /* _ASM_S390_TIMER_H */ | 53 | #endif /* _ASM_S390_TIMER_H */ |
diff --git a/include/asm-s390/timex.h b/include/asm-s390/timex.h index 4df4a41029a3..98229db24314 100644 --- a/include/asm-s390/timex.h +++ b/include/asm-s390/timex.h | |||
@@ -11,6 +11,41 @@ | |||
11 | #ifndef _ASM_S390_TIMEX_H | 11 | #ifndef _ASM_S390_TIMEX_H |
12 | #define _ASM_S390_TIMEX_H | 12 | #define _ASM_S390_TIMEX_H |
13 | 13 | ||
14 | /* Inline functions for clock register access. */ | ||
15 | static inline int set_clock(__u64 time) | ||
16 | { | ||
17 | int cc; | ||
18 | |||
19 | asm volatile( | ||
20 | " sck 0(%2)\n" | ||
21 | " ipm %0\n" | ||
22 | " srl %0,28\n" | ||
23 | : "=d" (cc) : "m" (time), "a" (&time) : "cc"); | ||
24 | return cc; | ||
25 | } | ||
26 | |||
27 | static inline int store_clock(__u64 *time) | ||
28 | { | ||
29 | int cc; | ||
30 | |||
31 | asm volatile( | ||
32 | " stck 0(%2)\n" | ||
33 | " ipm %0\n" | ||
34 | " srl %0,28\n" | ||
35 | : "=d" (cc), "=m" (*time) : "a" (time) : "cc"); | ||
36 | return cc; | ||
37 | } | ||
38 | |||
39 | static inline void set_clock_comparator(__u64 time) | ||
40 | { | ||
41 | asm volatile("sckc 0(%1)" : : "m" (time), "a" (&time)); | ||
42 | } | ||
43 | |||
44 | static inline void store_clock_comparator(__u64 *time) | ||
45 | { | ||
46 | asm volatile("stckc 0(%1)" : "=m" (*time) : "a" (time)); | ||
47 | } | ||
48 | |||
14 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ | 49 | #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */ |
15 | 50 | ||
16 | typedef unsigned long long cycles_t; | 51 | typedef unsigned long long cycles_t; |
@@ -27,9 +62,24 @@ static inline unsigned long long get_clock (void) | |||
27 | return clk; | 62 | return clk; |
28 | } | 63 | } |
29 | 64 | ||
65 | static inline void get_clock_extended(void *dest) | ||
66 | { | ||
67 | typedef struct { unsigned long long clk[2]; } __clock_t; | ||
68 | |||
69 | #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) | ||
70 | asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc"); | ||
71 | #else /* __GNUC__ */ | ||
72 | asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest)) | ||
73 | : "a" ((__clock_t *)dest) : "cc"); | ||
74 | #endif /* __GNUC__ */ | ||
75 | } | ||
76 | |||
30 | static inline cycles_t get_cycles(void) | 77 | static inline cycles_t get_cycles(void) |
31 | { | 78 | { |
32 | return (cycles_t) get_clock() >> 2; | 79 | return (cycles_t) get_clock() >> 2; |
33 | } | 80 | } |
34 | 81 | ||
82 | int get_sync_clock(unsigned long long *clock); | ||
83 | void init_cpu_timer(void); | ||
84 | |||
35 | #endif | 85 | #endif |
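set_clock()/store_clock() above wrap the privileged SCK and the unprivileged STCK instructions and return the condition code. Because STCK is unprivileged, the same pattern works from user space on s390; the sketch below (local stck() wrapper name is an assumption, and it compiles only for s390) also shows the usual conversion to microseconds, relying on the architected TOD format where bit 51 ticks once per microsecond.

    #include <stdio.h>

    /* Same asm pattern as store_clock() above, usable outside the kernel. */
    static inline int stck(unsigned long long *time)
    {
        int cc;

        asm volatile(
            " stck 0(%2)\n"
            " ipm %0\n"
            " srl %0,28\n"
            : "=d" (cc), "=m" (*time) : "a" (time) : "cc");
        return cc;
    }

    int main(void)
    {
        unsigned long long tod;

        if (stck(&tod) != 0) {      /* non-zero cc: clock not set/usable */
            fprintf(stderr, "TOD clock not usable\n");
            return 1;
        }
        printf("TOD: %#llx (%llu us since the TOD epoch)\n", tod, tod >> 12);
        return 0;
    }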
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h index fa4dc916a9bf..66793f55c8b2 100644 --- a/include/asm-s390/tlbflush.h +++ b/include/asm-s390/tlbflush.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | #include <asm/pgalloc.h> | ||
6 | 7 | ||
7 | /* | 8 | /* |
8 | * TLB flushing: | 9 | * TLB flushing: |
@@ -102,6 +103,14 @@ static inline void __flush_tlb_mm(struct mm_struct * mm) | |||
102 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) | 103 | if (unlikely(cpus_empty(mm->cpu_vm_mask))) |
103 | return; | 104 | return; |
104 | if (MACHINE_HAS_IDTE) { | 105 | if (MACHINE_HAS_IDTE) { |
106 | pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd); | ||
107 | |||
108 | if (shadow_pgd) { | ||
109 | asm volatile( | ||
110 | " .insn rrf,0xb98e0000,0,%0,%1,0" | ||
111 | : : "a" (2048), | ||
112 | "a" (__pa(shadow_pgd) & PAGE_MASK) : "cc" ); | ||
113 | } | ||
105 | asm volatile( | 114 | asm volatile( |
106 | " .insn rrf,0xb98e0000,0,%0,%1,0" | 115 | " .insn rrf,0xb98e0000,0,%0,%1,0" |
107 | : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); | 116 | : : "a" (2048), "a" (__pa(mm->pgd)&PAGE_MASK) : "cc"); |
diff --git a/include/asm-s390/uaccess.h b/include/asm-s390/uaccess.h index 73ac4e82217b..0235970278f0 100644 --- a/include/asm-s390/uaccess.h +++ b/include/asm-s390/uaccess.h | |||
@@ -90,6 +90,8 @@ struct uaccess_ops { | |||
90 | extern struct uaccess_ops uaccess; | 90 | extern struct uaccess_ops uaccess; |
91 | extern struct uaccess_ops uaccess_std; | 91 | extern struct uaccess_ops uaccess_std; |
92 | extern struct uaccess_ops uaccess_mvcos; | 92 | extern struct uaccess_ops uaccess_mvcos; |
93 | extern struct uaccess_ops uaccess_mvcos_switch; | ||
94 | extern struct uaccess_ops uaccess_pt; | ||
93 | 95 | ||
94 | static inline int __put_user_fn(size_t size, void __user *ptr, void *x) | 96 | static inline int __put_user_fn(size_t size, void __user *ptr, void *x) |
95 | { | 97 | { |