Diffstat (limited to 'arch')
68 files changed, 3448 insertions, 1362 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 12272361c018..eaed402ad346 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,10 +34,6 @@ config GENERIC_HWEIGHT | |||
34 | bool | 34 | bool |
35 | default y | 35 | default y |
36 | 36 | ||
37 | config GENERIC_CALIBRATE_DELAY | ||
38 | bool | ||
39 | default y | ||
40 | |||
41 | config GENERIC_TIME | 37 | config GENERIC_TIME |
42 | def_bool y | 38 | def_bool y |
43 | 39 | ||
@@ -134,6 +130,31 @@ config AUDIT_ARCH | |||
134 | bool | 130 | bool |
135 | default y | 131 | default y |
136 | 132 | ||
133 | config S390_SWITCH_AMODE | ||
134 | bool "Switch kernel/user addressing modes" | ||
135 | help | ||
136 | This option allows switching the addressing modes of kernel and user | ||
137 | space. The kernel parameter switch_amode=on will enable this feature, | ||
138 | default is disabled. Enabling this (via kernel parameter) on machines | ||
139 | earlier than IBM System z9-109 EC/BC will reduce system performance. | ||
140 | |||
141 | Note that this option will also be selected by selecting the execute | ||
142 | protection option below. Enabling the execute protection via the | ||
143 | noexec kernel parameter will also switch the addressing modes, | ||
144 | independent of the switch_amode kernel parameter. | ||
145 | |||
146 | |||
147 | config S390_EXEC_PROTECT | ||
148 | bool "Data execute protection" | ||
149 | select S390_SWITCH_AMODE | ||
150 | help | ||
151 | This option allows enabling buffer overflow protection for user | ||
152 | space programs and it also selects the addressing mode option above. | ||
153 | The kernel parameter noexec=on will enable this feature and also | ||
154 | switch the addressing modes, default is disabled. Enabling this (via | ||
155 | kernel parameter) on machines earlier than IBM System z9-109 EC/BC | ||
156 | will reduce system performance. | ||
157 | |||
137 | comment "Code generation options" | 158 | comment "Code generation options" |
138 | 159 | ||
139 | choice | 160 | choice |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index b8c237290263..c9da7d16145e 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -81,7 +81,7 @@ static struct ctl_table appldata_dir_table[] = { | |||
81 | /* | 81 | /* |
82 | * Timer | 82 | * Timer |
83 | */ | 83 | */ |
84 | DEFINE_PER_CPU(struct vtimer_list, appldata_timer); | 84 | static DEFINE_PER_CPU(struct vtimer_list, appldata_timer); |
85 | static atomic_t appldata_expire_count = ATOMIC_INIT(0); | 85 | static atomic_t appldata_expire_count = ATOMIC_INIT(0); |
86 | 86 | ||
87 | static DEFINE_SPINLOCK(appldata_timer_lock); | 87 | static DEFINE_SPINLOCK(appldata_timer_lock); |
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 8aea3698a77b..4ca615788702 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -36,7 +36,7 @@ | |||
36 | * book: | 36 | * book: |
37 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | 37 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml |
38 | */ | 38 | */ |
39 | struct appldata_mem_data { | 39 | static struct appldata_mem_data { |
40 | u64 timestamp; | 40 | u64 timestamp; |
41 | u32 sync_count_1; /* after VM collected the record data, */ | 41 | u32 sync_count_1; /* after VM collected the record data, */ |
42 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | 42 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the |
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 075e619bf37d..f64b8c867ae2 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -34,7 +34,7 @@ | |||
34 | * book: | 34 | * book: |
35 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml | 35 | * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml |
36 | */ | 36 | */ |
37 | struct appldata_net_sum_data { | 37 | static struct appldata_net_sum_data { |
38 | u64 timestamp; | 38 | u64 timestamp; |
39 | u32 sync_count_1; /* after VM collected the record data, */ | 39 | u32 sync_count_1; /* after VM collected the record data, */ |
40 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the | 40 | u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the |
diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
new file mode 100644
index 000000000000..99ff9f08e4d7
--- /dev/null
+++ b/arch/s390/crypto/Kconfig
@@ -0,0 +1,60 @@ | |||
1 | config CRYPTO_SHA1_S390 | ||
2 | tristate "SHA1 digest algorithm" | ||
3 | depends on S390 | ||
4 | select CRYPTO_ALGAPI | ||
5 | help | ||
6 | This is the s390 hardware accelerated implementation of the | ||
7 | SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2). | ||
8 | |||
9 | config CRYPTO_SHA256_S390 | ||
10 | tristate "SHA256 digest algorithm" | ||
11 | depends on S390 | ||
12 | select CRYPTO_ALGAPI | ||
13 | help | ||
14 | This is the s390 hardware accelerated implementation of the | ||
15 | SHA256 secure hash standard (DFIPS 180-2). | ||
16 | |||
17 | This version of SHA implements a 256 bit hash with 128 bits of | ||
18 | security against collision attacks. | ||
19 | |||
20 | config CRYPTO_DES_S390 | ||
21 | tristate "DES and Triple DES cipher algorithms" | ||
22 | depends on S390 | ||
23 | select CRYPTO_ALGAPI | ||
24 | select CRYPTO_BLKCIPHER | ||
25 | help | ||
26 | This is the s390 hardware accelerated implementation of the | ||
27 | DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3). | ||
28 | |||
29 | config CRYPTO_AES_S390 | ||
30 | tristate "AES cipher algorithms" | ||
31 | depends on S390 | ||
32 | select CRYPTO_ALGAPI | ||
33 | select CRYPTO_BLKCIPHER | ||
34 | help | ||
35 | This is the s390 hardware accelerated implementation of the | ||
36 | AES cipher algorithms (FIPS-197). AES uses the Rijndael | ||
37 | algorithm. | ||
38 | |||
39 | Rijndael appears to be consistently a very good performer in | ||
40 | both hardware and software across a wide range of computing | ||
41 | environments regardless of its use in feedback or non-feedback | ||
42 | modes. Its key setup time is excellent, and its key agility is | ||
43 | good. Rijndael's very low memory requirements make it very well | ||
44 | suited for restricted-space environments, in which it also | ||
45 | demonstrates excellent performance. Rijndael's operations are | ||
46 | among the easiest to defend against power and timing attacks. | ||
47 | |||
48 | On s390 the System z9-109 currently only supports the key size | ||
49 | of 128 bit. | ||
50 | |||
51 | config S390_PRNG | ||
52 | tristate "Pseudo random number generator device driver" | ||
53 | depends on S390 | ||
54 | default "m" | ||
55 | help | ||
56 | Select this option if you want to use the s390 pseudo random number | ||
57 | generator. The PRNG is part of the cryptographic processor functions | ||
58 | and uses triple-DES to generate secure random numbers like the | ||
59 | ANSI X9.17 standard. The PRNG is usable via the char device | ||
60 | /dev/prandom. | ||
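A minimal user-space sketch of reading from the /dev/prandom device described above (illustrative only, not part of this patch; it assumes CONFIG_S390_PRNG is loaded and the misc device node exists):

/* Illustrative only: reads a few bytes from the s390 PRNG char device.
 * Assumes the S390_PRNG module is loaded and /dev/prandom has been created. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/prandom", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}
	ssize_t n = read(fd, buf, sizeof(buf));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}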
diff --git a/arch/s390/crypto/Makefile b/arch/s390/crypto/Makefile
index bfe2541dc5cf..14e552c5cc43 100644
--- a/arch/s390/crypto/Makefile
+++ b/arch/s390/crypto/Makefile
@@ -6,5 +6,4 @@ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o | |||
6 | obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o | 6 | obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o |
7 | obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o | 7 | obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o des_check_key.o |
8 | obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o | 8 | obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o |
9 | 9 | obj-$(CONFIG_S390_PRNG) += prng.o | |
10 | obj-$(CONFIG_CRYPTO_TEST) += crypt_s390_query.o | ||
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 15c9eec02928..91636353f6f0 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@ | |||
4 | * s390 implementation of the AES Cipher Algorithm. | 4 | * s390 implementation of the AES Cipher Algorithm. |
5 | * | 5 | * |
6 | * s390 Version: | 6 | * s390 Version: |
7 | * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2005,2007 |
8 | * Author(s): Jan Glauber (jang@de.ibm.com) | 8 | * Author(s): Jan Glauber (jang@de.ibm.com) |
9 | * | 9 | * |
10 | * Derived from "crypto/aes.c" | 10 | * Derived from "crypto/aes.c" |
@@ -27,9 +27,11 @@ | |||
27 | /* data block size for all key lengths */ | 27 | /* data block size for all key lengths */ |
28 | #define AES_BLOCK_SIZE 16 | 28 | #define AES_BLOCK_SIZE 16 |
29 | 29 | ||
30 | int has_aes_128 = 0; | 30 | #define AES_KEYLEN_128 1 |
31 | int has_aes_192 = 0; | 31 | #define AES_KEYLEN_192 2 |
32 | int has_aes_256 = 0; | 32 | #define AES_KEYLEN_256 4 |
33 | |||
34 | static char keylen_flag = 0; | ||
33 | 35 | ||
34 | struct s390_aes_ctx { | 36 | struct s390_aes_ctx { |
35 | u8 iv[AES_BLOCK_SIZE]; | 37 | u8 iv[AES_BLOCK_SIZE]; |
@@ -47,20 +49,19 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, | |||
47 | 49 | ||
48 | switch (key_len) { | 50 | switch (key_len) { |
49 | case 16: | 51 | case 16: |
50 | if (!has_aes_128) | 52 | if (!(keylen_flag & AES_KEYLEN_128)) |
51 | goto fail; | 53 | goto fail; |
52 | break; | 54 | break; |
53 | case 24: | 55 | case 24: |
54 | if (!has_aes_192) | 56 | if (!(keylen_flag & AES_KEYLEN_192)) |
55 | goto fail; | 57 | goto fail; |
56 | 58 | ||
57 | break; | 59 | break; |
58 | case 32: | 60 | case 32: |
59 | if (!has_aes_256) | 61 | if (!(keylen_flag & AES_KEYLEN_256)) |
60 | goto fail; | 62 | goto fail; |
61 | break; | 63 | break; |
62 | default: | 64 | default: |
63 | /* invalid key length */ | ||
64 | goto fail; | 65 | goto fail; |
65 | break; | 66 | break; |
66 | } | 67 | } |
@@ -322,34 +323,32 @@ static int __init aes_init(void) | |||
322 | int ret; | 323 | int ret; |
323 | 324 | ||
324 | if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) | 325 | if (crypt_s390_func_available(KM_AES_128_ENCRYPT)) |
325 | has_aes_128 = 1; | 326 | keylen_flag |= AES_KEYLEN_128; |
326 | if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) | 327 | if (crypt_s390_func_available(KM_AES_192_ENCRYPT)) |
327 | has_aes_192 = 1; | 328 | keylen_flag |= AES_KEYLEN_192; |
328 | if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) | 329 | if (crypt_s390_func_available(KM_AES_256_ENCRYPT)) |
329 | has_aes_256 = 1; | 330 | keylen_flag |= AES_KEYLEN_256; |
331 | |||
332 | if (!keylen_flag) | ||
333 | return -EOPNOTSUPP; | ||
330 | 334 | ||
331 | if (!has_aes_128 && !has_aes_192 && !has_aes_256) | 335 | /* z9 109 and z9 BC/EC only support 128 bit key length */ |
332 | return -ENOSYS; | 336 | if (keylen_flag == AES_KEYLEN_128) |
337 | printk(KERN_INFO | ||
338 | "aes_s390: hardware acceleration only available for" | ||
339 | "128 bit keys\n"); | ||
333 | 340 | ||
334 | ret = crypto_register_alg(&aes_alg); | 341 | ret = crypto_register_alg(&aes_alg); |
335 | if (ret != 0) { | 342 | if (ret) |
336 | printk(KERN_INFO "crypt_s390: aes-s390 couldn't be loaded.\n"); | ||
337 | goto aes_err; | 343 | goto aes_err; |
338 | } | ||
339 | 344 | ||
340 | ret = crypto_register_alg(&ecb_aes_alg); | 345 | ret = crypto_register_alg(&ecb_aes_alg); |
341 | if (ret != 0) { | 346 | if (ret) |
342 | printk(KERN_INFO | ||
343 | "crypt_s390: ecb-aes-s390 couldn't be loaded.\n"); | ||
344 | goto ecb_aes_err; | 347 | goto ecb_aes_err; |
345 | } | ||
346 | 348 | ||
347 | ret = crypto_register_alg(&cbc_aes_alg); | 349 | ret = crypto_register_alg(&cbc_aes_alg); |
348 | if (ret != 0) { | 350 | if (ret) |
349 | printk(KERN_INFO | ||
350 | "crypt_s390: cbc-aes-s390 couldn't be loaded.\n"); | ||
351 | goto cbc_aes_err; | 351 | goto cbc_aes_err; |
352 | } | ||
353 | 352 | ||
354 | out: | 353 | out: |
355 | return ret; | 354 | return ret; |
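The keylen_flag bitmask above replaces the three has_aes_* integers. A stand-alone sketch of the same flag pattern, with the probe results made up for illustration (on real hardware they come from crypt_s390_func_available()):

/* Sketch of the keylen_flag pattern from aes_s390.c; the "probe" result
 * below is hypothetical, on real hardware it comes from the CPACF query. */
#include <stdio.h>

#define AES_KEYLEN_128	1
#define AES_KEYLEN_192	2
#define AES_KEYLEN_256	4

int main(void)
{
	char keylen_flag = 0;

	keylen_flag |= AES_KEYLEN_128;	/* pretend only KM_AES_128 is available */

	if (!keylen_flag)
		printf("no AES support at all -> -EOPNOTSUPP\n");
	else if (keylen_flag == AES_KEYLEN_128)
		printf("hardware acceleration only available for 128 bit keys\n");

	/* the set_key path tests one bit per requested key length */
	printf("16-byte key ok: %d\n", (keylen_flag & AES_KEYLEN_128) != 0);
	printf("32-byte key ok: %d\n", (keylen_flag & AES_KEYLEN_256) != 0);
	return 0;
}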
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 2b137089f625..2775d2618332 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,8 +3,9 @@ | |||
3 | * | 3 | * |
4 | * Support for s390 cryptographic instructions. | 4 | * Support for s390 cryptographic instructions. |
5 | * | 5 | * |
6 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2003,2007 |
7 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 7 | * Author(s): Thomas Spatzier |
8 | * Jan Glauber (jan.glauber@de.ibm.com) | ||
8 | * | 9 | * |
9 | * This program is free software; you can redistribute it and/or modify it | 10 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of the GNU General Public License as published by the Free | 11 | * under the terms of the GNU General Public License as published by the Free |
@@ -32,7 +33,8 @@ enum crypt_s390_operations { | |||
32 | CRYPT_S390_KMAC = 0x0500 | 33 | CRYPT_S390_KMAC = 0x0500 |
33 | }; | 34 | }; |
34 | 35 | ||
35 | /* function codes for KM (CIPHER MESSAGE) instruction | 36 | /* |
37 | * function codes for KM (CIPHER MESSAGE) instruction | ||
36 | * 0x80 is the decipher modifier bit | 38 | * 0x80 is the decipher modifier bit |
37 | */ | 39 | */ |
38 | enum crypt_s390_km_func { | 40 | enum crypt_s390_km_func { |
@@ -51,7 +53,8 @@ enum crypt_s390_km_func { | |||
51 | KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, | 53 | KM_AES_256_DECRYPT = CRYPT_S390_KM | 0x14 | 0x80, |
52 | }; | 54 | }; |
53 | 55 | ||
54 | /* function codes for KMC (CIPHER MESSAGE WITH CHAINING) | 56 | /* |
57 | * function codes for KMC (CIPHER MESSAGE WITH CHAINING) | ||
55 | * instruction | 58 | * instruction |
56 | */ | 59 | */ |
57 | enum crypt_s390_kmc_func { | 60 | enum crypt_s390_kmc_func { |
@@ -68,9 +71,11 @@ enum crypt_s390_kmc_func { | |||
68 | KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, | 71 | KMC_AES_192_DECRYPT = CRYPT_S390_KMC | 0x13 | 0x80, |
69 | KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, | 72 | KMC_AES_256_ENCRYPT = CRYPT_S390_KMC | 0x14, |
70 | KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, | 73 | KMC_AES_256_DECRYPT = CRYPT_S390_KMC | 0x14 | 0x80, |
74 | KMC_PRNG = CRYPT_S390_KMC | 0x43, | ||
71 | }; | 75 | }; |
72 | 76 | ||
73 | /* function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) | 77 | /* |
78 | * function codes for KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) | ||
74 | * instruction | 79 | * instruction |
75 | */ | 80 | */ |
76 | enum crypt_s390_kimd_func { | 81 | enum crypt_s390_kimd_func { |
@@ -79,7 +84,8 @@ enum crypt_s390_kimd_func { | |||
79 | KIMD_SHA_256 = CRYPT_S390_KIMD | 2, | 84 | KIMD_SHA_256 = CRYPT_S390_KIMD | 2, |
80 | }; | 85 | }; |
81 | 86 | ||
82 | /* function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) | 87 | /* |
88 | * function codes for KLMD (COMPUTE LAST MESSAGE DIGEST) | ||
83 | * instruction | 89 | * instruction |
84 | */ | 90 | */ |
85 | enum crypt_s390_klmd_func { | 91 | enum crypt_s390_klmd_func { |
@@ -88,7 +94,8 @@ enum crypt_s390_klmd_func { | |||
88 | KLMD_SHA_256 = CRYPT_S390_KLMD | 2, | 94 | KLMD_SHA_256 = CRYPT_S390_KLMD | 2, |
89 | }; | 95 | }; |
90 | 96 | ||
91 | /* function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) | 97 | /* |
98 | * function codes for KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) | ||
92 | * instruction | 99 | * instruction |
93 | */ | 100 | */ |
94 | enum crypt_s390_kmac_func { | 101 | enum crypt_s390_kmac_func { |
@@ -98,229 +105,219 @@ enum crypt_s390_kmac_func { | |||
98 | KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 | 105 | KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 |
99 | }; | 106 | }; |
100 | 107 | ||
101 | /* status word for s390 crypto instructions' QUERY functions */ | 108 | /** |
102 | struct crypt_s390_query_status { | 109 | * crypt_s390_km: |
103 | u64 high; | 110 | * @func: the function code passed to KM; see crypt_s390_km_func |
104 | u64 low; | 111 | * @param: address of parameter block; see POP for details on each func |
105 | }; | 112 | * @dest: address of destination memory area |
106 | 113 | * @src: address of source memory area | |
107 | /* | 114 | * @src_len: length of src operand in bytes |
115 | * | ||
108 | * Executes the KM (CIPHER MESSAGE) operation of the CPU. | 116 | * Executes the KM (CIPHER MESSAGE) operation of the CPU. |
109 | * @param func: the function code passed to KM; see crypt_s390_km_func | 117 | * |
110 | * @param param: address of parameter block; see POP for details on each func | 118 | * Returns -1 for failure, 0 for the query func, number of processed |
111 | * @param dest: address of destination memory area | 119 | * bytes for encryption/decryption funcs |
112 | * @param src: address of source memory area | ||
113 | * @param src_len: length of src operand in bytes | ||
114 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
115 | * for encryption/decryption funcs | ||
116 | */ | 120 | */ |
117 | static inline int | 121 | static inline int crypt_s390_km(long func, void *param, |
118 | crypt_s390_km(long func, void* param, u8* dest, const u8* src, long src_len) | 122 | u8 *dest, const u8 *src, long src_len) |
119 | { | 123 | { |
120 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 124 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
121 | register void* __param asm("1") = param; | 125 | register void *__param asm("1") = param; |
122 | register const u8* __src asm("2") = src; | 126 | register const u8 *__src asm("2") = src; |
123 | register long __src_len asm("3") = src_len; | 127 | register long __src_len asm("3") = src_len; |
124 | register u8* __dest asm("4") = dest; | 128 | register u8 *__dest asm("4") = dest; |
125 | int ret; | 129 | int ret; |
126 | 130 | ||
127 | asm volatile( | 131 | asm volatile( |
128 | "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ | 132 | "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ |
129 | "1: brc 1,0b \n" /* handle partial completion */ | 133 | "1: brc 1,0b \n" /* handle partial completion */ |
130 | " ahi %0,%h7\n" | 134 | " la %0,0\n" |
131 | "2: ahi %0,%h8\n" | 135 | "2:\n" |
132 | "3:\n" | 136 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
133 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
134 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) | 137 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) |
135 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 138 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
136 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
137 | if (ret < 0) | 139 | if (ret < 0) |
138 | return ret; | 140 | return ret; |
139 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 141 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
140 | } | 142 | } |
141 | 143 | ||
142 | /* | 144 | /** |
145 | * crypt_s390_kmc: | ||
146 | * @func: the function code passed to KM; see crypt_s390_kmc_func | ||
147 | * @param: address of parameter block; see POP for details on each func | ||
148 | * @dest: address of destination memory area | ||
149 | * @src: address of source memory area | ||
150 | * @src_len: length of src operand in bytes | ||
151 | * | ||
143 | * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. | 152 | * Executes the KMC (CIPHER MESSAGE WITH CHAINING) operation of the CPU. |
144 | * @param func: the function code passed to KM; see crypt_s390_kmc_func | 153 | * |
145 | * @param param: address of parameter block; see POP for details on each func | 154 | * Returns -1 for failure, 0 for the query func, number of processed |
146 | * @param dest: address of destination memory area | 155 | * bytes for encryption/decryption funcs |
147 | * @param src: address of source memory area | ||
148 | * @param src_len: length of src operand in bytes | ||
149 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
150 | * for encryption/decryption funcs | ||
151 | */ | 156 | */ |
152 | static inline int | 157 | static inline int crypt_s390_kmc(long func, void *param, |
153 | crypt_s390_kmc(long func, void* param, u8* dest, const u8* src, long src_len) | 158 | u8 *dest, const u8 *src, long src_len) |
154 | { | 159 | { |
155 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 160 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
156 | register void* __param asm("1") = param; | 161 | register void *__param asm("1") = param; |
157 | register const u8* __src asm("2") = src; | 162 | register const u8 *__src asm("2") = src; |
158 | register long __src_len asm("3") = src_len; | 163 | register long __src_len asm("3") = src_len; |
159 | register u8* __dest asm("4") = dest; | 164 | register u8 *__dest asm("4") = dest; |
160 | int ret; | 165 | int ret; |
161 | 166 | ||
162 | asm volatile( | 167 | asm volatile( |
163 | "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ | 168 | "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ |
164 | "1: brc 1,0b \n" /* handle partial completion */ | 169 | "1: brc 1,0b \n" /* handle partial completion */ |
165 | " ahi %0,%h7\n" | 170 | " la %0,0\n" |
166 | "2: ahi %0,%h8\n" | 171 | "2:\n" |
167 | "3:\n" | 172 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
168 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
169 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) | 173 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) |
170 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 174 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
171 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
172 | if (ret < 0) | 175 | if (ret < 0) |
173 | return ret; | 176 | return ret; |
174 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 177 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
175 | } | 178 | } |
176 | 179 | ||
177 | /* | 180 | /** |
181 | * crypt_s390_kimd: | ||
182 | * @func: the function code passed to KM; see crypt_s390_kimd_func | ||
183 | * @param: address of parameter block; see POP for details on each func | ||
184 | * @src: address of source memory area | ||
185 | * @src_len: length of src operand in bytes | ||
186 | * | ||
178 | * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation | 187 | * Executes the KIMD (COMPUTE INTERMEDIATE MESSAGE DIGEST) operation |
179 | * of the CPU. | 188 | * of the CPU. |
180 | * @param func: the function code passed to KM; see crypt_s390_kimd_func | 189 | * |
181 | * @param param: address of parameter block; see POP for details on each func | 190 | * Returns -1 for failure, 0 for the query func, number of processed |
182 | * @param src: address of source memory area | 191 | * bytes for digest funcs |
183 | * @param src_len: length of src operand in bytes | ||
184 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
185 | * for digest funcs | ||
186 | */ | 192 | */ |
187 | static inline int | 193 | static inline int crypt_s390_kimd(long func, void *param, |
188 | crypt_s390_kimd(long func, void* param, const u8* src, long src_len) | 194 | const u8 *src, long src_len) |
189 | { | 195 | { |
190 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 196 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
191 | register void* __param asm("1") = param; | 197 | register void *__param asm("1") = param; |
192 | register const u8* __src asm("2") = src; | 198 | register const u8 *__src asm("2") = src; |
193 | register long __src_len asm("3") = src_len; | 199 | register long __src_len asm("3") = src_len; |
194 | int ret; | 200 | int ret; |
195 | 201 | ||
196 | asm volatile( | 202 | asm volatile( |
197 | "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ | 203 | "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ |
198 | "1: brc 1,0b \n" /* handle partial completion */ | 204 | "1: brc 1,0b \n" /* handle partial completion */ |
199 | " ahi %0,%h6\n" | 205 | " la %0,0\n" |
200 | "2: ahi %0,%h7\n" | 206 | "2:\n" |
201 | "3:\n" | 207 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
202 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
203 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 208 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
204 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 209 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
205 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
206 | if (ret < 0) | 210 | if (ret < 0) |
207 | return ret; | 211 | return ret; |
208 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 212 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
209 | } | 213 | } |
210 | 214 | ||
211 | /* | 215 | /** |
216 | * crypt_s390_klmd: | ||
217 | * @func: the function code passed to KM; see crypt_s390_klmd_func | ||
218 | * @param: address of parameter block; see POP for details on each func | ||
219 | * @src: address of source memory area | ||
220 | * @src_len: length of src operand in bytes | ||
221 | * | ||
212 | * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. | 222 | * Executes the KLMD (COMPUTE LAST MESSAGE DIGEST) operation of the CPU. |
213 | * @param func: the function code passed to KM; see crypt_s390_klmd_func | 223 | * |
214 | * @param param: address of parameter block; see POP for details on each func | 224 | * Returns -1 for failure, 0 for the query func, number of processed |
215 | * @param src: address of source memory area | 225 | * bytes for digest funcs |
216 | * @param src_len: length of src operand in bytes | ||
217 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
218 | * for digest funcs | ||
219 | */ | 226 | */ |
220 | static inline int | 227 | static inline int crypt_s390_klmd(long func, void *param, |
221 | crypt_s390_klmd(long func, void* param, const u8* src, long src_len) | 228 | const u8 *src, long src_len) |
222 | { | 229 | { |
223 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 230 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
224 | register void* __param asm("1") = param; | 231 | register void *__param asm("1") = param; |
225 | register const u8* __src asm("2") = src; | 232 | register const u8 *__src asm("2") = src; |
226 | register long __src_len asm("3") = src_len; | 233 | register long __src_len asm("3") = src_len; |
227 | int ret; | 234 | int ret; |
228 | 235 | ||
229 | asm volatile( | 236 | asm volatile( |
230 | "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ | 237 | "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ |
231 | "1: brc 1,0b \n" /* handle partial completion */ | 238 | "1: brc 1,0b \n" /* handle partial completion */ |
232 | " ahi %0,%h6\n" | 239 | " la %0,0\n" |
233 | "2: ahi %0,%h7\n" | 240 | "2:\n" |
234 | "3:\n" | 241 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
235 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
236 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 242 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
237 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 243 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
238 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
239 | if (ret < 0) | 244 | if (ret < 0) |
240 | return ret; | 245 | return ret; |
241 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 246 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
242 | } | 247 | } |
243 | 248 | ||
244 | /* | 249 | /** |
250 | * crypt_s390_kmac: | ||
251 | * @func: the function code passed to KM; see crypt_s390_klmd_func | ||
252 | * @param: address of parameter block; see POP for details on each func | ||
253 | * @src: address of source memory area | ||
254 | * @src_len: length of src operand in bytes | ||
255 | * | ||
245 | * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation | 256 | * Executes the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE) operation |
246 | * of the CPU. | 257 | * of the CPU. |
247 | * @param func: the function code passed to KM; see crypt_s390_klmd_func | 258 | * |
248 | * @param param: address of parameter block; see POP for details on each func | 259 | * Returns -1 for failure, 0 for the query func, number of processed |
249 | * @param src: address of source memory area | 260 | * bytes for digest funcs |
250 | * @param src_len: length of src operand in bytes | ||
251 | * @returns < zero for failure, 0 for the query func, number of processed bytes | ||
252 | * for digest funcs | ||
253 | */ | 261 | */ |
254 | static inline int | 262 | static inline int crypt_s390_kmac(long func, void *param, |
255 | crypt_s390_kmac(long func, void* param, const u8* src, long src_len) | 263 | const u8 *src, long src_len) |
256 | { | 264 | { |
257 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | 265 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; |
258 | register void* __param asm("1") = param; | 266 | register void *__param asm("1") = param; |
259 | register const u8* __src asm("2") = src; | 267 | register const u8 *__src asm("2") = src; |
260 | register long __src_len asm("3") = src_len; | 268 | register long __src_len asm("3") = src_len; |
261 | int ret; | 269 | int ret; |
262 | 270 | ||
263 | asm volatile( | 271 | asm volatile( |
264 | "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ | 272 | "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ |
265 | "1: brc 1,0b \n" /* handle partial completion */ | 273 | "1: brc 1,0b \n" /* handle partial completion */ |
266 | " ahi %0,%h6\n" | 274 | " la %0,0\n" |
267 | "2: ahi %0,%h7\n" | 275 | "2:\n" |
268 | "3:\n" | 276 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) |
269 | EX_TABLE(0b,3b) EX_TABLE(1b,2b) | ||
270 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 277 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
271 | : "d" (__func), "a" (__param), "0" (-EFAULT), | 278 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
272 | "K" (ENOSYS), "K" (-ENOSYS + EFAULT) : "cc", "memory"); | ||
273 | if (ret < 0) | 279 | if (ret < 0) |
274 | return ret; | 280 | return ret; |
275 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; | 281 | return (func & CRYPT_S390_FUNC_MASK) ? src_len - __src_len : __src_len; |
276 | } | 282 | } |
277 | 283 | ||
278 | /** | 284 | /** |
285 | * crypt_s390_func_available: | ||
286 | * @func: the function code of the specific function; 0 if op in general | ||
287 | * | ||
279 | * Tests if a specific crypto function is implemented on the machine. | 288 | * Tests if a specific crypto function is implemented on the machine. |
280 | * @param func: the function code of the specific function; 0 if op in general | 289 | * |
281 | * @return 1 if func available; 0 if func or op in general not available | 290 | * Returns 1 if func available; 0 if func or op in general not available |
282 | */ | 291 | */ |
283 | static inline int | 292 | static inline int crypt_s390_func_available(int func) |
284 | crypt_s390_func_available(int func) | ||
285 | { | 293 | { |
294 | unsigned char status[16]; | ||
286 | int ret; | 295 | int ret; |
287 | 296 | ||
288 | struct crypt_s390_query_status status = { | 297 | switch (func & CRYPT_S390_OP_MASK) { |
289 | .high = 0, | 298 | case CRYPT_S390_KM: |
290 | .low = 0 | 299 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); |
291 | }; | 300 | break; |
292 | switch (func & CRYPT_S390_OP_MASK){ | 301 | case CRYPT_S390_KMC: |
293 | case CRYPT_S390_KM: | 302 | ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); |
294 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); | 303 | break; |
295 | break; | 304 | case CRYPT_S390_KIMD: |
296 | case CRYPT_S390_KMC: | 305 | ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); |
297 | ret = crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); | 306 | break; |
298 | break; | 307 | case CRYPT_S390_KLMD: |
299 | case CRYPT_S390_KIMD: | 308 | ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); |
300 | ret = crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); | 309 | break; |
301 | break; | 310 | case CRYPT_S390_KMAC: |
302 | case CRYPT_S390_KLMD: | 311 | ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); |
303 | ret = crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); | 312 | break; |
304 | break; | 313 | default: |
305 | case CRYPT_S390_KMAC: | 314 | return 0; |
306 | ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); | ||
307 | break; | ||
308 | default: | ||
309 | ret = 0; | ||
310 | return ret; | ||
311 | } | ||
312 | if (ret >= 0){ | ||
313 | func &= CRYPT_S390_FUNC_MASK; | ||
314 | func &= 0x7f; //mask modifier bit | ||
315 | if (func < 64){ | ||
316 | ret = (status.high >> (64 - func - 1)) & 0x1; | ||
317 | } else { | ||
318 | ret = (status.low >> (128 - func - 1)) & 0x1; | ||
319 | } | ||
320 | } else { | ||
321 | ret = 0; | ||
322 | } | 315 | } |
323 | return ret; | 316 | if (ret < 0) |
317 | return 0; | ||
318 | func &= CRYPT_S390_FUNC_MASK; | ||
319 | func &= 0x7f; /* mask modifier bit */ | ||
320 | return (status[func >> 3] & (0x80 >> (func & 7))) != 0; | ||
324 | } | 321 | } |
325 | 322 | ||
326 | #endif // _CRYPTO_ARCH_S390_CRYPT_S390_H | 323 | #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */ |
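The reworked crypt_s390_func_available() copies the 128-bit query status into a 16-byte array and tests one bit per function code. A self-contained illustration of just that bit arithmetic, using a fabricated status block:

/* Stand-alone illustration of the bit test in crypt_s390_func_available();
 * the status block contents below are made up, on real hardware they come
 * from the QUERY function of KM/KMC/KIMD/KLMD/KMAC. */
#include <stdio.h>

static int func_bit_set(const unsigned char status[16], int func)
{
	func &= 0x7f;				/* mask the decipher modifier bit */
	return (status[func >> 3] & (0x80 >> (func & 7))) != 0;
}

int main(void)
{
	unsigned char status[16] = { 0 };

	/* pretend the machine reports KM function code 0x12 (AES-128) */
	status[0x12 >> 3] |= 0x80 >> (0x12 & 7);

	printf("AES-128 encrypt (0x12): %d\n", func_bit_set(status, 0x12));
	printf("AES-128 decrypt (0x12|0x80): %d\n", func_bit_set(status, 0x12 | 0x80));
	printf("AES-256 encrypt (0x14): %d\n", func_bit_set(status, 0x14));
	return 0;
}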
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c
deleted file mode 100644
index 54fb11d7fadd..000000000000
--- a/arch/s390/crypto/crypt_s390_query.c
+++ /dev/null
@@ -1,129 +0,0 @@ | |||
1 | /* | ||
2 | * Cryptographic API. | ||
3 | * | ||
4 | * Support for s390 cryptographic instructions. | ||
5 | * Testing module for querying processor crypto capabilities. | ||
6 | * | ||
7 | * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
8 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the Free | ||
12 | * Software Foundation; either version 2 of the License, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | */ | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include "crypt_s390.h" | ||
21 | |||
22 | static void query_available_functions(void) | ||
23 | { | ||
24 | printk(KERN_INFO "#####################\n"); | ||
25 | |||
26 | /* query available KM functions */ | ||
27 | printk(KERN_INFO "KM_QUERY: %d\n", | ||
28 | crypt_s390_func_available(KM_QUERY)); | ||
29 | printk(KERN_INFO "KM_DEA: %d\n", | ||
30 | crypt_s390_func_available(KM_DEA_ENCRYPT)); | ||
31 | printk(KERN_INFO "KM_TDEA_128: %d\n", | ||
32 | crypt_s390_func_available(KM_TDEA_128_ENCRYPT)); | ||
33 | printk(KERN_INFO "KM_TDEA_192: %d\n", | ||
34 | crypt_s390_func_available(KM_TDEA_192_ENCRYPT)); | ||
35 | printk(KERN_INFO "KM_AES_128: %d\n", | ||
36 | crypt_s390_func_available(KM_AES_128_ENCRYPT)); | ||
37 | printk(KERN_INFO "KM_AES_192: %d\n", | ||
38 | crypt_s390_func_available(KM_AES_192_ENCRYPT)); | ||
39 | printk(KERN_INFO "KM_AES_256: %d\n", | ||
40 | crypt_s390_func_available(KM_AES_256_ENCRYPT)); | ||
41 | |||
42 | /* query available KMC functions */ | ||
43 | printk(KERN_INFO "KMC_QUERY: %d\n", | ||
44 | crypt_s390_func_available(KMC_QUERY)); | ||
45 | printk(KERN_INFO "KMC_DEA: %d\n", | ||
46 | crypt_s390_func_available(KMC_DEA_ENCRYPT)); | ||
47 | printk(KERN_INFO "KMC_TDEA_128: %d\n", | ||
48 | crypt_s390_func_available(KMC_TDEA_128_ENCRYPT)); | ||
49 | printk(KERN_INFO "KMC_TDEA_192: %d\n", | ||
50 | crypt_s390_func_available(KMC_TDEA_192_ENCRYPT)); | ||
51 | printk(KERN_INFO "KMC_AES_128: %d\n", | ||
52 | crypt_s390_func_available(KMC_AES_128_ENCRYPT)); | ||
53 | printk(KERN_INFO "KMC_AES_192: %d\n", | ||
54 | crypt_s390_func_available(KMC_AES_192_ENCRYPT)); | ||
55 | printk(KERN_INFO "KMC_AES_256: %d\n", | ||
56 | crypt_s390_func_available(KMC_AES_256_ENCRYPT)); | ||
57 | |||
58 | /* query available KIMD functions */ | ||
59 | printk(KERN_INFO "KIMD_QUERY: %d\n", | ||
60 | crypt_s390_func_available(KIMD_QUERY)); | ||
61 | printk(KERN_INFO "KIMD_SHA_1: %d\n", | ||
62 | crypt_s390_func_available(KIMD_SHA_1)); | ||
63 | printk(KERN_INFO "KIMD_SHA_256: %d\n", | ||
64 | crypt_s390_func_available(KIMD_SHA_256)); | ||
65 | |||
66 | /* query available KLMD functions */ | ||
67 | printk(KERN_INFO "KLMD_QUERY: %d\n", | ||
68 | crypt_s390_func_available(KLMD_QUERY)); | ||
69 | printk(KERN_INFO "KLMD_SHA_1: %d\n", | ||
70 | crypt_s390_func_available(KLMD_SHA_1)); | ||
71 | printk(KERN_INFO "KLMD_SHA_256: %d\n", | ||
72 | crypt_s390_func_available(KLMD_SHA_256)); | ||
73 | |||
74 | /* query available KMAC functions */ | ||
75 | printk(KERN_INFO "KMAC_QUERY: %d\n", | ||
76 | crypt_s390_func_available(KMAC_QUERY)); | ||
77 | printk(KERN_INFO "KMAC_DEA: %d\n", | ||
78 | crypt_s390_func_available(KMAC_DEA)); | ||
79 | printk(KERN_INFO "KMAC_TDEA_128: %d\n", | ||
80 | crypt_s390_func_available(KMAC_TDEA_128)); | ||
81 | printk(KERN_INFO "KMAC_TDEA_192: %d\n", | ||
82 | crypt_s390_func_available(KMAC_TDEA_192)); | ||
83 | } | ||
84 | |||
85 | static int init(void) | ||
86 | { | ||
87 | struct crypt_s390_query_status status = { | ||
88 | .high = 0, | ||
89 | .low = 0 | ||
90 | }; | ||
91 | |||
92 | printk(KERN_INFO "crypt_s390: querying available crypto functions\n"); | ||
93 | crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); | ||
94 | printk(KERN_INFO "KM:\t%016llx %016llx\n", | ||
95 | (unsigned long long) status.high, | ||
96 | (unsigned long long) status.low); | ||
97 | status.high = status.low = 0; | ||
98 | crypt_s390_kmc(KMC_QUERY, &status, NULL, NULL, 0); | ||
99 | printk(KERN_INFO "KMC:\t%016llx %016llx\n", | ||
100 | (unsigned long long) status.high, | ||
101 | (unsigned long long) status.low); | ||
102 | status.high = status.low = 0; | ||
103 | crypt_s390_kimd(KIMD_QUERY, &status, NULL, 0); | ||
104 | printk(KERN_INFO "KIMD:\t%016llx %016llx\n", | ||
105 | (unsigned long long) status.high, | ||
106 | (unsigned long long) status.low); | ||
107 | status.high = status.low = 0; | ||
108 | crypt_s390_klmd(KLMD_QUERY, &status, NULL, 0); | ||
109 | printk(KERN_INFO "KLMD:\t%016llx %016llx\n", | ||
110 | (unsigned long long) status.high, | ||
111 | (unsigned long long) status.low); | ||
112 | status.high = status.low = 0; | ||
113 | crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); | ||
114 | printk(KERN_INFO "KMAC:\t%016llx %016llx\n", | ||
115 | (unsigned long long) status.high, | ||
116 | (unsigned long long) status.low); | ||
117 | |||
118 | query_available_functions(); | ||
119 | return -ECANCELED; | ||
120 | } | ||
121 | |||
122 | static void __exit cleanup(void) | ||
123 | { | ||
124 | } | ||
125 | |||
126 | module_init(init); | ||
127 | module_exit(cleanup); | ||
128 | |||
129 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/s390/crypto/des_check_key.c b/arch/s390/crypto/des_check_key.c
index e3f5c5f238fe..5706af266442 100644
--- a/arch/s390/crypto/des_check_key.c
+++ b/arch/s390/crypto/des_check_key.c
@@ -10,8 +10,9 @@ | |||
10 | * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. | 10 | * scatterlist interface. Changed LGPL to GPL per section 3 of the LGPL. |
11 | * | 11 | * |
12 | * s390 Version: | 12 | * s390 Version: |
13 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | 13 | * Copyright IBM Corp. 2003 |
14 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 14 | * Author(s): Thomas Spatzier |
15 | * Jan Glauber (jan.glauber@de.ibm.com) | ||
15 | * | 16 | * |
16 | * Derived from "crypto/des.c" | 17 | * Derived from "crypto/des.c" |
17 | * Copyright (c) 1992 Dana L. How. | 18 | * Copyright (c) 1992 Dana L. How. |
@@ -30,6 +31,7 @@ | |||
30 | #include <linux/module.h> | 31 | #include <linux/module.h> |
31 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
32 | #include <linux/crypto.h> | 33 | #include <linux/crypto.h> |
34 | #include "crypto_des.h" | ||
33 | 35 | ||
34 | #define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) | 36 | #define ROR(d,c,o) ((d) = (d) >> (c) | (d) << (o)) |
35 | 37 | ||
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 2aba04852fe3..ea22707f435f 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,9 +3,9 @@ | |||
3 | * | 3 | * |
4 | * s390 implementation of the DES Cipher Algorithm. | 4 | * s390 implementation of the DES Cipher Algorithm. |
5 | * | 5 | * |
6 | * Copyright (c) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 2003,2007 |
7 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 7 | * Author(s): Thomas Spatzier |
8 | * | 8 | * Jan Glauber (jan.glauber@de.ibm.com) |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
@@ -557,7 +557,7 @@ static int init(void) | |||
557 | if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || | 557 | if (!crypt_s390_func_available(KM_DEA_ENCRYPT) || |
558 | !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || | 558 | !crypt_s390_func_available(KM_TDEA_128_ENCRYPT) || |
559 | !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) | 559 | !crypt_s390_func_available(KM_TDEA_192_ENCRYPT)) |
560 | return -ENOSYS; | 560 | return -EOPNOTSUPP; |
561 | 561 | ||
562 | ret = crypto_register_alg(&des_alg); | 562 | ret = crypto_register_alg(&des_alg); |
563 | if (ret) | 563 | if (ret) |
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
new file mode 100644
index 000000000000..8eb3a1aedc22
--- /dev/null
+++ b/arch/s390/crypto/prng.c
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2006,2007 | ||
3 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> | ||
4 | * Driver for the s390 pseudo random number generator | ||
5 | */ | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/miscdevice.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/moduleparam.h> | ||
12 | #include <linux/random.h> | ||
13 | #include <asm/debug.h> | ||
14 | #include <asm/uaccess.h> | ||
15 | |||
16 | #include "crypt_s390.h" | ||
17 | |||
18 | MODULE_LICENSE("GPL"); | ||
19 | MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>"); | ||
20 | MODULE_DESCRIPTION("s390 PRNG interface"); | ||
21 | |||
22 | static int prng_chunk_size = 256; | ||
23 | module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH); | ||
24 | MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes"); | ||
25 | |||
26 | static int prng_entropy_limit = 4096; | ||
27 | module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR); | ||
28 | MODULE_PARM_DESC(prng_entropy_limit, | ||
29 | "PRNG add entropy after that much bytes were produced"); | ||
30 | |||
31 | /* | ||
32 | * Any one who considers arithmetical methods of producing random digits is, | ||
33 | * of course, in a state of sin. -- John von Neumann | ||
34 | */ | ||
35 | |||
36 | struct s390_prng_data { | ||
37 | unsigned long count; /* how many bytes were produced */ | ||
38 | char *buf; | ||
39 | }; | ||
40 | |||
41 | static struct s390_prng_data *p; | ||
42 | |||
43 | /* copied from libica, use a non-zero initial parameter block */ | ||
44 | static unsigned char parm_block[32] = { | ||
45 | 0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4, | ||
46 | 0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0, | ||
47 | }; | ||
48 | |||
49 | static int prng_open(struct inode *inode, struct file *file) | ||
50 | { | ||
51 | return nonseekable_open(inode, file); | ||
52 | } | ||
53 | |||
54 | static void prng_add_entropy(void) | ||
55 | { | ||
56 | __u64 entropy[4]; | ||
57 | unsigned int i; | ||
58 | int ret; | ||
59 | |||
60 | for (i = 0; i < 16; i++) { | ||
61 | ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy, | ||
62 | (char *)entropy, sizeof(entropy)); | ||
63 | BUG_ON(ret < 0 || ret != sizeof(entropy)); | ||
64 | memcpy(parm_block, entropy, sizeof(entropy)); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static void prng_seed(int nbytes) | ||
69 | { | ||
70 | char buf[16]; | ||
71 | int i = 0; | ||
72 | |||
73 | BUG_ON(nbytes > 16); | ||
74 | get_random_bytes(buf, nbytes); | ||
75 | |||
76 | /* Add the entropy */ | ||
77 | while (nbytes >= 8) { | ||
78 | *((__u64 *)parm_block) ^= *((__u64 *)buf+i*8); | ||
79 | prng_add_entropy(); | ||
80 | i += 8; | ||
81 | nbytes -= 8; | ||
82 | } | ||
83 | prng_add_entropy(); | ||
84 | } | ||
85 | |||
86 | static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, | ||
87 | loff_t *ppos) | ||
88 | { | ||
89 | int chunk, n; | ||
90 | int ret = 0; | ||
91 | int tmp; | ||
92 | |||
93 | /* nbytes can be arbitrarily long, we split it into chunks */ | ||
94 | while (nbytes) { | ||
95 | /* same as in extract_entropy_user in random.c */ | ||
96 | if (need_resched()) { | ||
97 | if (signal_pending(current)) { | ||
98 | if (ret == 0) | ||
99 | ret = -ERESTARTSYS; | ||
100 | break; | ||
101 | } | ||
102 | schedule(); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * we lose some random bytes if an attacker issues | ||
107 | * reads < 8 bytes, but we don't care | ||
108 | */ | ||
109 | chunk = min_t(int, nbytes, prng_chunk_size); | ||
110 | |||
111 | /* PRNG only likes multiples of 8 bytes */ | ||
112 | n = (chunk + 7) & -8; | ||
113 | |||
114 | if (p->count > prng_entropy_limit) | ||
115 | prng_seed(8); | ||
116 | |||
117 | /* if the CPU supports PRNG stckf is present too */ | ||
118 | asm volatile(".insn s,0xb27c0000,%0" | ||
119 | : "=m" (*((unsigned long long *)p->buf)) : : "cc"); | ||
120 | |||
121 | /* | ||
122 | * Beside the STCKF the input for the TDES-EDE is the output | ||
123 | * of the last operation. We differ here from X9.17 since we | ||
124 | * only store one timestamp into the buffer. Padding the whole | ||
125 | * buffer with timestamps does not improve security, since | ||
126 | * successive stckf have nearly constant offsets. | ||
127 | * If an attacker knows the first timestamp it would be | ||
128 | * trivial to guess the additional values. One timestamp | ||
129 | * is therefore enough and still guarantees unique input values. | ||
130 | * | ||
131 | * Note: you can still get strict X9.17 conformity by setting | ||
132 | * prng_chunk_size to 8 bytes. | ||
133 | */ | ||
134 | tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n); | ||
135 | BUG_ON((tmp < 0) || (tmp != n)); | ||
136 | |||
137 | p->count += n; | ||
138 | |||
139 | if (copy_to_user(ubuf, p->buf, chunk)) | ||
140 | return -EFAULT; | ||
141 | |||
142 | nbytes -= chunk; | ||
143 | ret += chunk; | ||
144 | ubuf += chunk; | ||
145 | } | ||
146 | return ret; | ||
147 | } | ||
148 | |||
149 | static struct file_operations prng_fops = { | ||
150 | .owner = THIS_MODULE, | ||
151 | .open = &prng_open, | ||
152 | .release = NULL, | ||
153 | .read = &prng_read, | ||
154 | }; | ||
155 | |||
156 | static struct miscdevice prng_dev = { | ||
157 | .name = "prandom", | ||
158 | .minor = MISC_DYNAMIC_MINOR, | ||
159 | .fops = &prng_fops, | ||
160 | }; | ||
161 | |||
162 | static int __init prng_init(void) | ||
163 | { | ||
164 | int ret; | ||
165 | |||
166 | /* check if the CPU has a PRNG */ | ||
167 | if (!crypt_s390_func_available(KMC_PRNG)) | ||
168 | return -EOPNOTSUPP; | ||
169 | |||
170 | if (prng_chunk_size < 8) | ||
171 | return -EINVAL; | ||
172 | |||
173 | p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL); | ||
174 | if (!p) | ||
175 | return -ENOMEM; | ||
176 | p->count = 0; | ||
177 | |||
178 | p->buf = kmalloc(prng_chunk_size, GFP_KERNEL); | ||
179 | if (!p->buf) { | ||
180 | ret = -ENOMEM; | ||
181 | goto out_free; | ||
182 | } | ||
183 | |||
184 | /* initialize the PRNG, add 128 bits of entropy */ | ||
185 | prng_seed(16); | ||
186 | |||
187 | ret = misc_register(&prng_dev); | ||
188 | if (ret) { | ||
189 | printk(KERN_WARNING | ||
190 | "Could not register misc device for PRNG.\n"); | ||
191 | goto out_buf; | ||
192 | } | ||
193 | return 0; | ||
194 | |||
195 | out_buf: | ||
196 | kfree(p->buf); | ||
197 | out_free: | ||
198 | kfree(p); | ||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | static void __exit prng_exit(void) | ||
203 | { | ||
204 | /* wipe me */ | ||
205 | memset(p->buf, 0, prng_chunk_size); | ||
206 | kfree(p->buf); | ||
207 | kfree(p); | ||
208 | |||
209 | misc_deregister(&prng_dev); | ||
210 | } | ||
211 | |||
212 | module_init(prng_init); | ||
213 | module_exit(prng_exit); | ||
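prng_read() above rounds every user request up to a whole number of 8-byte TDES blocks with (chunk + 7) & -8. A tiny sketch of that rounding on a few arbitrary sizes:

/* The "& -8" rounding used by prng_read(): round a request up to the next
 * multiple of the 8-byte TDES block size. Example values are arbitrary. */
#include <stdio.h>

int main(void)
{
	int sizes[] = { 1, 7, 8, 13, 256 };

	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int chunk = sizes[i];
		int n = (chunk + 7) & -8;	/* same expression as in prng_read() */
		printf("chunk %3d -> %3d bytes generated\n", chunk, n);
	}
	return 0;
}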
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 49ca8690ee39..969639f31977 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,8 +8,9 @@ | |||
8 | * implementation written by Steve Reid. | 8 | * implementation written by Steve Reid. |
9 | * | 9 | * |
10 | * s390 Version: | 10 | * s390 Version: |
11 | * Copyright (C) 2003 IBM Deutschland GmbH, IBM Corporation | 11 | * Copyright IBM Corp. 2003,2007 |
12 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | 12 | * Author(s): Thomas Spatzier |
13 | * Jan Glauber (jan.glauber@de.ibm.com) | ||
13 | * | 14 | * |
14 | * Derived from "crypto/sha1.c" | 15 | * Derived from "crypto/sha1.c" |
15 | * Copyright (c) Alan Smithee. | 16 | * Copyright (c) Alan Smithee. |
@@ -43,16 +44,14 @@ struct crypt_s390_sha1_ctx { | |||
43 | static void sha1_init(struct crypto_tfm *tfm) | 44 | static void sha1_init(struct crypto_tfm *tfm) |
44 | { | 45 | { |
45 | struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); | 46 | struct crypt_s390_sha1_ctx *ctx = crypto_tfm_ctx(tfm); |
46 | static const u32 initstate[5] = { | 47 | |
47 | 0x67452301, | 48 | ctx->state[0] = 0x67452301; |
48 | 0xEFCDAB89, | 49 | ctx->state[1] = 0xEFCDAB89; |
49 | 0x98BADCFE, | 50 | ctx->state[2] = 0x98BADCFE; |
50 | 0x10325476, | 51 | ctx->state[3] = 0x10325476; |
51 | 0xC3D2E1F0 | 52 | ctx->state[4] = 0xC3D2E1F0; |
52 | }; | ||
53 | 53 | ||
54 | ctx->count = 0; | 54 | ctx->count = 0; |
55 | memcpy(ctx->state, &initstate, sizeof(initstate)); | ||
56 | ctx->buf_len = 0; | 55 | ctx->buf_len = 0; |
57 | } | 56 | } |
58 | 57 | ||
@@ -63,13 +62,13 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data, | |||
63 | long imd_len; | 62 | long imd_len; |
64 | 63 | ||
65 | sctx = crypto_tfm_ctx(tfm); | 64 | sctx = crypto_tfm_ctx(tfm); |
66 | sctx->count += len * 8; //message bit length | 65 | sctx->count += len * 8; /* message bit length */ |
67 | 66 | ||
68 | //anything in buffer yet? -> must be completed | 67 | /* anything in buffer yet? -> must be completed */ |
69 | if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { | 68 | if (sctx->buf_len && (sctx->buf_len + len) >= SHA1_BLOCK_SIZE) { |
70 | //complete full block and hash | 69 | /* complete full block and hash */ |
71 | memcpy(sctx->buffer + sctx->buf_len, data, | 70 | memcpy(sctx->buffer + sctx->buf_len, data, |
72 | SHA1_BLOCK_SIZE - sctx->buf_len); | 71 | SHA1_BLOCK_SIZE - sctx->buf_len); |
73 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, | 72 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, |
74 | SHA1_BLOCK_SIZE); | 73 | SHA1_BLOCK_SIZE); |
75 | data += SHA1_BLOCK_SIZE - sctx->buf_len; | 74 | data += SHA1_BLOCK_SIZE - sctx->buf_len; |
@@ -77,37 +76,36 @@ static void sha1_update(struct crypto_tfm *tfm, const u8 *data, | |||
77 | sctx->buf_len = 0; | 76 | sctx->buf_len = 0; |
78 | } | 77 | } |
79 | 78 | ||
80 | //rest of data contains full blocks? | 79 | /* rest of data contains full blocks? */ |
81 | imd_len = len & ~0x3ful; | 80 | imd_len = len & ~0x3ful; |
82 | if (imd_len){ | 81 | if (imd_len) { |
83 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); | 82 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, data, imd_len); |
84 | data += imd_len; | 83 | data += imd_len; |
85 | len -= imd_len; | 84 | len -= imd_len; |
86 | } | 85 | } |
87 | //anything left? store in buffer | 86 | /* anything left? store in buffer */ |
88 | if (len){ | 87 | if (len) { |
89 | memcpy(sctx->buffer + sctx->buf_len , data, len); | 88 | memcpy(sctx->buffer + sctx->buf_len , data, len); |
90 | sctx->buf_len += len; | 89 | sctx->buf_len += len; |
91 | } | 90 | } |
92 | } | 91 | } |
93 | 92 | ||
94 | 93 | ||
95 | static void | 94 | static void pad_message(struct crypt_s390_sha1_ctx* sctx) |
96 | pad_message(struct crypt_s390_sha1_ctx* sctx) | ||
97 | { | 95 | { |
98 | int index; | 96 | int index; |
99 | 97 | ||
100 | index = sctx->buf_len; | 98 | index = sctx->buf_len; |
101 | sctx->buf_len = (sctx->buf_len < 56)? | 99 | sctx->buf_len = (sctx->buf_len < 56) ? |
102 | SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; | 100 | SHA1_BLOCK_SIZE:2 * SHA1_BLOCK_SIZE; |
103 | //start pad with 1 | 101 | /* start pad with 1 */ |
104 | sctx->buffer[index] = 0x80; | 102 | sctx->buffer[index] = 0x80; |
105 | //pad with zeros | 103 | /* pad with zeros */ |
106 | index++; | 104 | index++; |
107 | memset(sctx->buffer + index, 0x00, sctx->buf_len - index); | 105 | memset(sctx->buffer + index, 0x00, sctx->buf_len - index); |
108 | //append length | 106 | /* append length */ |
109 | memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, | 107 | memcpy(sctx->buffer + sctx->buf_len - 8, &sctx->count, |
110 | sizeof sctx->count); | 108 | sizeof sctx->count); |
111 | } | 109 | } |
112 | 110 | ||
113 | /* Add padding and return the message digest. */ | 111 | /* Add padding and return the message digest. */ |
@@ -115,47 +113,40 @@ static void sha1_final(struct crypto_tfm *tfm, u8 *out) | |||
115 | { | 113 | { |
116 | struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); | 114 | struct crypt_s390_sha1_ctx *sctx = crypto_tfm_ctx(tfm); |
117 | 115 | ||
118 | //must perform manual padding | 116 | /* must perform manual padding */ |
119 | pad_message(sctx); | 117 | pad_message(sctx); |
120 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); | 118 | crypt_s390_kimd(KIMD_SHA_1, sctx->state, sctx->buffer, sctx->buf_len); |
121 | //copy digest to out | 119 | /* copy digest to out */ |
122 | memcpy(out, sctx->state, SHA1_DIGEST_SIZE); | 120 | memcpy(out, sctx->state, SHA1_DIGEST_SIZE); |
123 | /* Wipe context */ | 121 | /* wipe context */ |
124 | memset(sctx, 0, sizeof *sctx); | 122 | memset(sctx, 0, sizeof *sctx); |
125 | } | 123 | } |
126 | 124 | ||
127 | static struct crypto_alg alg = { | 125 | static struct crypto_alg alg = { |
128 | .cra_name = "sha1", | 126 | .cra_name = "sha1", |
129 | .cra_driver_name = "sha1-s390", | 127 | .cra_driver_name= "sha1-s390", |
130 | .cra_priority = CRYPT_S390_PRIORITY, | 128 | .cra_priority = CRYPT_S390_PRIORITY, |
131 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST, | 129 | .cra_flags = CRYPTO_ALG_TYPE_DIGEST, |
132 | .cra_blocksize = SHA1_BLOCK_SIZE, | 130 | .cra_blocksize = SHA1_BLOCK_SIZE, |
133 | .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), | 131 | .cra_ctxsize = sizeof(struct crypt_s390_sha1_ctx), |
134 | .cra_module = THIS_MODULE, | 132 | .cra_module = THIS_MODULE, |
135 | .cra_list = LIST_HEAD_INIT(alg.cra_list), | 133 | .cra_list = LIST_HEAD_INIT(alg.cra_list), |
136 | .cra_u = { .digest = { | 134 | .cra_u = { .digest = { |
137 | .dia_digestsize = SHA1_DIGEST_SIZE, | 135 | .dia_digestsize = SHA1_DIGEST_SIZE, |
138 | .dia_init = sha1_init, | 136 | .dia_init = sha1_init, |
139 | .dia_update = sha1_update, | 137 | .dia_update = sha1_update, |
140 | .dia_final = sha1_final } } | 138 | .dia_final = sha1_final } } |
141 | }; | 139 | }; |
142 | 140 | ||
143 | static int | 141 | static int __init init(void) |
144 | init(void) | ||
145 | { | 142 | { |
146 | int ret = -ENOSYS; | 143 | if (!crypt_s390_func_available(KIMD_SHA_1)) |
144 | return -EOPNOTSUPP; | ||
147 | 145 | ||
148 | if (crypt_s390_func_available(KIMD_SHA_1)){ | 146 | return crypto_register_alg(&alg); |
149 | ret = crypto_register_alg(&alg); | ||
150 | if (ret == 0){ | ||
151 | printk(KERN_INFO "crypt_s390: sha1_s390 loaded.\n"); | ||
152 | } | ||
153 | } | ||
154 | return ret; | ||
155 | } | 147 | } |
156 | 148 | ||
157 | static void __exit | 149 | static void __exit fini(void) |
158 | fini(void) | ||
159 | { | 150 | { |
160 | crypto_unregister_alg(&alg); | 151 | crypto_unregister_alg(&alg); |
161 | } | 152 | } |
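pad_message() above performs the usual SHA-1 padding inside the context buffer: a 0x80 byte, zero fill, then the 64-bit message bit count in the last eight bytes (the plain memcpy of the count works in the kernel code because s390 is big-endian). A host-independent sketch of the same layout, writing the length explicitly big-endian:

/* Sketch of the pad_message() layout from sha1_s390.c. Writes the bit count
 * byte by byte so it behaves the same on any host; the kernel code can
 * simply memcpy the u64 because s390 is big-endian. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SHA1_BLOCK_SIZE 64

static int pad_message(uint8_t buffer[2 * SHA1_BLOCK_SIZE], int buf_len,
		       uint64_t bit_count)
{
	int index = buf_len;
	int padded = (buf_len < 56) ? SHA1_BLOCK_SIZE : 2 * SHA1_BLOCK_SIZE;

	buffer[index++] = 0x80;					/* start pad with 1 */
	memset(buffer + index, 0x00, padded - index - 8);	/* pad with zeros */
	for (int i = 0; i < 8; i++)				/* append bit length */
		buffer[padded - 8 + i] = bit_count >> (56 - 8 * i);
	return padded;						/* bytes to hash */
}

int main(void)
{
	uint8_t buf[2 * SHA1_BLOCK_SIZE] = "abc";
	int len = pad_message(buf, 3, 3 * 8);

	printf("padded length: %d, last byte: 0x%02x\n", len, buf[len - 1]);
	return 0;
}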
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 8e4e67503fe7..78436c696d37 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@ | |||
4 | * s390 implementation of the SHA256 Secure Hash Algorithm. | 4 | * s390 implementation of the SHA256 Secure Hash Algorithm. |
5 | * | 5 | * |
6 | * s390 Version: | 6 | * s390 Version: |
7 | * Copyright (C) 2005 IBM Deutschland GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2005,2007 |
8 | * Author(s): Jan Glauber (jang@de.ibm.com) | 8 | * Author(s): Jan Glauber (jang@de.ibm.com) |
9 | * | 9 | * |
10 | * Derived from "crypto/sha256.c" | 10 | * Derived from "crypto/sha256.c" |
@@ -143,15 +143,10 @@ static struct crypto_alg alg = { | |||
143 | 143 | ||
144 | static int init(void) | 144 | static int init(void) |
145 | { | 145 | { |
146 | int ret; | ||
147 | |||
148 | if (!crypt_s390_func_available(KIMD_SHA_256)) | 146 | if (!crypt_s390_func_available(KIMD_SHA_256)) |
149 | return -ENOSYS; | 147 | return -EOPNOTSUPP; |
150 | 148 | ||
151 | ret = crypto_register_alg(&alg); | 149 | return crypto_register_alg(&alg); |
152 | if (ret != 0) | ||
153 | printk(KERN_INFO "crypt_s390: sha256_s390 couldn't be loaded."); | ||
154 | return ret; | ||
155 | } | 150 | } |
156 | 151 | ||
157 | static void __exit fini(void) | 152 | static void __exit fini(void) |
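
Both the sha1_s390 and sha256_s390 init routines above converge on the same minimal pattern: probe for the CPACF instruction with crypt_s390_func_available(), return -EOPNOTSUPP (instead of the old -ENOSYS) when it is missing, and otherwise just return the result of crypto_register_alg() with no extra printk or return-code bookkeeping. A hedged sketch of that pattern; KIMD_SHA_EXAMPLE, example_alg and the function names are placeholders, not symbols from the patch:

static int __init example_init(void)
{
	/* bail out early if the CPACF function is not available */
	if (!crypt_s390_func_available(KIMD_SHA_EXAMPLE))
		return -EOPNOTSUPP;

	/* example_alg would be a crypto_alg defined like "alg" above */
	return crypto_register_alg(&example_alg);
}

static void __exit example_fini(void)
{
	crypto_unregister_alg(&example_alg);
}

module_init(example_init);
module_exit(example_fini);
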
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 5368cf4a350e..7c621b8ef683 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -108,6 +108,8 @@ CONFIG_DEFAULT_MIGRATION_COST=1000000 | |||
108 | CONFIG_COMPAT=y | 108 | CONFIG_COMPAT=y |
109 | CONFIG_SYSVIPC_COMPAT=y | 109 | CONFIG_SYSVIPC_COMPAT=y |
110 | CONFIG_AUDIT_ARCH=y | 110 | CONFIG_AUDIT_ARCH=y |
111 | CONFIG_S390_SWITCH_AMODE=y | ||
112 | CONFIG_S390_EXEC_PROTECT=y | ||
111 | 113 | ||
112 | # | 114 | # |
113 | # Code generation options | 115 | # Code generation options |
@@ -431,7 +433,6 @@ CONFIG_TN3270_CONSOLE=y | |||
431 | CONFIG_TN3215=y | 433 | CONFIG_TN3215=y |
432 | CONFIG_TN3215_CONSOLE=y | 434 | CONFIG_TN3215_CONSOLE=y |
433 | CONFIG_CCW_CONSOLE=y | 435 | CONFIG_CCW_CONSOLE=y |
434 | CONFIG_SCLP=y | ||
435 | CONFIG_SCLP_TTY=y | 436 | CONFIG_SCLP_TTY=y |
436 | CONFIG_SCLP_CONSOLE=y | 437 | CONFIG_SCLP_CONSOLE=y |
437 | CONFIG_SCLP_VT220_TTY=y | 438 | CONFIG_SCLP_VT220_TTY=y |
@@ -724,9 +725,7 @@ CONFIG_CRYPTO_MANAGER=y | |||
724 | # CONFIG_CRYPTO_MD4 is not set | 725 | # CONFIG_CRYPTO_MD4 is not set |
725 | # CONFIG_CRYPTO_MD5 is not set | 726 | # CONFIG_CRYPTO_MD5 is not set |
726 | # CONFIG_CRYPTO_SHA1 is not set | 727 | # CONFIG_CRYPTO_SHA1 is not set |
727 | # CONFIG_CRYPTO_SHA1_S390 is not set | ||
728 | # CONFIG_CRYPTO_SHA256 is not set | 728 | # CONFIG_CRYPTO_SHA256 is not set |
729 | # CONFIG_CRYPTO_SHA256_S390 is not set | ||
730 | # CONFIG_CRYPTO_SHA512 is not set | 729 | # CONFIG_CRYPTO_SHA512 is not set |
731 | # CONFIG_CRYPTO_WP512 is not set | 730 | # CONFIG_CRYPTO_WP512 is not set |
732 | # CONFIG_CRYPTO_TGR192 is not set | 731 | # CONFIG_CRYPTO_TGR192 is not set |
@@ -735,12 +734,10 @@ CONFIG_CRYPTO_ECB=m | |||
735 | CONFIG_CRYPTO_CBC=y | 734 | CONFIG_CRYPTO_CBC=y |
736 | # CONFIG_CRYPTO_LRW is not set | 735 | # CONFIG_CRYPTO_LRW is not set |
737 | # CONFIG_CRYPTO_DES is not set | 736 | # CONFIG_CRYPTO_DES is not set |
738 | # CONFIG_CRYPTO_DES_S390 is not set | ||
739 | # CONFIG_CRYPTO_BLOWFISH is not set | 737 | # CONFIG_CRYPTO_BLOWFISH is not set |
740 | # CONFIG_CRYPTO_TWOFISH is not set | 738 | # CONFIG_CRYPTO_TWOFISH is not set |
741 | # CONFIG_CRYPTO_SERPENT is not set | 739 | # CONFIG_CRYPTO_SERPENT is not set |
742 | # CONFIG_CRYPTO_AES is not set | 740 | # CONFIG_CRYPTO_AES is not set |
743 | # CONFIG_CRYPTO_AES_S390 is not set | ||
744 | # CONFIG_CRYPTO_CAST5 is not set | 741 | # CONFIG_CRYPTO_CAST5 is not set |
745 | # CONFIG_CRYPTO_CAST6 is not set | 742 | # CONFIG_CRYPTO_CAST6 is not set |
746 | # CONFIG_CRYPTO_TEA is not set | 743 | # CONFIG_CRYPTO_TEA is not set |
@@ -755,6 +752,11 @@ CONFIG_CRYPTO_CBC=y | |||
755 | # | 752 | # |
756 | # Hardware crypto devices | 753 | # Hardware crypto devices |
757 | # | 754 | # |
755 | # CONFIG_CRYPTO_SHA1_S390 is not set | ||
756 | # CONFIG_CRYPTO_SHA256_S390 is not set | ||
757 | # CONFIG_CRYPTO_DES_S390 is not set | ||
758 | # CONFIG_CRYPTO_AES_S390 is not set | ||
759 | CONFIG_S390_PRNG=m | ||
758 | 760 | ||
759 | # | 761 | # |
760 | # Library routines | 762 | # Library routines |
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile index f4b00cd81f7c..b08d2abf6178 100644 --- a/arch/s390/hypfs/Makefile +++ b/arch/s390/hypfs/Makefile | |||
@@ -4,4 +4,4 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o | 5 | obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o |
6 | 6 | ||
7 | s390_hypfs-objs := inode.o hypfs_diag.o | 7 | s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o |
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h index f3dbd91965c6..aea572009d60 100644 --- a/arch/s390/hypfs/hypfs.h +++ b/arch/s390/hypfs/hypfs.h | |||
@@ -27,4 +27,13 @@ extern struct dentry *hypfs_create_str(struct super_block *sb, | |||
27 | struct dentry *dir, const char *name, | 27 | struct dentry *dir, const char *name, |
28 | char *string); | 28 | char *string); |
29 | 29 | ||
30 | /* LPAR Hypervisor */ | ||
31 | extern int hypfs_diag_init(void); | ||
32 | extern void hypfs_diag_exit(void); | ||
33 | extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root); | ||
34 | |||
35 | /* VM Hypervisor */ | ||
36 | extern int hypfs_vm_init(void); | ||
37 | extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root); | ||
38 | |||
30 | #endif /* _HYPFS_H_ */ | 39 | #endif /* _HYPFS_H_ */ |
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h deleted file mode 100644 index 256b384aebe1..000000000000 --- a/arch/s390/hypfs/hypfs_diag.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | /* | ||
2 | * arch/s390/hypfs_diag.h | ||
3 | * Hypervisor filesystem for Linux on s390. | ||
4 | * | ||
5 | * Copyright (C) IBM Corp. 2006 | ||
6 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #ifndef _HYPFS_DIAG_H_ | ||
10 | #define _HYPFS_DIAG_H_ | ||
11 | |||
12 | extern int hypfs_diag_init(void); | ||
13 | extern void hypfs_diag_exit(void); | ||
14 | extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root); | ||
15 | |||
16 | #endif /* _HYPFS_DIAG_H_ */ | ||
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c new file mode 100644 index 000000000000..d01fc8f799f0 --- /dev/null +++ b/arch/s390/hypfs/hypfs_vm.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * Hypervisor filesystem for Linux on s390. z/VM implementation. | ||
3 | * | ||
4 | * Copyright (C) IBM Corp. 2006 | ||
5 | * Author(s): Michael Holzheu <holzheu@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/vmalloc.h> | ||
12 | #include <asm/ebcdic.h> | ||
13 | #include "hypfs.h" | ||
14 | |||
15 | #define NAME_LEN 8 | ||
16 | |||
17 | static char local_guest[] = " "; | ||
18 | static char all_guests[] = "* "; | ||
19 | static char *guest_query; | ||
20 | |||
21 | struct diag2fc_data { | ||
22 | __u32 version; | ||
23 | __u32 flags; | ||
24 | __u64 used_cpu; | ||
25 | __u64 el_time; | ||
26 | __u64 mem_min_kb; | ||
27 | __u64 mem_max_kb; | ||
28 | __u64 mem_share_kb; | ||
29 | __u64 mem_used_kb; | ||
30 | __u32 pcpus; | ||
31 | __u32 lcpus; | ||
32 | __u32 vcpus; | ||
33 | __u32 cpu_min; | ||
34 | __u32 cpu_max; | ||
35 | __u32 cpu_shares; | ||
36 | __u32 cpu_use_samp; | ||
37 | __u32 cpu_delay_samp; | ||
38 | __u32 page_wait_samp; | ||
39 | __u32 idle_samp; | ||
40 | __u32 other_samp; | ||
41 | __u32 total_samp; | ||
42 | char guest_name[NAME_LEN]; | ||
43 | }; | ||
44 | |||
45 | struct diag2fc_parm_list { | ||
46 | char userid[NAME_LEN]; | ||
47 | char aci_grp[NAME_LEN]; | ||
48 | __u64 addr; | ||
49 | __u32 size; | ||
50 | __u32 fmt; | ||
51 | }; | ||
52 | |||
53 | static int diag2fc(int size, char* query, void *addr) | ||
54 | { | ||
55 | unsigned long residual_cnt; | ||
56 | unsigned long rc; | ||
57 | struct diag2fc_parm_list parm_list; | ||
58 | |||
59 | memcpy(parm_list.userid, query, NAME_LEN); | ||
60 | ASCEBC(parm_list.userid, NAME_LEN); | ||
61 | parm_list.addr = (unsigned long) addr ; | ||
62 | parm_list.size = size; | ||
63 | parm_list.fmt = 0x02; | ||
64 | memset(parm_list.aci_grp, 0x40, NAME_LEN); | ||
65 | rc = -1; | ||
66 | |||
67 | asm volatile( | ||
68 | " diag %0,%1,0x2fc\n" | ||
69 | "0:\n" | ||
70 | EX_TABLE(0b,0b) | ||
71 | : "=d" (residual_cnt), "+d" (rc) : "0" (&parm_list) : "memory"); | ||
72 | |||
73 | if ((rc != 0 ) && (rc != -2)) | ||
74 | return rc; | ||
75 | else | ||
76 | return -residual_cnt; | ||
77 | } | ||
78 | |||
79 | static struct diag2fc_data *diag2fc_store(char *query, int *count) | ||
80 | { | ||
81 | int size; | ||
82 | struct diag2fc_data *data; | ||
83 | |||
84 | do { | ||
85 | size = diag2fc(0, query, NULL); | ||
86 | if (size < 0) | ||
87 | return ERR_PTR(-EACCES); | ||
88 | data = vmalloc(size); | ||
89 | if (!data) | ||
90 | return ERR_PTR(-ENOMEM); | ||
91 | if (diag2fc(size, query, data) == 0) | ||
92 | break; | ||
93 | vfree(data); | ||
94 | } while (1); | ||
95 | *count = (size / sizeof(*data)); | ||
96 | |||
97 | return data; | ||
98 | } | ||
99 | |||
100 | static void diag2fc_free(void *data) | ||
101 | { | ||
102 | vfree(data); | ||
103 | } | ||
104 | |||
105 | #define ATTRIBUTE(sb, dir, name, member) \ | ||
106 | do { \ | ||
107 | void *rc; \ | ||
108 | rc = hypfs_create_u64(sb, dir, name, member); \ | ||
109 | if (IS_ERR(rc)) \ | ||
110 | return PTR_ERR(rc); \ | ||
111 | } while(0) | ||
112 | |||
113 | static int hpyfs_vm_create_guest(struct super_block *sb, | ||
114 | struct dentry *systems_dir, | ||
115 | struct diag2fc_data *data) | ||
116 | { | ||
117 | char guest_name[NAME_LEN + 1] = {}; | ||
118 | struct dentry *guest_dir, *cpus_dir, *samples_dir, *mem_dir; | ||
119 | int dedicated_flag, capped_value; | ||
120 | |||
121 | capped_value = (data->flags & 0x00000006) >> 1; | ||
122 | dedicated_flag = (data->flags & 0x00000008) >> 3; | ||
123 | |||
124 | /* guest dir */ | ||
125 | memcpy(guest_name, data->guest_name, NAME_LEN); | ||
126 | EBCASC(guest_name, NAME_LEN); | ||
127 | strstrip(guest_name); | ||
128 | guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); | ||
129 | if (IS_ERR(guest_dir)) | ||
130 | return PTR_ERR(guest_dir); | ||
131 | ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time); | ||
132 | |||
133 | /* logical cpu information */ | ||
134 | cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus"); | ||
135 | if (IS_ERR(cpus_dir)) | ||
136 | return PTR_ERR(cpus_dir); | ||
137 | ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu); | ||
138 | ATTRIBUTE(sb, cpus_dir, "capped", capped_value); | ||
139 | ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag); | ||
140 | ATTRIBUTE(sb, cpus_dir, "count", data->vcpus); | ||
141 | ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min); | ||
142 | ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max); | ||
143 | ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares); | ||
144 | |||
145 | /* memory information */ | ||
146 | mem_dir = hypfs_mkdir(sb, guest_dir, "mem"); | ||
147 | if (IS_ERR(mem_dir)) | ||
148 | return PTR_ERR(mem_dir); | ||
149 | ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb); | ||
150 | ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb); | ||
151 | ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb); | ||
152 | ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb); | ||
153 | |||
154 | /* samples */ | ||
155 | samples_dir = hypfs_mkdir(sb, guest_dir, "samples"); | ||
156 | if (IS_ERR(samples_dir)) | ||
157 | return PTR_ERR(samples_dir); | ||
158 | ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp); | ||
159 | ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp); | ||
160 | ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp); | ||
161 | ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp); | ||
162 | ATTRIBUTE(sb, samples_dir, "other", data->other_samp); | ||
163 | ATTRIBUTE(sb, samples_dir, "total", data->total_samp); | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | int hypfs_vm_create_files(struct super_block *sb, struct dentry *root) | ||
168 | { | ||
169 | struct dentry *dir, *file; | ||
170 | struct diag2fc_data *data; | ||
171 | int rc, i, count = 0; | ||
172 | |||
173 | data = diag2fc_store(guest_query, &count); | ||
174 | if (IS_ERR(data)) | ||
175 | return PTR_ERR(data); | ||
176 | |||
177 | /* Hypervisor Info */ | ||
178 | dir = hypfs_mkdir(sb, root, "hyp"); | ||
179 | if (IS_ERR(dir)) { | ||
180 | rc = PTR_ERR(dir); | ||
181 | goto failed; | ||
182 | } | ||
183 | file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor"); | ||
184 | if (IS_ERR(file)) { | ||
185 | rc = PTR_ERR(file); | ||
186 | goto failed; | ||
187 | } | ||
188 | |||
189 | /* physical cpus */ | ||
190 | dir = hypfs_mkdir(sb, root, "cpus"); | ||
191 | if (IS_ERR(dir)) { | ||
192 | rc = PTR_ERR(dir); | ||
193 | goto failed; | ||
194 | } | ||
195 | file = hypfs_create_u64(sb, dir, "count", data->lcpus); | ||
196 | if (IS_ERR(file)) { | ||
197 | rc = PTR_ERR(file); | ||
198 | goto failed; | ||
199 | } | ||
200 | |||
201 | /* guests */ | ||
202 | dir = hypfs_mkdir(sb, root, "systems"); | ||
203 | if (IS_ERR(dir)) { | ||
204 | rc = PTR_ERR(dir); | ||
205 | goto failed; | ||
206 | } | ||
207 | |||
208 | for (i = 0; i < count; i++) { | ||
209 | rc = hpyfs_vm_create_guest(sb, dir, &(data[i])); | ||
210 | if (rc) | ||
211 | goto failed; | ||
212 | } | ||
213 | diag2fc_free(data); | ||
214 | return 0; | ||
215 | |||
216 | failed: | ||
217 | diag2fc_free(data); | ||
218 | return rc; | ||
219 | } | ||
220 | |||
221 | int hypfs_vm_init(void) | ||
222 | { | ||
223 | if (diag2fc(0, all_guests, NULL) > 0) | ||
224 | guest_query = all_guests; | ||
225 | else if (diag2fc(0, local_guest, NULL) > 0) | ||
226 | guest_query = local_guest; | ||
227 | else | ||
228 | return -EACCES; | ||
229 | |||
230 | return 0; | ||
231 | } | ||
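
The new hypfs_vm.c pulls z/VM guest accounting data with DIAG 0x2FC. diag2fc_store() first issues the diagnose with a zero-sized buffer to learn how many bytes are pending, allocates that much with vmalloc(), and retries the whole sequence if the fetch fails because the data grew in between. A hedged sketch of that probe/allocate/retry idiom in isolation, with query_size() and fetch() standing in for the two diag2fc() calls:

static void *fetch_all(int *count, size_t item_size)
{
	void *buf;
	int size;

	do {
		size = query_size();		/* diag2fc(0, query, NULL) */
		if (size < 0)
			return ERR_PTR(-EACCES);
		buf = vmalloc(size);
		if (!buf)
			return ERR_PTR(-ENOMEM);
		if (fetch(buf, size) == 0)	/* diag2fc(size, query, buf) */
			break;
		vfree(buf);			/* data grew, try again */
	} while (1);
	*count = size / item_size;
	return buf;
}

The ATTRIBUTE() macro above then turns each struct diag2fc_data field into one hypfs_create_u64() file, so every guest ends up as a small directory tree (cpus/, mem/, samples/) of read-only u64 attributes.
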
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index b6716c4b9934..a4fda7b53640 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <asm/ebcdic.h> | 20 | #include <asm/ebcdic.h> |
21 | #include "hypfs.h" | 21 | #include "hypfs.h" |
22 | #include "hypfs_diag.h" | ||
23 | 22 | ||
24 | #define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ | 23 | #define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ |
25 | #define TMP_SIZE 64 /* size of temporary buffers */ | 24 | #define TMP_SIZE 64 /* size of temporary buffers */ |
@@ -192,7 +191,10 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
192 | goto out; | 191 | goto out; |
193 | } | 192 | } |
194 | hypfs_delete_tree(sb->s_root); | 193 | hypfs_delete_tree(sb->s_root); |
195 | rc = hypfs_diag_create_files(sb, sb->s_root); | 194 | if (MACHINE_IS_VM) |
195 | rc = hypfs_vm_create_files(sb, sb->s_root); | ||
196 | else | ||
197 | rc = hypfs_diag_create_files(sb, sb->s_root); | ||
196 | if (rc) { | 198 | if (rc) { |
197 | printk(KERN_ERR "hypfs: Update failed\n"); | 199 | printk(KERN_ERR "hypfs: Update failed\n"); |
198 | hypfs_delete_tree(sb->s_root); | 200 | hypfs_delete_tree(sb->s_root); |
@@ -289,7 +291,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) | |||
289 | rc = -ENOMEM; | 291 | rc = -ENOMEM; |
290 | goto err_alloc; | 292 | goto err_alloc; |
291 | } | 293 | } |
292 | rc = hypfs_diag_create_files(sb, root_dentry); | 294 | if (MACHINE_IS_VM) |
295 | rc = hypfs_vm_create_files(sb, root_dentry); | ||
296 | else | ||
297 | rc = hypfs_diag_create_files(sb, root_dentry); | ||
293 | if (rc) | 298 | if (rc) |
294 | goto err_tree; | 299 | goto err_tree; |
295 | sbi->update_file = hypfs_create_update_file(sb, root_dentry); | 300 | sbi->update_file = hypfs_create_update_file(sb, root_dentry); |
@@ -462,11 +467,15 @@ static int __init hypfs_init(void) | |||
462 | { | 467 | { |
463 | int rc; | 468 | int rc; |
464 | 469 | ||
465 | if (MACHINE_IS_VM) | 470 | if (MACHINE_IS_VM) { |
466 | return -ENODATA; | 471 | if (hypfs_vm_init()) |
467 | if (hypfs_diag_init()) { | 472 | /* no diag 2fc, just exit */ |
468 | rc = -ENODATA; | 473 | return -ENODATA; |
469 | goto fail_diag; | 474 | } else { |
475 | if (hypfs_diag_init()) { | ||
476 | rc = -ENODATA; | ||
477 | goto fail_diag; | ||
478 | } | ||
470 | } | 479 | } |
471 | kset_set_kset_s(&s390_subsys, hypervisor_subsys); | 480 | kset_set_kset_s(&s390_subsys, hypervisor_subsys); |
472 | rc = subsystem_register(&s390_subsys); | 481 | rc = subsystem_register(&s390_subsys); |
@@ -480,7 +489,8 @@ static int __init hypfs_init(void) | |||
480 | fail_filesystem: | 489 | fail_filesystem: |
481 | subsystem_unregister(&s390_subsys); | 490 | subsystem_unregister(&s390_subsys); |
482 | fail_sysfs: | 491 | fail_sysfs: |
483 | hypfs_diag_exit(); | 492 | if (!MACHINE_IS_VM) |
493 | hypfs_diag_exit(); | ||
484 | fail_diag: | 494 | fail_diag: |
485 | printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); | 495 | printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc); |
486 | return rc; | 496 | return rc; |
@@ -488,7 +498,8 @@ fail_diag: | |||
488 | 498 | ||
489 | static void __exit hypfs_exit(void) | 499 | static void __exit hypfs_exit(void) |
490 | { | 500 | { |
491 | hypfs_diag_exit(); | 501 | if (!MACHINE_IS_VM) |
502 | hypfs_diag_exit(); | ||
492 | unregister_filesystem(&hypfs_type); | 503 | unregister_filesystem(&hypfs_type); |
493 | subsystem_unregister(&s390_subsys); | 504 | subsystem_unregister(&s390_subsys); |
494 | } | 505 | } |
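
With both back ends in place, inode.c selects the data source at run time: under z/VM the tree is built from DIAG 0x2FC via hypfs_vm_create_files(), otherwise from the LPAR diagnose data via hypfs_diag_create_files(). The same MACHINE_IS_VM test is open-coded in the update path, in hypfs_fill_super() and in hypfs_init()/hypfs_exit(); a hypothetical helper (not part of the patch) makes the dispatch easier to read:

static int hypfs_create_files(struct super_block *sb, struct dentry *root)
{
	if (MACHINE_IS_VM)
		return hypfs_vm_create_files(sb, root);	/* z/VM: diag 2fc */
	return hypfs_diag_create_files(sb, root);	/* LPAR: diag 204 */
}
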
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index a81881c9b297..5492d25d7d69 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -4,9 +4,9 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | obj-y := bitmap.o traps.o time.o process.o reset.o \ | 7 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ |
8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
9 | semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o | 9 | semaphore.o s390_ext.o debug.o irq.o ipl.o |
10 | 10 | ||
11 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 11 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
12 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 12 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S new file mode 100644 index 000000000000..dc7e5259770f --- /dev/null +++ b/arch/s390/kernel/base.S | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/base.S | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006,2007 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | * Michael Holzheu <holzheu@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/lowcore.h> | ||
11 | |||
12 | #ifdef CONFIG_64BIT | ||
13 | |||
14 | .globl s390_base_mcck_handler | ||
15 | s390_base_mcck_handler: | ||
16 | basr %r13,0 | ||
17 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
18 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
19 | larl %r1,s390_base_mcck_handler_fn | ||
20 | lg %r1,0(%r1) | ||
21 | ltgr %r1,%r1 | ||
22 | jz 1f | ||
23 | basr %r14,%r1 | ||
24 | 1: la %r1,4095 | ||
25 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) | ||
26 | lpswe __LC_MCK_OLD_PSW | ||
27 | |||
28 | .section .bss | ||
29 | .globl s390_base_mcck_handler_fn | ||
30 | s390_base_mcck_handler_fn: | ||
31 | .quad 0 | ||
32 | .previous | ||
33 | |||
34 | .globl s390_base_ext_handler | ||
35 | s390_base_ext_handler: | ||
36 | stmg %r0,%r15,__LC_SAVE_AREA | ||
37 | basr %r13,0 | ||
38 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | ||
39 | larl %r1,s390_base_ext_handler_fn | ||
40 | lg %r1,0(%r1) | ||
41 | ltgr %r1,%r1 | ||
42 | jz 1f | ||
43 | basr %r14,%r1 | ||
44 | 1: lmg %r0,%r15,__LC_SAVE_AREA | ||
45 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | ||
46 | lpswe __LC_EXT_OLD_PSW | ||
47 | |||
48 | .section .bss | ||
49 | .globl s390_base_ext_handler_fn | ||
50 | s390_base_ext_handler_fn: | ||
51 | .quad 0 | ||
52 | .previous | ||
53 | |||
54 | .globl s390_base_pgm_handler | ||
55 | s390_base_pgm_handler: | ||
56 | stmg %r0,%r15,__LC_SAVE_AREA | ||
57 | basr %r13,0 | ||
58 | 0: aghi %r15,-STACK_FRAME_OVERHEAD | ||
59 | larl %r1,s390_base_pgm_handler_fn | ||
60 | lg %r1,0(%r1) | ||
61 | ltgr %r1,%r1 | ||
62 | jz 1f | ||
63 | basr %r14,%r1 | ||
64 | lmg %r0,%r15,__LC_SAVE_AREA | ||
65 | lpswe __LC_PGM_OLD_PSW | ||
66 | 1: lpswe disabled_wait_psw-0b(%r13) | ||
67 | |||
68 | .align 8 | ||
69 | disabled_wait_psw: | ||
70 | .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler | ||
71 | |||
72 | .section .bss | ||
73 | .globl s390_base_pgm_handler_fn | ||
74 | s390_base_pgm_handler_fn: | ||
75 | .quad 0 | ||
76 | .previous | ||
77 | |||
78 | #else /* CONFIG_64BIT */ | ||
79 | |||
80 | .globl s390_base_mcck_handler | ||
81 | s390_base_mcck_handler: | ||
82 | basr %r13,0 | ||
83 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
84 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
85 | l %r1,2f-0b(%r13) | ||
86 | l %r1,0(%r1) | ||
87 | ltr %r1,%r1 | ||
88 | jz 1f | ||
89 | basr %r14,%r1 | ||
90 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
91 | lpsw __LC_MCK_OLD_PSW | ||
92 | |||
93 | 2: .long s390_base_mcck_handler_fn | ||
94 | |||
95 | .section .bss | ||
96 | .globl s390_base_mcck_handler_fn | ||
97 | s390_base_mcck_handler_fn: | ||
98 | .long 0 | ||
99 | .previous | ||
100 | |||
101 | .globl s390_base_ext_handler | ||
102 | s390_base_ext_handler: | ||
103 | stm %r0,%r15,__LC_SAVE_AREA | ||
104 | basr %r13,0 | ||
105 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
106 | l %r1,2f-0b(%r13) | ||
107 | l %r1,0(%r1) | ||
108 | ltr %r1,%r1 | ||
109 | jz 1f | ||
110 | basr %r14,%r1 | ||
111 | 1: lm %r0,%r15,__LC_SAVE_AREA | ||
112 | ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit | ||
113 | lpsw __LC_EXT_OLD_PSW | ||
114 | |||
115 | 2: .long s390_base_ext_handler_fn | ||
116 | |||
117 | .section .bss | ||
118 | .globl s390_base_ext_handler_fn | ||
119 | s390_base_ext_handler_fn: | ||
120 | .long 0 | ||
121 | .previous | ||
122 | |||
123 | .globl s390_base_pgm_handler | ||
124 | s390_base_pgm_handler: | ||
125 | stm %r0,%r15,__LC_SAVE_AREA | ||
126 | basr %r13,0 | ||
127 | 0: ahi %r15,-STACK_FRAME_OVERHEAD | ||
128 | l %r1,2f-0b(%r13) | ||
129 | l %r1,0(%r1) | ||
130 | ltr %r1,%r1 | ||
131 | jz 1f | ||
132 | basr %r14,%r1 | ||
133 | lm %r0,%r15,__LC_SAVE_AREA | ||
134 | lpsw __LC_PGM_OLD_PSW | ||
135 | |||
136 | 1: lpsw disabled_wait_psw-0b(%r13) | ||
137 | |||
138 | 2: .long s390_base_pgm_handler_fn | ||
139 | |||
140 | disabled_wait_psw: | ||
141 | .align 8 | ||
142 | .long 0x000a0000,0x00000000 + s390_base_pgm_handler | ||
143 | |||
144 | .section .bss | ||
145 | .globl s390_base_pgm_handler_fn | ||
146 | s390_base_pgm_handler_fn: | ||
147 | .long 0 | ||
148 | .previous | ||
149 | |||
150 | #endif /* CONFIG_64BIT */ | ||
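
base.S provides minimal machine-check, external-interrupt and program-check entry points whose behaviour is controlled by function pointers kept in .bss (s390_base_mcck_handler_fn and friends); if a pointer is still zero the stub simply restores the registers and returns via the old PSW, or loads a disabled-wait PSW in the program-check case. That lets very early boot code hook a plain C handler before the real lowcore and interrupt setup exists, which is exactly what early.c does further down. A hedged sketch of installing such a handler, mirroring setup_lowcore_early():

extern void s390_base_pgm_handler(void);
extern void (*s390_base_pgm_handler_fn)(void);

static void my_early_pgm_handler(void)
{
	/* e.g. resolve the fault via the exception table, or give up */
}

static void install_early_pgm_handler(void)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = my_early_pgm_handler;
}
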
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c index 5c46054195cb..f1e40ca00d8d 100644 --- a/arch/s390/kernel/binfmt_elf32.c +++ b/arch/s390/kernel/binfmt_elf32.c | |||
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>"); | |||
192 | 192 | ||
193 | #undef cputime_to_timeval | 193 | #undef cputime_to_timeval |
194 | #define cputime_to_timeval cputime_to_compat_timeval | 194 | #define cputime_to_timeval cputime_to_compat_timeval |
195 | static __inline__ void | 195 | static inline void |
196 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | 196 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) |
197 | { | 197 | { |
198 | value->tv_usec = cputime % 1000000; | 198 | value->tv_usec = cputime % 1000000; |
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c index 71d27c493568..914d49444f92 100644 --- a/arch/s390/kernel/compat_exec_domain.c +++ b/arch/s390/kernel/compat_exec_domain.c | |||
@@ -12,10 +12,9 @@ | |||
12 | #include <linux/personality.h> | 12 | #include <linux/personality.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | 14 | ||
15 | struct exec_domain s390_exec_domain; | 15 | static struct exec_domain s390_exec_domain; |
16 | 16 | ||
17 | static int __init | 17 | static int __init s390_init (void) |
18 | s390_init (void) | ||
19 | { | 18 | { |
20 | s390_exec_domain.name = "Linux/s390"; | 19 | s390_exec_domain.name = "Linux/s390"; |
21 | s390_exec_domain.handler = NULL; | 20 | s390_exec_domain.handler = NULL; |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 5b33f823863a..666bb6daa148 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -69,6 +69,12 @@ | |||
69 | 69 | ||
70 | #include "compat_linux.h" | 70 | #include "compat_linux.h" |
71 | 71 | ||
72 | long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
73 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
74 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | ||
75 | long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | | ||
76 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
77 | PSW32_MASK_PSTATE); | ||
72 | 78 | ||
73 | /* For this source file, we want overflow handling. */ | 79 | /* For this source file, we want overflow handling. */ |
74 | 80 | ||
@@ -416,7 +422,7 @@ asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info) | |||
416 | mm_segment_t old_fs = get_fs (); | 422 | mm_segment_t old_fs = get_fs (); |
417 | 423 | ||
418 | set_fs (KERNEL_DS); | 424 | set_fs (KERNEL_DS); |
419 | ret = sys_sysinfo((struct sysinfo __user *) &s); | 425 | ret = sys_sysinfo((struct sysinfo __force __user *) &s); |
420 | set_fs (old_fs); | 426 | set_fs (old_fs); |
421 | err = put_user (s.uptime, &info->uptime); | 427 | err = put_user (s.uptime, &info->uptime); |
422 | err |= __put_user (s.loads[0], &info->loads[0]); | 428 | err |= __put_user (s.loads[0], &info->loads[0]); |
@@ -445,7 +451,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid, | |||
445 | mm_segment_t old_fs = get_fs (); | 451 | mm_segment_t old_fs = get_fs (); |
446 | 452 | ||
447 | set_fs (KERNEL_DS); | 453 | set_fs (KERNEL_DS); |
448 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); | 454 | ret = sys_sched_rr_get_interval(pid, |
455 | (struct timespec __force __user *) &t); | ||
449 | set_fs (old_fs); | 456 | set_fs (old_fs); |
450 | if (put_compat_timespec(&t, interval)) | 457 | if (put_compat_timespec(&t, interval)) |
451 | return -EFAULT; | 458 | return -EFAULT; |
@@ -472,8 +479,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
472 | } | 479 | } |
473 | set_fs (KERNEL_DS); | 480 | set_fs (KERNEL_DS); |
474 | ret = sys_rt_sigprocmask(how, | 481 | ret = sys_rt_sigprocmask(how, |
475 | set ? (sigset_t __user *) &s : NULL, | 482 | set ? (sigset_t __force __user *) &s : NULL, |
476 | oset ? (sigset_t __user *) &s : NULL, | 483 | oset ? (sigset_t __force __user *) &s : NULL, |
477 | sigsetsize); | 484 | sigsetsize); |
478 | set_fs (old_fs); | 485 | set_fs (old_fs); |
479 | if (ret) return ret; | 486 | if (ret) return ret; |
@@ -499,7 +506,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, | |||
499 | mm_segment_t old_fs = get_fs(); | 506 | mm_segment_t old_fs = get_fs(); |
500 | 507 | ||
501 | set_fs (KERNEL_DS); | 508 | set_fs (KERNEL_DS); |
502 | ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); | 509 | ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize); |
503 | set_fs (old_fs); | 510 | set_fs (old_fs); |
504 | if (!ret) { | 511 | if (!ret) { |
505 | switch (_NSIG_WORDS) { | 512 | switch (_NSIG_WORDS) { |
@@ -524,7 +531,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo) | |||
524 | if (copy_siginfo_from_user32(&info, uinfo)) | 531 | if (copy_siginfo_from_user32(&info, uinfo)) |
525 | return -EFAULT; | 532 | return -EFAULT; |
526 | set_fs (KERNEL_DS); | 533 | set_fs (KERNEL_DS); |
527 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); | 534 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info); |
528 | set_fs (old_fs); | 535 | set_fs (old_fs); |
529 | return ret; | 536 | return ret; |
530 | } | 537 | } |
@@ -682,7 +689,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse | |||
682 | 689 | ||
683 | set_fs(KERNEL_DS); | 690 | set_fs(KERNEL_DS); |
684 | ret = sys_sendfile(out_fd, in_fd, | 691 | ret = sys_sendfile(out_fd, in_fd, |
685 | offset ? (off_t __user *) &of : NULL, count); | 692 | offset ? (off_t __force __user *) &of : NULL, count); |
686 | set_fs(old_fs); | 693 | set_fs(old_fs); |
687 | 694 | ||
688 | if (offset && put_user(of, offset)) | 695 | if (offset && put_user(of, offset)) |
@@ -703,7 +710,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd, | |||
703 | 710 | ||
704 | set_fs(KERNEL_DS); | 711 | set_fs(KERNEL_DS); |
705 | ret = sys_sendfile64(out_fd, in_fd, | 712 | ret = sys_sendfile64(out_fd, in_fd, |
706 | offset ? (loff_t __user *) &lof : NULL, count); | 713 | offset ? (loff_t __force __user *) &lof : NULL, |
714 | count); | ||
707 | set_fs(old_fs); | 715 | set_fs(old_fs); |
708 | 716 | ||
709 | if (offset && put_user(lof, offset)) | 717 | if (offset && put_user(lof, offset)) |
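
The sys32_* wrappers in compat_linux.c switch the address-space limit with set_fs(KERNEL_DS) so that a native syscall can be handed an on-stack kernel buffer through a parameter that is declared __user; the newly added __force tells sparse that dropping the address-space annotation in those casts is intentional, so the generated code is unchanged and only the static-checker noise goes away. A hedged, generic sketch of the pattern (struct example/example32, sys_example() and put_example_compat() are illustrative, not symbols from the patch):

asmlinkage long sys32_example(struct example32 __user *uptr)
{
	struct example k;		/* native 64-bit layout */
	mm_segment_t old_fs = get_fs();
	long ret;

	set_fs(KERNEL_DS);		/* let the syscall accept &k */
	ret = sys_example((struct example __force __user *) &k);
	set_fs(old_fs);

	if (!ret && put_example_compat(&k, uptr))
		return -EFAULT;
	return ret;
}
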
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h index 1a18e29668ef..e89f8c0c42a0 100644 --- a/arch/s390/kernel/compat_linux.h +++ b/arch/s390/kernel/compat_linux.h | |||
@@ -115,37 +115,6 @@ typedef struct | |||
115 | __u32 addr; | 115 | __u32 addr; |
116 | } _psw_t32 __attribute__ ((aligned(8))); | 116 | } _psw_t32 __attribute__ ((aligned(8))); |
117 | 117 | ||
118 | #define PSW32_MASK_PER 0x40000000UL | ||
119 | #define PSW32_MASK_DAT 0x04000000UL | ||
120 | #define PSW32_MASK_IO 0x02000000UL | ||
121 | #define PSW32_MASK_EXT 0x01000000UL | ||
122 | #define PSW32_MASK_KEY 0x00F00000UL | ||
123 | #define PSW32_MASK_MCHECK 0x00040000UL | ||
124 | #define PSW32_MASK_WAIT 0x00020000UL | ||
125 | #define PSW32_MASK_PSTATE 0x00010000UL | ||
126 | #define PSW32_MASK_ASC 0x0000C000UL | ||
127 | #define PSW32_MASK_CC 0x00003000UL | ||
128 | #define PSW32_MASK_PM 0x00000f00UL | ||
129 | |||
130 | #define PSW32_ADDR_AMODE31 0x80000000UL | ||
131 | #define PSW32_ADDR_INSN 0x7FFFFFFFUL | ||
132 | |||
133 | #define PSW32_BASE_BITS 0x00080000UL | ||
134 | |||
135 | #define PSW32_ASC_PRIMARY 0x00000000UL | ||
136 | #define PSW32_ASC_ACCREG 0x00004000UL | ||
137 | #define PSW32_ASC_SECONDARY 0x00008000UL | ||
138 | #define PSW32_ASC_HOME 0x0000C000UL | ||
139 | |||
140 | #define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \ | ||
141 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \ | ||
142 | PSW32_MASK_PSTATE) | ||
143 | |||
144 | #define PSW32_MASK_MERGE(CURRENT,NEW) \ | ||
145 | (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \ | ||
146 | ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM))) | ||
147 | |||
148 | |||
149 | typedef struct | 118 | typedef struct |
150 | { | 119 | { |
151 | _psw_t32 psw; | 120 | _psw_t32 psw; |
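
The PSW32_* constants deleted here are relocated to an architecture header elsewhere in this series (outside this excerpt), and the fixed PSW32_USER_BITS macro is replaced by the psw32_user_bits variable defined in compat_linux.c above. Turning the macro into a variable is what allows the default user address-space-control bits to be retuned at boot when the new CONFIG_S390_SWITCH_AMODE mode is enabled; a loosely hedged sketch of what such an adjustment could look like (switch_amode_enabled() and the chosen ASC value are purely illustrative):

void __init adjust_psw32_user_bits(void)
{
	if (switch_amode_enabled())
		psw32_user_bits = (psw32_user_bits & ~PSW32_MASK_ASC) |
				  PSW32_ASC_PRIMARY;
}
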
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 861888ab8c13..887a9881d0d0 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss, | |||
275 | } | 275 | } |
276 | 276 | ||
277 | set_fs (KERNEL_DS); | 277 | set_fs (KERNEL_DS); |
278 | ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL), | 278 | ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL), |
279 | (stack_t __user *) (uoss ? &koss : NULL), | 279 | (stack_t __force __user *) (uoss ? &koss : NULL), |
280 | regs->gprs[15]); | 280 | regs->gprs[15]); |
281 | set_fs (old_fs); | 281 | set_fs (old_fs); |
282 | 282 | ||
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) | |||
298 | _s390_regs_common32 regs32; | 298 | _s390_regs_common32 regs32; |
299 | int err, i; | 299 | int err, i; |
300 | 300 | ||
301 | regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS, | 301 | regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits, |
302 | (__u32)(regs->psw.mask >> 32)); | 302 | (__u32)(regs->psw.mask >> 32)); |
303 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; | 303 | regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr; |
304 | for (i = 0; i < NUM_GPRS; i++) | 304 | for (i = 0; i < NUM_GPRS; i++) |
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs) | |||
401 | goto badframe; | 401 | goto badframe; |
402 | 402 | ||
403 | set_fs (KERNEL_DS); | 403 | set_fs (KERNEL_DS); |
404 | do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]); | 404 | do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]); |
405 | set_fs (old_fs); | 405 | set_fs (old_fs); |
406 | 406 | ||
407 | return regs->gprs[2]; | 407 | return regs->gprs[2]; |
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index a5972f1541fe..6c89f30c8e31 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
17 | #include <asm/cpcmd.h> | 17 | #include <asm/cpcmd.h> |
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | #include <asm/io.h> | ||
19 | 20 | ||
20 | static DEFINE_SPINLOCK(cpcmd_lock); | 21 | static DEFINE_SPINLOCK(cpcmd_lock); |
21 | static char cpcmd_buf[241]; | 22 | static char cpcmd_buf[241]; |
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
88 | int len; | 89 | int len; |
89 | unsigned long flags; | 90 | unsigned long flags; |
90 | 91 | ||
91 | if ((rlen == 0) || (response == NULL) | 92 | if ((virt_to_phys(response) != (unsigned long) response) || |
92 | || !((unsigned long)response >> 31)) { | 93 | (((unsigned long)response + rlen) >> 31)) { |
93 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
94 | len = __cpcmd(cmd, response, rlen, response_code); | ||
95 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
96 | } | ||
97 | else { | ||
98 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); | 94 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); |
99 | if (!lowbuf) { | 95 | if (!lowbuf) { |
100 | printk(KERN_WARNING | 96 | printk(KERN_WARNING |
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
106 | spin_unlock_irqrestore(&cpcmd_lock, flags); | 102 | spin_unlock_irqrestore(&cpcmd_lock, flags); |
107 | memcpy(response, lowbuf, rlen); | 103 | memcpy(response, lowbuf, rlen); |
108 | kfree(lowbuf); | 104 | kfree(lowbuf); |
105 | } else { | ||
106 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
107 | len = __cpcmd(cmd, response, rlen, response_code); | ||
108 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
109 | } | 109 | } |
110 | return len; | 110 | return len; |
111 | } | 111 | } |
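
cpcmd() has to hand CP a response buffer that is identity mapped and sits entirely below 2 GB. The rewritten check uses virt_to_phys() to catch non-identity-mapped (e.g. vmalloc'ed) buffers and also rejects a buffer whose end would cross the 2 GB line; only in those cases is the response bounced through a temporary kmalloc(GFP_DMA) buffer, otherwise __cpcmd() is called on the caller's buffer directly. A hedged usage sketch ("QUERY USERID" is a real CP command; the helper itself is illustrative):

static int query_userid(char *buf, int len)
{
	int rc;

	/* If buf is identity mapped and ends below 2 GB, cpcmd() runs
	 * __cpcmd() on it directly; for vmalloc memory or a buffer
	 * crossing the 2 GB line it copies the response through a
	 * kmalloc(GFP_DMA) bounce buffer instead. */
	cpcmd("QUERY USERID", buf, len, &rc);
	return rc;
}
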
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c index 926cceeae0fa..8cc7c9fa64f5 100644 --- a/arch/s390/kernel/crash.c +++ b/arch/s390/kernel/crash.c | |||
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/threads.h> | 10 | #include <linux/threads.h> |
11 | #include <linux/kexec.h> | 11 | #include <linux/kexec.h> |
12 | #include <linux/reboot.h> | ||
12 | 13 | ||
13 | void machine_crash_shutdown(struct pt_regs *regs) | 14 | void machine_crash_shutdown(struct pt_regs *regs) |
14 | { | 15 | { |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index bb57bc0e3fc8..f4b62df02aa2 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = { | |||
120 | NULL | 120 | NULL |
121 | }; | 121 | }; |
122 | 122 | ||
123 | struct debug_view debug_level_view = { | 123 | static struct debug_view debug_level_view = { |
124 | "level", | 124 | "level", |
125 | &debug_prolog_level_fn, | 125 | &debug_prolog_level_fn, |
126 | NULL, | 126 | NULL, |
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = { | |||
129 | NULL | 129 | NULL |
130 | }; | 130 | }; |
131 | 131 | ||
132 | struct debug_view debug_pages_view = { | 132 | static struct debug_view debug_pages_view = { |
133 | "pages", | 133 | "pages", |
134 | &debug_prolog_pages_fn, | 134 | &debug_prolog_pages_fn, |
135 | NULL, | 135 | NULL, |
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = { | |||
138 | NULL | 138 | NULL |
139 | }; | 139 | }; |
140 | 140 | ||
141 | struct debug_view debug_flush_view = { | 141 | static struct debug_view debug_flush_view = { |
142 | "flush", | 142 | "flush", |
143 | NULL, | 143 | NULL, |
144 | NULL, | 144 | NULL, |
@@ -156,14 +156,14 @@ struct debug_view debug_sprintf_view = { | |||
156 | NULL | 156 | NULL |
157 | }; | 157 | }; |
158 | 158 | ||
159 | 159 | /* used by dump analysis tools to determine version of debug feature */ | |
160 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; | 160 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; |
161 | 161 | ||
162 | /* static globals */ | 162 | /* static globals */ |
163 | 163 | ||
164 | static debug_info_t *debug_area_first = NULL; | 164 | static debug_info_t *debug_area_first = NULL; |
165 | static debug_info_t *debug_area_last = NULL; | 165 | static debug_info_t *debug_area_last = NULL; |
166 | DECLARE_MUTEX(debug_lock); | 166 | static DECLARE_MUTEX(debug_lock); |
167 | 167 | ||
168 | static int initialized; | 168 | static int initialized; |
169 | 169 | ||
@@ -905,7 +905,7 @@ static struct ctl_table s390dbf_dir_table[] = { | |||
905 | { .ctl_name = 0 } | 905 | { .ctl_name = 0 } |
906 | }; | 906 | }; |
907 | 907 | ||
908 | struct ctl_table_header *s390dbf_sysctl_header; | 908 | static struct ctl_table_header *s390dbf_sysctl_header; |
909 | 909 | ||
910 | void | 910 | void |
911 | debug_stop_all(void) | 911 | debug_stop_all(void) |
@@ -1300,8 +1300,7 @@ out: | |||
1300 | * flushes debug areas | 1300 | * flushes debug areas |
1301 | */ | 1301 | */ |
1302 | 1302 | ||
1303 | void | 1303 | static void debug_flush(debug_info_t* id, int area) |
1304 | debug_flush(debug_info_t* id, int area) | ||
1305 | { | 1304 | { |
1306 | unsigned long flags; | 1305 | unsigned long flags; |
1307 | int i,j; | 1306 | int i,j; |
@@ -1511,8 +1510,7 @@ out: | |||
1511 | /* | 1510 | /* |
1512 | * clean up module | 1511 | * clean up module |
1513 | */ | 1512 | */ |
1514 | void | 1513 | static void __exit debug_exit(void) |
1515 | __exit debug_exit(void) | ||
1516 | { | 1514 | { |
1517 | debugfs_remove(debug_debugfs_root_entry); | 1515 | debugfs_remove(debug_debugfs_root_entry); |
1518 | unregister_sysctl_table(s390dbf_sysctl_header); | 1516 | unregister_sysctl_table(s390dbf_sysctl_header); |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c new file mode 100644 index 000000000000..e518dd53eff5 --- /dev/null +++ b/arch/s390/kernel/early.c | |||
@@ -0,0 +1,306 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/early.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * Author(s): Hongjie Yang <hongjie@us.ibm.com>, | ||
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/ctype.h> | ||
13 | #include <linux/lockdep.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/pfn.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | #include <asm/lowcore.h> | ||
18 | #include <asm/processor.h> | ||
19 | #include <asm/sections.h> | ||
20 | #include <asm/setup.h> | ||
21 | #include <asm/cpcmd.h> | ||
22 | #include <asm/sclp.h> | ||
23 | |||
24 | /* | ||
25 | * Create a Kernel NSS if the SAVESYS= parameter is defined | ||
26 | */ | ||
27 | #define DEFSYS_CMD_SIZE 96 | ||
28 | #define SAVESYS_CMD_SIZE 32 | ||
29 | |||
30 | char kernel_nss_name[NSS_NAME_SIZE + 1]; | ||
31 | |||
32 | #ifdef CONFIG_SHARED_KERNEL | ||
33 | static noinline __init void create_kernel_nss(void) | ||
34 | { | ||
35 | unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; | ||
36 | #ifdef CONFIG_BLK_DEV_INITRD | ||
37 | unsigned int sinitrd_pfn, einitrd_pfn; | ||
38 | #endif | ||
39 | int response; | ||
40 | char *savesys_ptr; | ||
41 | char upper_command_line[COMMAND_LINE_SIZE]; | ||
42 | char defsys_cmd[DEFSYS_CMD_SIZE]; | ||
43 | char savesys_cmd[SAVESYS_CMD_SIZE]; | ||
44 | |||
45 | /* Do nothing if we are not running under VM */ | ||
46 | if (!MACHINE_IS_VM) | ||
47 | return; | ||
48 | |||
49 | /* Convert COMMAND_LINE to upper case */ | ||
50 | for (i = 0; i < strlen(COMMAND_LINE); i++) | ||
51 | upper_command_line[i] = toupper(COMMAND_LINE[i]); | ||
52 | |||
53 | savesys_ptr = strstr(upper_command_line, "SAVESYS="); | ||
54 | |||
55 | if (!savesys_ptr) | ||
56 | return; | ||
57 | |||
58 | savesys_ptr += 8; /* Point to the beginning of the NSS name */ | ||
59 | for (i = 0; i < NSS_NAME_SIZE; i++) { | ||
60 | if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0') | ||
61 | break; | ||
62 | kernel_nss_name[i] = savesys_ptr[i]; | ||
63 | } | ||
64 | |||
65 | stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
66 | eshared_pfn = PFN_DOWN(__pa(&_eshared)); | ||
67 | end_pfn = PFN_UP(__pa(&_end)); | ||
68 | min_size = end_pfn << 2; | ||
69 | |||
70 | sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X", | ||
71 | kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1, | ||
72 | eshared_pfn, end_pfn); | ||
73 | |||
74 | #ifdef CONFIG_BLK_DEV_INITRD | ||
75 | if (INITRD_START && INITRD_SIZE) { | ||
76 | sinitrd_pfn = PFN_DOWN(__pa(INITRD_START)); | ||
77 | einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE)); | ||
78 | min_size = einitrd_pfn << 2; | ||
79 | sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd, | ||
80 | sinitrd_pfn, einitrd_pfn); | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size); | ||
85 | sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", | ||
86 | kernel_nss_name, kernel_nss_name); | ||
87 | |||
88 | __cpcmd(defsys_cmd, NULL, 0, &response); | ||
89 | |||
90 | if (response != 0) | ||
91 | return; | ||
92 | |||
93 | __cpcmd(savesys_cmd, NULL, 0, &response); | ||
94 | |||
95 | if (response != strlen(savesys_cmd)) | ||
96 | return; | ||
97 | |||
98 | ipl_flags = IPL_NSS_VALID; | ||
99 | } | ||
100 | |||
101 | #else /* CONFIG_SHARED_KERNEL */ | ||
102 | |||
103 | static inline void create_kernel_nss(void) { } | ||
104 | |||
105 | #endif /* CONFIG_SHARED_KERNEL */ | ||
106 | |||
107 | /* | ||
108 | * Clear bss memory | ||
109 | */ | ||
110 | static noinline __init void clear_bss_section(void) | ||
111 | { | ||
112 | memset(__bss_start, 0, _end - __bss_start); | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Initialize storage key for kernel pages | ||
117 | */ | ||
118 | static noinline __init void init_kernel_storage_key(void) | ||
119 | { | ||
120 | unsigned long end_pfn, init_pfn; | ||
121 | |||
122 | end_pfn = PFN_UP(__pa(&_end)); | ||
123 | |||
124 | for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++) | ||
125 | page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); | ||
126 | } | ||
127 | |||
128 | static noinline __init void detect_machine_type(void) | ||
129 | { | ||
130 | struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; | ||
131 | |||
132 | asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id)); | ||
133 | |||
134 | /* Running under z/VM ? */ | ||
135 | if (cpuinfo->cpu_id.version == 0xff) | ||
136 | machine_flags |= 1; | ||
137 | |||
138 | /* Running on a P/390 ? */ | ||
139 | if (cpuinfo->cpu_id.machine == 0x7490) | ||
140 | machine_flags |= 4; | ||
141 | } | ||
142 | |||
143 | static noinline __init int memory_fast_detect(void) | ||
144 | { | ||
145 | |||
146 | unsigned long val0 = 0; | ||
147 | unsigned long val1 = 0xc; | ||
148 | int ret = -ENOSYS; | ||
149 | |||
150 | if (ipl_flags & IPL_NSS_VALID) | ||
151 | return -ENOSYS; | ||
152 | |||
153 | asm volatile( | ||
154 | " diag %1,%2,0x260\n" | ||
155 | "0: lhi %0,0\n" | ||
156 | "1:\n" | ||
157 | EX_TABLE(0b,1b) | ||
158 | : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc"); | ||
159 | |||
160 | if (ret || val0 != val1) | ||
161 | return -ENOSYS; | ||
162 | |||
163 | memory_chunk[0].size = val0; | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | #define ADDR2G (1UL << 31) | ||
168 | |||
169 | static noinline __init unsigned long sclp_memory_detect(void) | ||
170 | { | ||
171 | struct sclp_readinfo_sccb *sccb; | ||
172 | unsigned long long memsize; | ||
173 | |||
174 | sccb = &s390_readinfo_sccb; | ||
175 | |||
176 | if (sccb->header.response_code != 0x10) | ||
177 | return 0; | ||
178 | |||
179 | if (sccb->rnsize) | ||
180 | memsize = sccb->rnsize << 20; | ||
181 | else | ||
182 | memsize = sccb->rnsize2 << 20; | ||
183 | if (sccb->rnmax) | ||
184 | memsize *= sccb->rnmax; | ||
185 | else | ||
186 | memsize *= sccb->rnmax2; | ||
187 | #ifndef CONFIG_64BIT | ||
188 | /* | ||
189 | * Can't deal with more than 2G in 31 bit addressing mode, so | ||
190 | * limit the value in order to avoid strange side effects. | ||
191 | */ | ||
192 | if (memsize > ADDR2G) | ||
193 | memsize = ADDR2G; | ||
194 | #endif | ||
195 | return (unsigned long) memsize; | ||
196 | } | ||
197 | |||
198 | static inline __init unsigned long __tprot(unsigned long addr) | ||
199 | { | ||
200 | int cc = -1; | ||
201 | |||
202 | asm volatile( | ||
203 | " tprot 0(%1),0\n" | ||
204 | "0: ipm %0\n" | ||
205 | " srl %0,28\n" | ||
206 | "1:\n" | ||
207 | EX_TABLE(0b,1b) | ||
208 | : "+d" (cc) : "a" (addr) : "cc"); | ||
209 | return (unsigned long)cc; | ||
210 | } | ||
211 | |||
212 | /* Checking memory in 128KB increments. */ | ||
213 | #define CHUNK_INCR (1UL << 17) | ||
214 | |||
215 | static noinline __init void find_memory_chunks(unsigned long memsize) | ||
216 | { | ||
217 | unsigned long addr = 0, old_addr = 0; | ||
218 | unsigned long old_cc = CHUNK_READ_WRITE; | ||
219 | unsigned long cc; | ||
220 | int chunk = 0; | ||
221 | |||
222 | while (chunk < MEMORY_CHUNKS) { | ||
223 | cc = __tprot(addr); | ||
224 | while (cc == old_cc) { | ||
225 | addr += CHUNK_INCR; | ||
226 | cc = __tprot(addr); | ||
227 | #ifndef CONFIG_64BIT | ||
228 | if (addr == ADDR2G) | ||
229 | break; | ||
230 | #endif | ||
231 | } | ||
232 | |||
233 | if (old_addr != addr && | ||
234 | (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) { | ||
235 | memory_chunk[chunk].addr = old_addr; | ||
236 | memory_chunk[chunk].size = addr - old_addr; | ||
237 | memory_chunk[chunk].type = old_cc; | ||
238 | chunk++; | ||
239 | } | ||
240 | |||
241 | old_addr = addr; | ||
242 | old_cc = cc; | ||
243 | |||
244 | #ifndef CONFIG_64BIT | ||
245 | if (addr == ADDR2G) | ||
246 | break; | ||
247 | #endif | ||
248 | /* | ||
249 | * Finish memory detection at the first hole, unless | ||
250 | * - we reached the hsa -> skip it. | ||
251 | * - we know there must be more. | ||
252 | */ | ||
253 | if (cc == -1UL && !memsize && old_addr != ADDR2G) | ||
254 | break; | ||
255 | if (memsize && addr >= memsize) | ||
256 | break; | ||
257 | } | ||
258 | } | ||
259 | |||
260 | static __init void early_pgm_check_handler(void) | ||
261 | { | ||
262 | unsigned long addr; | ||
263 | const struct exception_table_entry *fixup; | ||
264 | |||
265 | addr = S390_lowcore.program_old_psw.addr; | ||
266 | fixup = search_exception_tables(addr & PSW_ADDR_INSN); | ||
267 | if (!fixup) | ||
268 | disabled_wait(0); | ||
269 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; | ||
270 | } | ||
271 | |||
272 | static noinline __init void setup_lowcore_early(void) | ||
273 | { | ||
274 | psw_t psw; | ||
275 | |||
276 | psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | ||
277 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; | ||
278 | S390_lowcore.external_new_psw = psw; | ||
279 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | ||
280 | S390_lowcore.program_new_psw = psw; | ||
281 | s390_base_pgm_handler_fn = early_pgm_check_handler; | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * Save ipl parameters, clear bss memory, initialize storage keys | ||
286 | * and create a kernel NSS at startup if the SAVESYS= parm is defined | ||
287 | */ | ||
288 | void __init startup_init(void) | ||
289 | { | ||
290 | unsigned long memsize; | ||
291 | |||
292 | ipl_save_parameters(); | ||
293 | clear_bss_section(); | ||
294 | init_kernel_storage_key(); | ||
295 | lockdep_init(); | ||
296 | lockdep_off(); | ||
297 | detect_machine_type(); | ||
298 | create_kernel_nss(); | ||
299 | sort_main_extable(); | ||
300 | setup_lowcore_early(); | ||
301 | sclp_readinfo_early(); | ||
302 | memsize = sclp_memory_detect(); | ||
303 | if (memory_fast_detect() < 0) | ||
304 | find_memory_chunks(memsize); | ||
305 | lockdep_on(); | ||
306 | } | ||
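
startup_init() now does in C what head31.S/head64.S used to do in assembly: save the IPL parameters, clear .bss, set the storage keys, detect the machine type, optionally define and save a kernel NSS, install a temporary program-check handler on top of the base.S stubs, and size memory (fast path via DIAG 0x260 under z/VM, otherwise SCLP read-info plus tprot probing in 128 KB steps). As a worked example of the NSS commands, the small user-space program below reproduces the two sprintf() calls from create_kernel_nss() with assumed values (an NSS named LINUX, _stext at pfn 0x100, _eshared at pfn 0x300, _end at pfn 0x400, no initrd); none of these numbers come from the patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char defsys_cmd[96], savesys_cmd[32];
	unsigned int stext_pfn = 0x100, eshared_pfn = 0x300, end_pfn = 0x400;
	int min_size = end_pfn << 2;	/* image size in KB */

	sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
		"LINUX", stext_pfn - 1, stext_pfn, eshared_pfn - 1,
		eshared_pfn, end_pfn);
	sprintf(defsys_cmd + strlen(defsys_cmd), " EW MINSIZE=%.7iK", min_size);
	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s", "LINUX", "LINUX");

	/* prints:
	 * DEFSYS LINUX 00000-000FF EW 00100-002FF SR 00300-00400 EW MINSIZE=0004096K
	 * SAVESYS LINUX
	 *  IPL LINUX
	 */
	printf("%s\n%s\n", defsys_cmd, savesys_cmd);
	return 0;
}
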
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c index bb0f973137f0..cc0dc609d738 100644 --- a/arch/s390/kernel/ebcdic.c +++ b/arch/s390/kernel/ebcdic.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <asm/types.h> | 13 | #include <asm/types.h> |
14 | #include <asm/ebcdic.h> | ||
14 | 15 | ||
15 | /* | 16 | /* |
16 | * ASCII (IBM PC 437) -> EBCDIC 037 | 17 | * ASCII (IBM PC 437) -> EBCDIC 037 |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index eca507050e47..453fd3b4edea 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -51,176 +51,15 @@ startup_continue: | |||
51 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 51 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
52 | ahi %r15,-96 | 52 | ahi %r15,-96 |
53 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | 53 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain |
54 | |||
55 | l %r14,.Lipl_save_parameters-.LPG1(%r13) | ||
56 | basr %r14,%r14 | ||
57 | # | 54 | # |
58 | # clear bss memory | 55 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
56 | # and create a kernel NSS if the SAVESYS= parm is defined | ||
59 | # | 57 | # |
60 | l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss | 58 | l %r14,.Lstartup_init-.LPG1(%r13) |
61 | l %r3,.Lbss_end-.LPG1(%r13) # end of bss | 59 | basr %r14,%r14 |
62 | sr %r3,%r2 # length of bss | ||
63 | sr %r4,%r4 | ||
64 | sr %r5,%r5 # set src,length and pad to zero | ||
65 | sr %r0,%r0 | ||
66 | mvcle %r2,%r4,0 # clear mem | ||
67 | jo .-4 # branch back, if not finish | ||
68 | |||
69 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | ||
70 | .Lservicecall: | ||
71 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | ||
72 | |||
73 | stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0 | ||
74 | la %r1,0x200 # set bit 22 | ||
75 | o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1 | ||
76 | st %r1,.Lcr-.LPG1(%r13) | ||
77 | lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0 | ||
78 | |||
79 | mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw | ||
80 | la %r1, .Lsclph-.LPG1(%r13) | ||
81 | a %r1,__LC_EXT_NEW_PSW+4 # set handler | ||
82 | st %r1,__LC_EXT_NEW_PSW+4 | ||
83 | |||
84 | l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff | ||
85 | lr %r1,%r4 # our sccb | ||
86 | .insn rre,0xb2200000,%r2,%r1 # service call | ||
87 | ipm %r1 | ||
88 | srl %r1,28 # get cc code | ||
89 | xr %r3, %r3 | ||
90 | chi %r1,3 | ||
91 | be .Lfchunk-.LPG1(%r13) # leave | ||
92 | chi %r1,2 | ||
93 | be .Lservicecall-.LPG1(%r13) | ||
94 | lpsw .Lwaitsclp-.LPG1(%r13) | ||
95 | .Lsclph: | ||
96 | lh %r1,.Lsccbr-.Lsccb(%r4) | ||
97 | chi %r1,0x10 # 0x0010 is the sucess code | ||
98 | je .Lprocsccb # let's process the sccb | ||
99 | chi %r1,0x1f0 | ||
100 | bne .Lfchunk-.LPG1(%r13) # unhandled error code | ||
101 | c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced | ||
102 | bne .Lfchunk-.LPG1(%r13) # if no, give up | ||
103 | l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP | ||
104 | b .Lservicecall-.LPG1(%r13) | ||
105 | .Lprocsccb: | ||
106 | lhi %r1,0 | ||
107 | icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0 | ||
108 | jnz .Lscnd | ||
109 | lhi %r1,0x800 # otherwise report 2GB | ||
110 | .Lscnd: | ||
111 | lhi %r3,0x800 # limit reported memory size to 2GB | ||
112 | cr %r1,%r3 | ||
113 | jl .Lno2gb | ||
114 | lr %r1,%r3 | ||
115 | .Lno2gb: | ||
116 | xr %r3,%r3 # same logic | ||
117 | ic %r3,.Lscpa1-.Lsccb(%r4) | ||
118 | chi %r3,0x00 | ||
119 | jne .Lcompmem | ||
120 | l %r3,.Lscpa2-.Lsccb(%r4) | ||
121 | .Lcompmem: | ||
122 | mr %r2,%r1 # mem in MB on 128-bit | ||
123 | l %r1,.Lonemb-.LPG1(%r13) | ||
124 | mr %r2,%r1 # mem size in bytes in %r3 | ||
125 | b .Lfchunk-.LPG1(%r13) | ||
126 | |||
127 | .align 4 | ||
128 | .Lipl_save_parameters: | ||
129 | .long ipl_save_parameters | ||
130 | .Linittu: | ||
131 | .long init_thread_union | ||
132 | .Lpmask: | ||
133 | .byte 0 | ||
134 | .align 8 | ||
135 | .Lpcext:.long 0x00080000,0x80000000 | ||
136 | .Lcr: | ||
137 | .long 0x00 # place holder for cr0 | ||
138 | .align 8 | ||
139 | .Lwaitsclp: | ||
140 | .long 0x010a0000,0x80000000 + .Lsclph | ||
141 | .Lrcp: | ||
142 | .int 0x00120001 # Read SCP forced code | ||
143 | .Lrcp2: | ||
144 | .int 0x00020001 # Read SCP code | ||
145 | .Lonemb: | ||
146 | .int 0x100000 | ||
147 | .Lfchunk: | ||
148 | 60 | ||
149 | # | ||
150 | # find memory chunks. | ||
151 | # | ||
152 | lr %r9,%r3 # end of mem | ||
153 | mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13) | ||
154 | la %r1,1 # test in increments of 128KB | ||
155 | sll %r1,17 | ||
156 | l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array | ||
157 | slr %r4,%r4 # set start of chunk to zero | ||
158 | slr %r5,%r5 # set end of chunk to zero | ||
159 | slr %r6,%r6 # set access code to zero | ||
160 | la %r10,MEMORY_CHUNKS # number of chunks | ||
161 | .Lloop: | ||
162 | tprot 0(%r5),0 # test protection of first byte | ||
163 | ipm %r7 | ||
164 | srl %r7,28 | ||
165 | clr %r6,%r7 # compare cc with last access code | ||
166 | be .Lsame-.LPG1(%r13) | ||
167 | lhi %r8,0 # no program checks | ||
168 | b .Lsavchk-.LPG1(%r13) | ||
169 | .Lsame: | ||
170 | ar %r5,%r1 # add 128KB to end of chunk | ||
171 | bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop | ||
172 | .Lchkmem: # > 2GB or tprot got a program check | ||
173 | lhi %r8,1 # set program check flag | ||
174 | .Lsavchk: | ||
175 | clr %r4,%r5 # chunk size > 0? | ||
176 | be .Lchkloop-.LPG1(%r13) | ||
177 | st %r4,0(%r3) # store start address of chunk | ||
178 | lr %r0,%r5 | ||
179 | slr %r0,%r4 | ||
180 | st %r0,4(%r3) # store size of chunk | ||
181 | st %r6,8(%r3) # store type of chunk | ||
182 | la %r3,12(%r3) | ||
183 | ahi %r10,-1 # update chunk number | ||
184 | .Lchkloop: | ||
185 | lr %r6,%r7 # set access code to last cc | ||
186 | # we got an exception or we're starting a new | ||
187 | # chunk , we must check if we should | ||
188 | # still try to find valid memory (if we detected | ||
189 | # the amount of available storage), and if we | ||
190 | # have chunks left | ||
191 | xr %r0,%r0 | ||
192 | clr %r0,%r9 # did we detect memory? | ||
193 | je .Ldonemem # if not, leave | ||
194 | chi %r10,0 # do we have chunks left? | ||
195 | je .Ldonemem | ||
196 | chi %r8,1 # program check ? | ||
197 | je .Lpgmchk | ||
198 | lr %r4,%r5 # potential new chunk | ||
199 | alr %r5,%r1 # add 128KB to end of chunk | ||
200 | j .Llpcnt | ||
201 | .Lpgmchk: | ||
202 | alr %r5,%r1 # add 128KB to end of chunk | ||
203 | lr %r4,%r5 # potential new chunk | ||
204 | .Llpcnt: | ||
205 | clr %r5,%r9 # should we go on? | ||
206 | jl .Lloop | ||
207 | .Ldonemem: | ||
208 | l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags | 61 | l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags |
209 | # | 62 | # |
210 | # find out if we are running under VM | ||
211 | # | ||
212 | stidp __LC_CPUID # store cpuid | ||
213 | tm __LC_CPUID,0xff # running under VM ? | ||
214 | bno .Lnovm-.LPG1(%r13) | ||
215 | oi 3(%r12),1 # set VM flag | ||
216 | .Lnovm: | ||
217 | lh %r0,__LC_CPUID+4 # get cpu version | ||
218 | chi %r0,0x7490 # running on a P/390 ? | ||
219 | bne .Lnop390-.LPG1(%r13) | ||
220 | oi 3(%r12),4 # set P/390 flag | ||
221 | .Lnop390: | ||
222 | |||
223 | # | ||
224 | # find out if we have an IEEE fpu | 63 | # find out if we have an IEEE fpu |
225 | # | 64 | # |
226 | mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) | 65 | mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13) |
@@ -295,7 +134,6 @@ startup_continue: | |||
295 | .long 0 # cr15: linkage stack operations | 134 | .long 0 # cr15: linkage stack operations |
296 | .Lduct: .long 0,0,0,0,0,0,0,0 | 135 | .Lduct: .long 0,0,0,0,0,0,0,0 |
297 | .long 0,0,0,0,0,0,0,0 | 136 | .long 0,0,0,0,0,0,0,0 |
298 | .Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem | ||
299 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu | 137 | .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu |
300 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp | 138 | .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp |
301 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg | 139 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg |
@@ -306,7 +144,9 @@ startup_continue: | |||
306 | .Lbss_bgn: .long __bss_start | 144 | .Lbss_bgn: .long __bss_start |
307 | .Lbss_end: .long _end | 145 | .Lbss_end: .long _end |
308 | .Lparmaddr: .long PARMAREA | 146 | .Lparmaddr: .long PARMAREA |
309 | .Lsccbaddr: .long .Lsccb | 147 | .Linittu: .long init_thread_union |
148 | .Lstartup_init: | ||
149 | .long startup_init | ||
310 | 150 | ||
311 | .globl ipl_schib | 151 | .globl ipl_schib |
312 | ipl_schib: | 152 | ipl_schib: |
@@ -322,26 +162,6 @@ ipl_devno: | |||
322 | .word 0 | 162 | .word 0 |
323 | 163 | ||
324 | .org 0x12000 | 164 | .org 0x12000 |
325 | .globl s390_readinfo_sccb | ||
326 | s390_readinfo_sccb: | ||
327 | .Lsccb: | ||
328 | .hword 0x1000 # length, one page | ||
329 | .byte 0x00,0x00,0x00 | ||
330 | .byte 0x80 # variable response bit set | ||
331 | .Lsccbr: | ||
332 | .hword 0x00 # response code | ||
333 | .Lscpincr1: | ||
334 | .hword 0x00 | ||
335 | .Lscpa1: | ||
336 | .byte 0x00 | ||
337 | .fill 89,1,0 | ||
338 | .Lscpa2: | ||
339 | .int 0x00 | ||
340 | .Lscpincr2: | ||
341 | .quad 0x00 | ||
342 | .fill 3984,1,0 | ||
343 | .org 0x13000 | ||
344 | |||
345 | #ifdef CONFIG_SHARED_KERNEL | 165 | #ifdef CONFIG_SHARED_KERNEL |
346 | .org 0x100000 | 166 | .org 0x100000 |
347 | #endif | 167 | #endif |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 6ba3f4512dd1..b8fec4e5c5d4 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -58,183 +58,15 @@ startup_continue: | |||
58 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack | 58 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack |
59 | aghi %r15,-160 | 59 | aghi %r15,-160 |
60 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | 60 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain |
61 | |||
62 | brasl %r14,ipl_save_parameters | ||
63 | # | 61 | # |
64 | # clear bss memory | 62 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
63 | # and create a kernel NSS if the SAVESYS= parm is defined | ||
65 | # | 64 | # |
66 | larl %r2,__bss_start # start of bss segment | 65 | brasl %r14,startup_init |
67 | larl %r3,_end # end of bss segment | ||
68 | sgr %r3,%r2 # length of bss | ||
69 | sgr %r4,%r4 # | ||
70 | sgr %r5,%r5 # set src,length and pad to zero | ||
71 | mvcle %r2,%r4,0 # clear mem | ||
72 | jo .-4 # branch back, if not finished | ||
73 | # set program check new psw mask | 66 | # set program check new psw mask |
74 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) | 67 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) |
75 | larl %r1,.Lslowmemdetect # set program check address | ||
76 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
77 | lghi %r1,0xc | ||
78 | diag %r0,%r1,0x260 # get memory size of virtual machine | ||
79 | cgr %r0,%r1 # different? -> old detection routine | ||
80 | jne .Lslowmemdetect | ||
81 | aghi %r1,1 # size is one more than end | ||
82 | larl %r2,memory_chunk | ||
83 | stg %r1,8(%r2) # store size of chunk | ||
84 | j .Ldonemem | ||
85 | |||
86 | .Lslowmemdetect: | ||
87 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | ||
88 | .Lservicecall: | ||
89 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | ||
90 | |||
91 | stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0 | ||
92 | la %r1,0x200 # set bit 22 | ||
93 | og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1 | ||
94 | stg %r1,.Lcr-.LPG1(%r13) | ||
95 | lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0 | ||
96 | |||
97 | mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw | ||
98 | larl %r1,.Lsclph | ||
99 | stg %r1,__LC_EXT_NEW_PSW+8 # set handler | ||
100 | |||
101 | larl %r4,.Lsccb # %r4 is our index for sccb stuff | ||
102 | lgr %r1,%r4 # our sccb | ||
103 | .insn rre,0xb2200000,%r2,%r1 # service call | ||
104 | ipm %r1 | ||
105 | srl %r1,28 # get cc code | ||
106 | xr %r3,%r3 | ||
107 | chi %r1,3 | ||
108 | be .Lfchunk-.LPG1(%r13) # leave | ||
109 | chi %r1,2 | ||
110 | be .Lservicecall-.LPG1(%r13) | ||
111 | lpswe .Lwaitsclp-.LPG1(%r13) | ||
112 | .Lsclph: | ||
113 | lh %r1,.Lsccbr-.Lsccb(%r4) | ||
114 | chi %r1,0x10 # 0x0010 is the success code | ||
115 | je .Lprocsccb # let's process the sccb | ||
116 | chi %r1,0x1f0 | ||
117 | bne .Lfchunk-.LPG1(%r13) # unhandled error code | ||
118 | c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced | ||
119 | bne .Lfchunk-.LPG1(%r13) # if no, give up | ||
120 | l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP | ||
121 | b .Lservicecall-.LPG1(%r13) | ||
122 | .Lprocsccb: | ||
123 | lghi %r1,0 | ||
124 | icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0 | ||
125 | jnz .Lscnd | ||
126 | lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one | ||
127 | .Lscnd: | ||
128 | xr %r3,%r3 # same logic | ||
129 | ic %r3,.Lscpa1-.Lsccb(%r4) | ||
130 | chi %r3,0x00 | ||
131 | jne .Lcompmem | ||
132 | l %r3,.Lscpa2-.Lsccb(%r4) | ||
133 | .Lcompmem: | ||
134 | mlgr %r2,%r1 # mem in MB on 128-bit | ||
135 | l %r1,.Lonemb-.LPG1(%r13) | ||
136 | mlgr %r2,%r1 # mem size in bytes in %r3 | ||
137 | b .Lfchunk-.LPG1(%r13) | ||
138 | |||
139 | .align 4 | ||
140 | .Lpmask: | ||
141 | .byte 0 | ||
142 | .align 8 | ||
143 | .Lcr: | ||
144 | .quad 0x00 # place holder for cr0 | ||
145 | .Lwaitsclp: | ||
146 | .quad 0x0102000180000000,.Lsclph | ||
147 | .Lrcp: | ||
148 | .int 0x00120001 # Read SCP forced code | ||
149 | .Lrcp2: | ||
150 | .int 0x00020001 # Read SCP code | ||
151 | .Lonemb: | ||
152 | .int 0x100000 | ||
153 | |||
154 | .Lfchunk: | ||
155 | |||
156 | # | ||
157 | # find memory chunks. | ||
158 | # | ||
159 | lgr %r9,%r3 # end of mem | ||
160 | larl %r1,.Lchkmem # set program check address | ||
161 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
162 | la %r1,1 # test in increments of 128KB | ||
163 | sllg %r1,%r1,17 | ||
164 | larl %r3,memory_chunk | ||
165 | slgr %r4,%r4 # set start of chunk to zero | ||
166 | slgr %r5,%r5 # set end of chunk to zero | ||
167 | slr %r6,%r6 # set access code to zero | ||
168 | la %r10,MEMORY_CHUNKS # number of chunks | ||
169 | .Lloop: | ||
170 | tprot 0(%r5),0 # test protection of first byte | ||
171 | ipm %r7 | ||
172 | srl %r7,28 | ||
173 | clr %r6,%r7 # compare cc with last access code | ||
174 | je .Lsame | ||
175 | lghi %r8,0 # no program checks | ||
176 | j .Lsavchk | ||
177 | .Lsame: | ||
178 | algr %r5,%r1 # add 128KB to end of chunk | ||
179 | # no need to check here, | ||
180 | brc 12,.Lloop # this is the same chunk | ||
181 | .Lchkmem: # > 16EB or tprot got a program check | ||
182 | lghi %r8,1 # set program check flag | ||
183 | .Lsavchk: | ||
184 | clgr %r4,%r5 # chunk size > 0? | ||
185 | je .Lchkloop | ||
186 | stg %r4,0(%r3) # store start address of chunk | ||
187 | lgr %r0,%r5 | ||
188 | slgr %r0,%r4 | ||
189 | stg %r0,8(%r3) # store size of chunk | ||
190 | st %r6,20(%r3) # store type of chunk | ||
191 | la %r3,24(%r3) | ||
192 | ahi %r10,-1 # update chunk number | ||
193 | .Lchkloop: | ||
194 | lr %r6,%r7 # set access code to last cc | ||
195 | # we got an exception or we're starting a new | ||
196 | # chunk, so we must check if we should | ||
197 | # still try to find valid memory (if we detected | ||
198 | # the amount of available storage), and if we | ||
199 | # have chunks left | ||
200 | lghi %r4,1 | ||
201 | sllg %r4,%r4,31 | ||
202 | clgr %r5,%r4 | ||
203 | je .Lhsaskip | ||
204 | xr %r0, %r0 | ||
205 | clgr %r0, %r9 # did we detect memory? | ||
206 | je .Ldonemem # if not, leave | ||
207 | chi %r10, 0 # do we have chunks left? | ||
208 | je .Ldonemem | ||
209 | .Lhsaskip: | ||
210 | chi %r8,1 # program check ? | ||
211 | je .Lpgmchk | ||
212 | lgr %r4,%r5 # potential new chunk | ||
213 | algr %r5,%r1 # add 128KB to end of chunk | ||
214 | j .Llpcnt | ||
215 | .Lpgmchk: | ||
216 | algr %r5,%r1 # add 128KB to end of chunk | ||
217 | lgr %r4,%r5 # potential new chunk | ||
218 | .Llpcnt: | ||
219 | clgr %r5,%r9 # should we go on? | ||
220 | jl .Lloop | ||
221 | .Ldonemem: | ||
222 | |||
223 | larl %r12,machine_flags | 68 | larl %r12,machine_flags |
224 | # | 69 | # |
225 | # find out if we are running under VM | ||
226 | # | ||
227 | stidp __LC_CPUID # store cpuid | ||
228 | tm __LC_CPUID,0xff # running under VM ? | ||
229 | bno 0f-.LPG1(%r13) | ||
230 | oi 7(%r12),1 # set VM flag | ||
231 | 0: lh %r0,__LC_CPUID+4 # get cpu version | ||
232 | chi %r0,0x7490 # running on a P/390 ? | ||
233 | bne 1f-.LPG1(%r13) | ||
234 | oi 7(%r12),4 # set P/390 flag | ||
235 | 1: | ||
236 | |||
237 | # | ||
238 | # find out if we have the MVPG instruction | 70 | # find out if we have the MVPG instruction |
239 | # | 71 | # |
240 | la %r1,0f-.LPG1(%r13) # set program check address | 72 | la %r1,0f-.LPG1(%r13) # set program check address |
@@ -336,25 +168,6 @@ ipl_devno: | |||
336 | .word 0 | 168 | .word 0 |
337 | 169 | ||
338 | .org 0x12000 | 170 | .org 0x12000 |
339 | .globl s390_readinfo_sccb | ||
340 | s390_readinfo_sccb: | ||
341 | .Lsccb: | ||
342 | .hword 0x1000 # length, one page | ||
343 | .byte 0x00,0x00,0x00 | ||
344 | .byte 0x80 # variable response bit set | ||
345 | .Lsccbr: | ||
346 | .hword 0x00 # response code | ||
347 | .Lscpincr1: | ||
348 | .hword 0x00 | ||
349 | .Lscpa1: | ||
350 | .byte 0x00 | ||
351 | .fill 89,1,0 | ||
352 | .Lscpa2: | ||
353 | .int 0x00 | ||
354 | .Lscpincr2: | ||
355 | .quad 0x00 | ||
356 | .fill 3984,1,0 | ||
357 | .org 0x13000 | ||
358 | 171 | ||
359 | #ifdef CONFIG_SHARED_KERNEL | 172 | #ifdef CONFIG_SHARED_KERNEL |
360 | .org 0x100000 | 173 | .org 0x100000 |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 9e9972e8a52b..052259530651 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -20,26 +20,27 @@ | |||
20 | #include <asm/cio.h> | 20 | #include <asm/cio.h> |
21 | #include <asm/ebcdic.h> | 21 | #include <asm/ebcdic.h> |
22 | #include <asm/reset.h> | 22 | #include <asm/reset.h> |
23 | #include <asm/sclp.h> | ||
23 | 24 | ||
24 | #define IPL_PARM_BLOCK_VERSION 0 | 25 | #define IPL_PARM_BLOCK_VERSION 0 |
25 | #define LOADPARM_LEN 8 | ||
26 | 26 | ||
27 | extern char s390_readinfo_sccb[]; | 27 | #define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10) |
28 | #define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) | 28 | #define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm) |
29 | #define SCCB_LOADPARM (&s390_readinfo_sccb[24]) | 29 | #define SCCB_FLAG (s390_readinfo_sccb.flags) |
30 | #define SCCB_FLAG (s390_readinfo_sccb[91]) | ||
31 | 30 | ||
32 | enum ipl_type { | 31 | enum ipl_type { |
33 | IPL_TYPE_NONE = 1, | 32 | IPL_TYPE_NONE = 1, |
34 | IPL_TYPE_UNKNOWN = 2, | 33 | IPL_TYPE_UNKNOWN = 2, |
35 | IPL_TYPE_CCW = 4, | 34 | IPL_TYPE_CCW = 4, |
36 | IPL_TYPE_FCP = 8, | 35 | IPL_TYPE_FCP = 8, |
36 | IPL_TYPE_NSS = 16, | ||
37 | }; | 37 | }; |
38 | 38 | ||
39 | #define IPL_NONE_STR "none" | 39 | #define IPL_NONE_STR "none" |
40 | #define IPL_UNKNOWN_STR "unknown" | 40 | #define IPL_UNKNOWN_STR "unknown" |
41 | #define IPL_CCW_STR "ccw" | 41 | #define IPL_CCW_STR "ccw" |
42 | #define IPL_FCP_STR "fcp" | 42 | #define IPL_FCP_STR "fcp" |
43 | #define IPL_NSS_STR "nss" | ||
43 | 44 | ||
44 | static char *ipl_type_str(enum ipl_type type) | 45 | static char *ipl_type_str(enum ipl_type type) |
45 | { | 46 | { |
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type) | |||
50 | return IPL_CCW_STR; | 51 | return IPL_CCW_STR; |
51 | case IPL_TYPE_FCP: | 52 | case IPL_TYPE_FCP: |
52 | return IPL_FCP_STR; | 53 | return IPL_FCP_STR; |
54 | case IPL_TYPE_NSS: | ||
55 | return IPL_NSS_STR; | ||
53 | case IPL_TYPE_UNKNOWN: | 56 | case IPL_TYPE_UNKNOWN: |
54 | default: | 57 | default: |
55 | return IPL_UNKNOWN_STR; | 58 | return IPL_UNKNOWN_STR; |
@@ -64,6 +67,7 @@ enum ipl_method { | |||
64 | IPL_METHOD_FCP_RO_DIAG, | 67 | IPL_METHOD_FCP_RO_DIAG, |
65 | IPL_METHOD_FCP_RW_DIAG, | 68 | IPL_METHOD_FCP_RW_DIAG, |
66 | IPL_METHOD_FCP_RO_VM, | 69 | IPL_METHOD_FCP_RO_VM, |
70 | IPL_METHOD_NSS, | ||
67 | }; | 71 | }; |
68 | 72 | ||
69 | enum shutdown_action { | 73 | enum shutdown_action { |
@@ -114,11 +118,14 @@ enum diag308_rc { | |||
114 | static int diag308_set_works = 0; | 118 | static int diag308_set_works = 0; |
115 | 119 | ||
116 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; | 120 | static int reipl_capabilities = IPL_TYPE_UNKNOWN; |
121 | |||
117 | static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; | 122 | static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; |
118 | static enum ipl_method reipl_method = IPL_METHOD_NONE; | 123 | static enum ipl_method reipl_method = IPL_METHOD_NONE; |
119 | static struct ipl_parameter_block *reipl_block_fcp; | 124 | static struct ipl_parameter_block *reipl_block_fcp; |
120 | static struct ipl_parameter_block *reipl_block_ccw; | 125 | static struct ipl_parameter_block *reipl_block_ccw; |
121 | 126 | ||
127 | static char reipl_nss_name[NSS_NAME_SIZE + 1]; | ||
128 | |||
122 | static int dump_capabilities = IPL_TYPE_NONE; | 129 | static int dump_capabilities = IPL_TYPE_NONE; |
123 | static enum ipl_type dump_type = IPL_TYPE_NONE; | 130 | static enum ipl_type dump_type = IPL_TYPE_NONE; |
124 | static enum ipl_method dump_method = IPL_METHOD_NONE; | 131 | static enum ipl_method dump_method = IPL_METHOD_NONE; |
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ | |||
173 | sys_##_prefix##_##_name##_show, \ | 180 | sys_##_prefix##_##_name##_show, \ |
174 | sys_##_prefix##_##_name##_store); | 181 | sys_##_prefix##_##_name##_store); |
175 | 182 | ||
183 | #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\ | ||
184 | static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \ | ||
185 | char *page) \ | ||
186 | { \ | ||
187 | return sprintf(page, _fmt_out, _value); \ | ||
188 | } \ | ||
189 | static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\ | ||
190 | const char *buf, size_t len) \ | ||
191 | { \ | ||
192 | if (sscanf(buf, _fmt_in, _value) != 1) \ | ||
193 | return -EINVAL; \ | ||
194 | return len; \ | ||
195 | } \ | ||
196 | static struct subsys_attribute sys_##_prefix##_##_name##_attr = \ | ||
197 | __ATTR(_name,(S_IRUGO | S_IWUSR), \ | ||
198 | sys_##_prefix##_##_name##_show, \ | ||
199 | sys_##_prefix##_##_name##_store); | ||
200 | |||
176 | static void make_attrs_ro(struct attribute **attrs) | 201 | static void make_attrs_ro(struct attribute **attrs) |
177 | { | 202 | { |
178 | while (*attrs) { | 203 | while (*attrs) { |
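Since DEFINE_IPL_ATTR_STR_RW is used exactly once in this patch, it may help to see roughly what it expands to for the reipl NSS name attribute defined further down. The expansion below is reconstructed by hand from the macro body above and is meant purely as an illustration, not as copied kernel source.

    /* Approximate expansion of
     * DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name) */
    static ssize_t sys_reipl_nss_name_show(struct subsystem *subsys, char *page)
    {
        return sprintf(page, "%s\n", reipl_nss_name);
    }

    static ssize_t sys_reipl_nss_name_store(struct subsystem *subsys,
                                            const char *buf, size_t len)
    {
        if (sscanf(buf, "%s\n", reipl_nss_name) != 1)
            return -EINVAL;
        return len;
    }

    static struct subsys_attribute sys_reipl_nss_name_attr =
        __ATTR(name, (S_IRUGO | S_IWUSR),
               sys_reipl_nss_name_show, sys_reipl_nss_name_store);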
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void) | |||
189 | { | 214 | { |
190 | struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; | 215 | struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START; |
191 | 216 | ||
217 | if (ipl_flags & IPL_NSS_VALID) | ||
218 | return IPL_TYPE_NSS; | ||
192 | if (!(ipl_flags & IPL_DEVNO_VALID)) | 219 | if (!(ipl_flags & IPL_DEVNO_VALID)) |
193 | return IPL_TYPE_UNKNOWN; | 220 | return IPL_TYPE_UNKNOWN; |
194 | if (!(ipl_flags & IPL_PARMBLOCK_VALID)) | 221 | if (!(ipl_flags & IPL_PARMBLOCK_VALID)) |
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = { | |||
324 | .attrs = ipl_ccw_attrs, | 351 | .attrs = ipl_ccw_attrs, |
325 | }; | 352 | }; |
326 | 353 | ||
354 | /* NSS ipl device attributes */ | ||
355 | |||
356 | DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name); | ||
357 | |||
358 | static struct attribute *ipl_nss_attrs[] = { | ||
359 | &sys_ipl_type_attr.attr, | ||
360 | &sys_ipl_nss_name_attr.attr, | ||
361 | NULL, | ||
362 | }; | ||
363 | |||
364 | static struct attribute_group ipl_nss_attr_group = { | ||
365 | .attrs = ipl_nss_attrs, | ||
366 | }; | ||
367 | |||
327 | /* UNKNOWN ipl device attributes */ | 368 | /* UNKNOWN ipl device attributes */ |
328 | 369 | ||
329 | static struct attribute *ipl_unknown_attrs[] = { | 370 | static struct attribute *ipl_unknown_attrs[] = { |
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = { | |||
432 | .attrs = reipl_ccw_attrs, | 473 | .attrs = reipl_ccw_attrs, |
433 | }; | 474 | }; |
434 | 475 | ||
476 | |||
477 | /* NSS reipl device attributes */ | ||
478 | |||
479 | DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name); | ||
480 | |||
481 | static struct attribute *reipl_nss_attrs[] = { | ||
482 | &sys_reipl_nss_name_attr.attr, | ||
483 | NULL, | ||
484 | }; | ||
485 | |||
486 | static struct attribute_group reipl_nss_attr_group = { | ||
487 | .name = IPL_NSS_STR, | ||
488 | .attrs = reipl_nss_attrs, | ||
489 | }; | ||
490 | |||
435 | /* reipl type */ | 491 | /* reipl type */ |
436 | 492 | ||
437 | static int reipl_set_type(enum ipl_type type) | 493 | static int reipl_set_type(enum ipl_type type) |
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type) | |||
454 | else | 510 | else |
455 | reipl_method = IPL_METHOD_FCP_RO_DIAG; | 511 | reipl_method = IPL_METHOD_FCP_RO_DIAG; |
456 | break; | 512 | break; |
513 | case IPL_TYPE_NSS: | ||
514 | reipl_method = IPL_METHOD_NSS; | ||
515 | break; | ||
457 | default: | 516 | default: |
458 | reipl_method = IPL_METHOD_NONE; | 517 | reipl_method = IPL_METHOD_NONE; |
459 | } | 518 | } |
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf, | |||
475 | rc = reipl_set_type(IPL_TYPE_CCW); | 534 | rc = reipl_set_type(IPL_TYPE_CCW); |
476 | else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) | 535 | else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0) |
477 | rc = reipl_set_type(IPL_TYPE_FCP); | 536 | rc = reipl_set_type(IPL_TYPE_FCP); |
537 | else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0) | ||
538 | rc = reipl_set_type(IPL_TYPE_NSS); | ||
478 | return (rc != 0) ? rc : len; | 539 | return (rc != 0) ? rc : len; |
479 | } | 540 | } |
480 | 541 | ||
@@ -647,6 +708,10 @@ void do_reipl(void) | |||
647 | case IPL_METHOD_FCP_RO_VM: | 708 | case IPL_METHOD_FCP_RO_VM: |
648 | __cpcmd("IPL", NULL, 0, NULL); | 709 | __cpcmd("IPL", NULL, 0, NULL); |
649 | break; | 710 | break; |
711 | case IPL_METHOD_NSS: | ||
712 | sprintf(buf, "IPL %s", reipl_nss_name); | ||
713 | __cpcmd(buf, NULL, 0, NULL); | ||
714 | break; | ||
650 | case IPL_METHOD_NONE: | 715 | case IPL_METHOD_NONE: |
651 | default: | 716 | default: |
652 | if (MACHINE_IS_VM) | 717 | if (MACHINE_IS_VM) |
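At re-IPL time the NSS path simply hands the request back to CP: with a saved system named, say, LNXNSS (a made-up name used only for illustration), the sprintf above builds the buffer "IPL LNXNSS", which __cpcmd issues exactly as if an operator had typed the CP command. Assuming the re-IPL attributes end up under /sys/firmware/reipl next to the existing ccw and fcp groups (the sysfs path is an assumption, not spelled out in this hunk), selecting this path from user space would amount to writing the NSS name into the nss/name attribute and the string "nss" into the re-IPL type attribute before rebooting.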
@@ -733,6 +798,10 @@ static int __init ipl_init(void) | |||
733 | case IPL_TYPE_FCP: | 798 | case IPL_TYPE_FCP: |
734 | rc = ipl_register_fcp_files(); | 799 | rc = ipl_register_fcp_files(); |
735 | break; | 800 | break; |
801 | case IPL_TYPE_NSS: | ||
802 | rc = sysfs_create_group(&ipl_subsys.kset.kobj, | ||
803 | &ipl_nss_attr_group); | ||
804 | break; | ||
736 | default: | 805 | default: |
737 | rc = sysfs_create_group(&ipl_subsys.kset.kobj, | 806 | rc = sysfs_create_group(&ipl_subsys.kset.kobj, |
738 | &ipl_unknown_attr_group); | 807 | &ipl_unknown_attr_group); |
@@ -755,6 +824,20 @@ static void __init reipl_probe(void) | |||
755 | free_page((unsigned long)buffer); | 824 | free_page((unsigned long)buffer); |
756 | } | 825 | } |
757 | 826 | ||
827 | static int __init reipl_nss_init(void) | ||
828 | { | ||
829 | int rc; | ||
830 | |||
831 | if (!MACHINE_IS_VM) | ||
832 | return 0; | ||
833 | rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group); | ||
834 | if (rc) | ||
835 | return rc; | ||
836 | strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1); | ||
837 | reipl_capabilities |= IPL_TYPE_NSS; | ||
838 | return 0; | ||
839 | } | ||
840 | |||
758 | static int __init reipl_ccw_init(void) | 841 | static int __init reipl_ccw_init(void) |
759 | { | 842 | { |
760 | int rc; | 843 | int rc; |
@@ -837,6 +920,9 @@ static int __init reipl_init(void) | |||
837 | rc = reipl_fcp_init(); | 920 | rc = reipl_fcp_init(); |
838 | if (rc) | 921 | if (rc) |
839 | return rc; | 922 | return rc; |
923 | rc = reipl_nss_init(); | ||
924 | if (rc) | ||
925 | return rc; | ||
840 | rc = reipl_set_type(ipl_get_type()); | 926 | rc = reipl_set_type(ipl_get_type()); |
841 | if (rc) | 927 | if (rc) |
842 | return rc; | 928 | return rc; |
@@ -993,8 +1079,6 @@ static void do_reset_calls(void) | |||
993 | reset->fn(); | 1079 | reset->fn(); |
994 | } | 1080 | } |
995 | 1081 | ||
996 | extern void reset_mcck_handler(void); | ||
997 | extern void reset_pgm_handler(void); | ||
998 | extern __u32 dump_prefix_page; | 1082 | extern __u32 dump_prefix_page; |
999 | 1083 | ||
1000 | void s390_reset_system(void) | 1084 | void s390_reset_system(void) |
@@ -1016,14 +1100,14 @@ void s390_reset_system(void) | |||
1016 | __ctl_clear_bit(0,28); | 1100 | __ctl_clear_bit(0,28); |
1017 | 1101 | ||
1018 | /* Set new machine check handler */ | 1102 | /* Set new machine check handler */ |
1019 | S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; | 1103 | S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
1020 | S390_lowcore.mcck_new_psw.addr = | 1104 | S390_lowcore.mcck_new_psw.addr = |
1021 | PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; | 1105 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; |
1022 | 1106 | ||
1023 | /* Set new program check handler */ | 1107 | /* Set new program check handler */ |
1024 | S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; | 1108 | S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
1025 | S390_lowcore.program_new_psw.addr = | 1109 | S390_lowcore.program_new_psw.addr = |
1026 | PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler; | 1110 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; |
1027 | 1111 | ||
1028 | do_reset_calls(); | 1112 | do_reset_calls(); |
1029 | } | 1113 | } |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 1eef50918615..8f0cbca31203 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -1,9 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/irq.c | 2 | * arch/s390/kernel/irq.c |
3 | * | 3 | * |
4 | * S390 version | 4 | * Copyright IBM Corp. 2004,2007 |
5 | * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), |
6 | * Thomas Spatzier (tspat@de.ibm.com) | ||
7 | * | 7 | * |
8 | * This file contains interrupt related functions. | 8 | * This file contains interrupt related functions. |
9 | */ | 9 | */ |
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/profile.h> | ||
17 | 19 | ||
18 | /* | 20 | /* |
19 | * show_interrupts is needed by /proc/interrupts. | 21 | * show_interrupts is needed by /proc/interrupts. |
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void) | |||
93 | 95 | ||
94 | local_irq_restore(flags); | 96 | local_irq_restore(flags); |
95 | } | 97 | } |
96 | |||
97 | EXPORT_SYMBOL(do_softirq); | 98 | EXPORT_SYMBOL(do_softirq); |
99 | |||
100 | void init_irq_proc(void) | ||
101 | { | ||
102 | struct proc_dir_entry *root_irq_dir; | ||
103 | |||
104 | root_irq_dir = proc_mkdir("irq", NULL); | ||
105 | create_prof_cpu_mask(root_irq_dir); | ||
106 | } | ||
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 576368c4f605..a466bab6677e 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) | |||
155 | static int __kprobes swap_instruction(void *aref) | 155 | static int __kprobes swap_instruction(void *aref) |
156 | { | 156 | { |
157 | struct ins_replace_args *args = aref; | 157 | struct ins_replace_args *args = aref; |
158 | u32 *addr; | ||
159 | u32 instr; | ||
158 | int err = -EFAULT; | 160 | int err = -EFAULT; |
159 | 161 | ||
162 | /* | ||
163 | * Text segment is read-only, hence we use stura to bypass dynamic | ||
164 | * address translation to exchange the instruction. stura always | ||
165 | * operates on four bytes, but we only want to exchange two, so we | ||
166 | * do some calculations to get things right. In addition we | ||
167 | * shall not cross any page boundaries (vmalloc area!) when writing | ||
168 | * the new instruction. | ||
169 | */ | ||
170 | addr = (u32 *)ALIGN((unsigned long)args->ptr, 4); | ||
171 | if ((unsigned long)args->ptr & 2) | ||
172 | instr = ((*addr) & 0xffff0000) | args->new; | ||
173 | else | ||
174 | instr = ((*addr) & 0x0000ffff) | args->new << 16; | ||
175 | |||
160 | asm volatile( | 176 | asm volatile( |
161 | "0: mvc 0(2,%2),0(%3)\n" | 177 | " lra %1,0(%1)\n" |
162 | "1: la %0,0\n" | 178 | "0: stura %2,%1\n" |
179 | "1: la %0,0\n" | ||
163 | "2:\n" | 180 | "2:\n" |
164 | EX_TABLE(0b,2b) | 181 | EX_TABLE(0b,2b) |
165 | : "+d" (err), "=m" (*args->ptr) | 182 | : "+d" (err) |
166 | : "a" (args->ptr), "a" (&args->new), "m" (args->new)); | 183 | : "a" (addr), "d" (instr) |
184 | : "memory", "cc"); | ||
185 | |||
167 | return err; | 186 | return err; |
168 | } | 187 | } |
169 | 188 | ||
@@ -356,7 +375,7 @@ no_kprobe: | |||
356 | * - When the probed function returns, this probe | 375 | * - When the probed function returns, this probe |
357 | * causes the handlers to fire | 376 | * causes the handlers to fire |
358 | */ | 377 | */ |
359 | void __kprobes kretprobe_trampoline_holder(void) | 378 | void kretprobe_trampoline_holder(void) |
360 | { | 379 | { |
361 | asm volatile(".global kretprobe_trampoline\n" | 380 | asm volatile(".global kretprobe_trampoline\n" |
362 | "kretprobe_trampoline: bcr 0,0\n"); | 381 | "kretprobe_trampoline: bcr 0,0\n"); |
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void) | |||
365 | /* | 384 | /* |
366 | * Called when the probe at kretprobe trampoline is hit | 385 | * Called when the probe at kretprobe trampoline is hit |
367 | */ | 386 | */ |
368 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 387 | static int __kprobes trampoline_probe_handler(struct kprobe *p, |
388 | struct pt_regs *regs) | ||
369 | { | 389 | { |
370 | struct kretprobe_instance *ri = NULL; | 390 | struct kretprobe_instance *ri = NULL; |
371 | struct hlist_head *head, empty_rp; | 391 | struct hlist_head *head, empty_rp; |
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index f6d9bcc0f75b..52f57af252b4 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/kexec.h> | 12 | #include <linux/kexec.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/reboot.h> | ||
14 | #include <asm/cio.h> | 15 | #include <asm/cio.h> |
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
16 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index d989ed45a7aa..39d1dd752529 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/fs.h> | 30 | #include <linux/fs.h> |
31 | #include <linux/string.h> | 31 | #include <linux/string.h> |
32 | #include <linux/kernel.h> | 32 | #include <linux/kernel.h> |
33 | #include <linux/moduleloader.h> | ||
33 | 34 | ||
34 | #if 0 | 35 | #if 0 |
35 | #define DEBUGP printk | 36 | #define DEBUGP printk |
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region) | |||
58 | table entries. */ | 59 | table entries. */ |
59 | } | 60 | } |
60 | 61 | ||
61 | static inline void | 62 | static void |
62 | check_rela(Elf_Rela *rela, struct module *me) | 63 | check_rela(Elf_Rela *rela, struct module *me) |
63 | { | 64 | { |
64 | struct mod_arch_syminfo *info; | 65 | struct mod_arch_syminfo *info; |
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, | |||
181 | return -ENOEXEC; | 182 | return -ENOEXEC; |
182 | } | 183 | } |
183 | 184 | ||
184 | static inline int | 185 | static int |
185 | apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | 186 | apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, |
186 | struct module *me) | 187 | struct module *me) |
187 | { | 188 | { |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 6603fbb41d07..5acfac654f9d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -144,7 +144,7 @@ static void default_idle(void) | |||
144 | 144 | ||
145 | trace_hardirqs_on(); | 145 | trace_hardirqs_on(); |
146 | /* Wait for external, I/O or machine check interrupt. */ | 146 | /* Wait for external, I/O or machine check interrupt. */ |
147 | __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT | | 147 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
148 | PSW_MASK_IO | PSW_MASK_EXT); | 148 | PSW_MASK_IO | PSW_MASK_EXT); |
149 | } | 149 | } |
150 | 150 | ||
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |||
190 | struct pt_regs regs; | 190 | struct pt_regs regs; |
191 | 191 | ||
192 | memset(®s, 0, sizeof(regs)); | 192 | memset(®s, 0, sizeof(regs)); |
193 | regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | 193 | regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; |
194 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; | 194 | regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE; |
195 | regs.gprs[9] = (unsigned long) fn; | 195 | regs.gprs[9] = (unsigned long) fn; |
196 | regs.gprs[10] = (unsigned long) arg; | 196 | regs.gprs[10] = (unsigned long) arg; |
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c deleted file mode 100644 index b81aa1f569ca..000000000000 --- a/arch/s390/kernel/profile.c +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/profile.c | ||
3 | * | ||
4 | * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
5 | * Author(s): Thomas Spatzier (tspat@de.ibm.com) | ||
6 | * | ||
7 | */ | ||
8 | #include <linux/proc_fs.h> | ||
9 | #include <linux/profile.h> | ||
10 | |||
11 | static struct proc_dir_entry * root_irq_dir; | ||
12 | |||
13 | void init_irq_proc(void) | ||
14 | { | ||
15 | /* create /proc/irq */ | ||
16 | root_irq_dir = proc_mkdir("irq", NULL); | ||
17 | |||
18 | /* create /proc/irq/prof_cpu_mask */ | ||
19 | create_prof_cpu_mask(root_irq_dir); | ||
20 | } | ||
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 8f36504075ed..2a8f0872ea8b 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task) | |||
86 | per_info->control_regs.bits.storage_alt_space_ctl = 0; | 86 | per_info->control_regs.bits.storage_alt_space_ctl = 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | void | 89 | static void set_single_step(struct task_struct *task) |
90 | set_single_step(struct task_struct *task) | ||
91 | { | 90 | { |
92 | task->thread.per_info.single_step = 1; | 91 | task->thread.per_info.single_step = 1; |
93 | FixPerRegisters(task); | 92 | FixPerRegisters(task); |
94 | } | 93 | } |
95 | 94 | ||
96 | void | 95 | static void clear_single_step(struct task_struct *task) |
97 | clear_single_step(struct task_struct *task) | ||
98 | { | 96 | { |
99 | task->thread.per_info.single_step = 0; | 97 | task->thread.per_info.single_step = 0; |
100 | FixPerRegisters(task); | 98 | FixPerRegisters(task); |
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
232 | */ | 230 | */ |
233 | if (addr == (addr_t) &dummy->regs.psw.mask && | 231 | if (addr == (addr_t) &dummy->regs.psw.mask && |
234 | #ifdef CONFIG_COMPAT | 232 | #ifdef CONFIG_COMPAT |
235 | data != PSW_MASK_MERGE(PSW_USER32_BITS, data) && | 233 | data != PSW_MASK_MERGE(psw_user32_bits, data) && |
236 | #endif | 234 | #endif |
237 | data != PSW_MASK_MERGE(PSW_USER_BITS, data)) | 235 | data != PSW_MASK_MERGE(psw_user_bits, data)) |
238 | /* Invalid psw mask. */ | 236 | /* Invalid psw mask. */ |
239 | return -EINVAL; | 237 | return -EINVAL; |
240 | #ifndef CONFIG_64BIT | 238 | #ifndef CONFIG_64BIT |
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | |||
309 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | 307 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); |
310 | if (copied != sizeof(tmp)) | 308 | if (copied != sizeof(tmp)) |
311 | return -EIO; | 309 | return -EIO; |
312 | return put_user(tmp, (unsigned long __user *) data); | 310 | return put_user(tmp, (unsigned long __force __user *) data); |
313 | 311 | ||
314 | case PTRACE_PEEKUSR: | 312 | case PTRACE_PEEKUSR: |
315 | /* read the word at location addr in the USER area. */ | 313 | /* read the word at location addr in the USER area. */ |
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | |||
331 | 329 | ||
332 | case PTRACE_PEEKUSR_AREA: | 330 | case PTRACE_PEEKUSR_AREA: |
333 | case PTRACE_POKEUSR_AREA: | 331 | case PTRACE_POKEUSR_AREA: |
334 | if (copy_from_user(&parea, (void __user *) addr, | 332 | if (copy_from_user(&parea, (void __force __user *) addr, |
335 | sizeof(parea))) | 333 | sizeof(parea))) |
336 | return -EFAULT; | 334 | return -EFAULT; |
337 | addr = parea.kernel_addr; | 335 | addr = parea.kernel_addr; |
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data) | |||
341 | if (request == PTRACE_PEEKUSR_AREA) | 339 | if (request == PTRACE_PEEKUSR_AREA) |
342 | ret = peek_user(child, addr, data); | 340 | ret = peek_user(child, addr, data); |
343 | else { | 341 | else { |
344 | addr_t tmp; | 342 | addr_t utmp; |
345 | if (get_user (tmp, (addr_t __user *) data)) | 343 | if (get_user(utmp, |
344 | (addr_t __force __user *) data)) | ||
346 | return -EFAULT; | 345 | return -EFAULT; |
347 | ret = poke_user(child, addr, tmp); | 346 | ret = poke_user(child, addr, utmp); |
348 | } | 347 | } |
349 | if (ret) | 348 | if (ret) |
350 | return ret; | 349 | return ret; |
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | |||
394 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 393 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
395 | /* Fake a 31 bit psw mask. */ | 394 | /* Fake a 31 bit psw mask. */ |
396 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); | 395 | tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32); |
397 | tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp); | 396 | tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp); |
398 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 397 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
399 | /* Fake a 31 bit psw address. */ | 398 | /* Fake a 31 bit psw address. */ |
400 | tmp = (__u32) task_pt_regs(child)->psw.addr | | 399 | tmp = (__u32) task_pt_regs(child)->psw.addr | |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) | |||
469 | */ | 468 | */ |
470 | if (addr == (addr_t) &dummy32->regs.psw.mask) { | 469 | if (addr == (addr_t) &dummy32->regs.psw.mask) { |
471 | /* Build a 64 bit psw mask from 31 bit mask. */ | 470 | /* Build a 64 bit psw mask from 31 bit mask. */ |
472 | if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp)) | 471 | if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp)) |
473 | /* Invalid psw mask. */ | 472 | /* Invalid psw mask. */ |
474 | return -EINVAL; | 473 | return -EINVAL; |
475 | task_pt_regs(child)->psw.mask = | 474 | task_pt_regs(child)->psw.mask = |
476 | PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32); | 475 | PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32); |
477 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { | 476 | } else if (addr == (addr_t) &dummy32->regs.psw.addr) { |
478 | /* Build a 64 bit psw address from 31 bit address. */ | 477 | /* Build a 64 bit psw address from 31 bit address. */ |
479 | task_pt_regs(child)->psw.addr = | 478 | task_pt_regs(child)->psw.addr = |
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
550 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); | 549 | copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); |
551 | if (copied != sizeof(tmp)) | 550 | if (copied != sizeof(tmp)) |
552 | return -EIO; | 551 | return -EIO; |
553 | return put_user(tmp, (unsigned int __user *) data); | 552 | return put_user(tmp, (unsigned int __force __user *) data); |
554 | 553 | ||
555 | case PTRACE_PEEKUSR: | 554 | case PTRACE_PEEKUSR: |
556 | /* read the word at location addr in the USER area. */ | 555 | /* read the word at location addr in the USER area. */ |
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
571 | 570 | ||
572 | case PTRACE_PEEKUSR_AREA: | 571 | case PTRACE_PEEKUSR_AREA: |
573 | case PTRACE_POKEUSR_AREA: | 572 | case PTRACE_POKEUSR_AREA: |
574 | if (copy_from_user(&parea, (void __user *) addr, | 573 | if (copy_from_user(&parea, (void __force __user *) addr, |
575 | sizeof(parea))) | 574 | sizeof(parea))) |
576 | return -EFAULT; | 575 | return -EFAULT; |
577 | addr = parea.kernel_addr; | 576 | addr = parea.kernel_addr; |
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
581 | if (request == PTRACE_PEEKUSR_AREA) | 580 | if (request == PTRACE_PEEKUSR_AREA) |
582 | ret = peek_user_emu31(child, addr, data); | 581 | ret = peek_user_emu31(child, addr, data); |
583 | else { | 582 | else { |
584 | __u32 tmp; | 583 | __u32 utmp; |
585 | if (get_user (tmp, (__u32 __user *) data)) | 584 | if (get_user(utmp, |
585 | (__u32 __force __user *) data)) | ||
586 | return -EFAULT; | 586 | return -EFAULT; |
587 | ret = poke_user_emu31(child, addr, tmp); | 587 | ret = poke_user_emu31(child, addr, utmp); |
588 | } | 588 | } |
589 | if (ret) | 589 | if (ret) |
590 | return ret; | 590 | return ret; |
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data) | |||
595 | return 0; | 595 | return 0; |
596 | case PTRACE_GETEVENTMSG: | 596 | case PTRACE_GETEVENTMSG: |
597 | return put_user((__u32) child->ptrace_message, | 597 | return put_user((__u32) child->ptrace_message, |
598 | (unsigned int __user *) data); | 598 | (unsigned int __force __user *) data); |
599 | case PTRACE_GETSIGINFO: | 599 | case PTRACE_GETSIGINFO: |
600 | if (child->last_siginfo == NULL) | 600 | if (child->last_siginfo == NULL) |
601 | return -EINVAL; | 601 | return -EINVAL; |
602 | return copy_siginfo_to_user32((compat_siginfo_t __user *) data, | 602 | return copy_siginfo_to_user32((compat_siginfo_t |
603 | __force __user *) data, | ||
603 | child->last_siginfo); | 604 | child->last_siginfo); |
604 | case PTRACE_SETSIGINFO: | 605 | case PTRACE_SETSIGINFO: |
605 | if (child->last_siginfo == NULL) | 606 | if (child->last_siginfo == NULL) |
606 | return -EINVAL; | 607 | return -EINVAL; |
607 | return copy_siginfo_from_user32(child->last_siginfo, | 608 | return copy_siginfo_from_user32(child->last_siginfo, |
608 | (compat_siginfo_t __user *) data); | 609 | (compat_siginfo_t |
610 | __force __user *) data); | ||
609 | } | 611 | } |
610 | return ptrace_request(child, request, addr, data); | 612 | return ptrace_request(child, request, addr, data); |
611 | } | 613 | } |
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S deleted file mode 100644 index 8a87355161fa..000000000000 --- a/arch/s390/kernel/reset.S +++ /dev/null | |||
@@ -1,90 +0,0 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/reset.S | ||
3 | * | ||
4 | * Copyright (C) IBM Corp. 2006 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | * Michael Holzheu <holzheu@de.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <asm/ptrace.h> | ||
10 | #include <asm/lowcore.h> | ||
11 | |||
12 | #ifdef CONFIG_64BIT | ||
13 | |||
14 | .globl reset_mcck_handler | ||
15 | reset_mcck_handler: | ||
16 | basr %r13,0 | ||
17 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
18 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
19 | lg %r1,s390_reset_mcck_handler-0b(%r13) | ||
20 | ltgr %r1,%r1 | ||
21 | jz 1f | ||
22 | basr %r14,%r1 | ||
23 | 1: la %r1,4095 | ||
24 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) | ||
25 | lpswe __LC_MCK_OLD_PSW | ||
26 | |||
27 | .globl s390_reset_mcck_handler | ||
28 | s390_reset_mcck_handler: | ||
29 | .quad 0 | ||
30 | |||
31 | .globl reset_pgm_handler | ||
32 | reset_pgm_handler: | ||
33 | stmg %r0,%r15,__LC_SAVE_AREA | ||
34 | basr %r13,0 | ||
35 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
36 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
37 | lg %r1,s390_reset_pgm_handler-0b(%r13) | ||
38 | ltgr %r1,%r1 | ||
39 | jz 1f | ||
40 | basr %r14,%r1 | ||
41 | lmg %r0,%r15,__LC_SAVE_AREA | ||
42 | lpswe __LC_PGM_OLD_PSW | ||
43 | 1: lpswe disabled_wait_psw-0b(%r13) | ||
44 | .globl s390_reset_pgm_handler | ||
45 | s390_reset_pgm_handler: | ||
46 | .quad 0 | ||
47 | .align 8 | ||
48 | disabled_wait_psw: | ||
49 | .quad 0x0002000180000000,0x0000000000000000 + reset_pgm_handler | ||
50 | |||
51 | #else /* CONFIG_64BIT */ | ||
52 | |||
53 | .globl reset_mcck_handler | ||
54 | reset_mcck_handler: | ||
55 | basr %r13,0 | ||
56 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
57 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
58 | l %r1,s390_reset_mcck_handler-0b(%r13) | ||
59 | ltr %r1,%r1 | ||
60 | jz 1f | ||
61 | basr %r14,%r1 | ||
62 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
63 | lpsw __LC_MCK_OLD_PSW | ||
64 | |||
65 | .globl s390_reset_mcck_handler | ||
66 | s390_reset_mcck_handler: | ||
67 | .long 0 | ||
68 | |||
69 | .globl reset_pgm_handler | ||
70 | reset_pgm_handler: | ||
71 | stm %r0,%r15,__LC_SAVE_AREA | ||
72 | basr %r13,0 | ||
73 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
74 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
75 | l %r1,s390_reset_pgm_handler-0b(%r13) | ||
76 | ltr %r1,%r1 | ||
77 | jz 1f | ||
78 | basr %r14,%r1 | ||
79 | lm %r0,%r15,__LC_SAVE_AREA | ||
80 | lpsw __LC_PGM_OLD_PSW | ||
81 | |||
82 | 1: lpsw disabled_wait_psw-0b(%r13) | ||
83 | .globl s390_reset_pgm_handler | ||
84 | s390_reset_pgm_handler: | ||
85 | .long 0 | ||
86 | disabled_wait_psw: | ||
87 | .align 8 | ||
88 | .long 0x000a0000,0x00000000 + reset_pgm_handler | ||
89 | |||
90 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c index bc5beaa8f98e..acf93dba7727 100644 --- a/arch/s390/kernel/s390_ext.c +++ b/arch/s390/kernel/s390_ext.c | |||
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code) | |||
125 | * Make sure that the i/o interrupt did not "overtake" | 125 | * Make sure that the i/o interrupt did not "overtake" |
126 | * the last HZ timer interrupt. | 126 | * the last HZ timer interrupt. |
127 | */ | 127 | */ |
128 | account_ticks(); | 128 | account_ticks(S390_lowcore.int_clock); |
129 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | 129 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; |
130 | index = ext_hash(code); | 130 | index = ext_hash(code); |
131 | for (p = ext_int_hash[index]; p; p = p->next) { | 131 | for (p = ext_int_hash[index]; p; p = p->next) { |
132 | if (likely(p->code == code)) { | 132 | if (likely(p->code == code)) |
133 | if (likely(p->handler)) | 133 | p->handler(code); |
134 | p->handler(code); | ||
135 | } | ||
136 | } | 134 | } |
137 | irq_exit(); | 135 | irq_exit(); |
138 | set_irq_regs(old_regs); | 136 | set_irq_regs(old_regs); |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 5d8ee3baac14..03739813d3bf 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -38,6 +38,8 @@ | |||
38 | #include <linux/device.h> | 38 | #include <linux/device.h> |
39 | #include <linux/notifier.h> | 39 | #include <linux/notifier.h> |
40 | #include <linux/pfn.h> | 40 | #include <linux/pfn.h> |
41 | #include <linux/ctype.h> | ||
42 | #include <linux/reboot.h> | ||
41 | 43 | ||
42 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
43 | #include <asm/system.h> | 45 | #include <asm/system.h> |
@@ -49,6 +51,14 @@ | |||
49 | #include <asm/page.h> | 51 | #include <asm/page.h> |
50 | #include <asm/ptrace.h> | 52 | #include <asm/ptrace.h> |
51 | #include <asm/sections.h> | 53 | #include <asm/sections.h> |
54 | #include <asm/ebcdic.h> | ||
55 | #include <asm/compat.h> | ||
56 | |||
57 | long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | | ||
58 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY); | ||
59 | long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
60 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
61 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY); | ||
52 | 62 | ||
53 | /* | 63 | /* |
54 | * User copy operations. | 64 | * User copy operations. |
@@ -117,9 +127,9 @@ void __devinit cpu_init (void) | |||
117 | */ | 127 | */ |
118 | char vmhalt_cmd[128] = ""; | 128 | char vmhalt_cmd[128] = ""; |
119 | char vmpoff_cmd[128] = ""; | 129 | char vmpoff_cmd[128] = ""; |
120 | char vmpanic_cmd[128] = ""; | 130 | static char vmpanic_cmd[128] = ""; |
121 | 131 | ||
122 | static inline void strncpy_skip_quote(char *dst, char *src, int n) | 132 | static void strncpy_skip_quote(char *dst, char *src, int n) |
123 | { | 133 | { |
124 | int sx, dx; | 134 | int sx, dx; |
125 | 135 | ||
@@ -275,10 +285,6 @@ static void __init conmode_default(void) | |||
275 | } | 285 | } |
276 | 286 | ||
277 | #ifdef CONFIG_SMP | 287 | #ifdef CONFIG_SMP |
278 | extern void machine_restart_smp(char *); | ||
279 | extern void machine_halt_smp(void); | ||
280 | extern void machine_power_off_smp(void); | ||
281 | |||
282 | void (*_machine_restart)(char *command) = machine_restart_smp; | 288 | void (*_machine_restart)(char *command) = machine_restart_smp; |
283 | void (*_machine_halt)(void) = machine_halt_smp; | 289 | void (*_machine_halt)(void) = machine_halt_smp; |
284 | void (*_machine_power_off)(void) = machine_power_off_smp; | 290 | void (*_machine_power_off)(void) = machine_power_off_smp; |
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p) | |||
386 | } | 392 | } |
387 | early_param("ipldelay", early_parse_ipldelay); | 393 | early_param("ipldelay", early_parse_ipldelay); |
388 | 394 | ||
395 | #ifdef CONFIG_S390_SWITCH_AMODE | ||
396 | unsigned int switch_amode = 0; | ||
397 | EXPORT_SYMBOL_GPL(switch_amode); | ||
398 | |||
399 | static void set_amode_and_uaccess(unsigned long user_amode, | ||
400 | unsigned long user32_amode) | ||
401 | { | ||
402 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | ||
403 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
404 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
405 | #ifdef CONFIG_COMPAT | ||
406 | psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode | | ||
407 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | ||
408 | PSW_MASK_PSTATE | PSW_DEFAULT_KEY; | ||
409 | psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode | | ||
410 | PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | | ||
411 | PSW32_MASK_PSTATE; | ||
412 | #endif | ||
413 | psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | | ||
414 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | ||
415 | |||
416 | if (MACHINE_HAS_MVCOS) { | ||
417 | printk("mvcos available.\n"); | ||
418 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | ||
419 | } else { | ||
420 | printk("mvcos not available.\n"); | ||
421 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); | ||
422 | } | ||
423 | } | ||
424 | |||
425 | /* | ||
426 | * Switch kernel/user addressing modes? | ||
427 | */ | ||
428 | static int __init early_parse_switch_amode(char *p) | ||
429 | { | ||
430 | switch_amode = 1; | ||
431 | return 0; | ||
432 | } | ||
433 | early_param("switch_amode", early_parse_switch_amode); | ||
434 | |||
435 | #else /* CONFIG_S390_SWITCH_AMODE */ | ||
436 | static inline void set_amode_and_uaccess(unsigned long user_amode, | ||
437 | unsigned long user32_amode) | ||
438 | { | ||
439 | } | ||
440 | #endif /* CONFIG_S390_SWITCH_AMODE */ | ||
441 | |||
442 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
443 | unsigned int s390_noexec = 0; | ||
444 | EXPORT_SYMBOL_GPL(s390_noexec); | ||
445 | |||
446 | /* | ||
447 | * Enable execute protection? | ||
448 | */ | ||
449 | static int __init early_parse_noexec(char *p) | ||
450 | { | ||
451 | if (!strncmp(p, "off", 3)) | ||
452 | return 0; | ||
453 | switch_amode = 1; | ||
454 | s390_noexec = 1; | ||
455 | return 0; | ||
456 | } | ||
457 | early_param("noexec", early_parse_noexec); | ||
458 | #endif /* CONFIG_S390_EXEC_PROTECT */ | ||
459 | |||
460 | static void setup_addressing_mode(void) | ||
461 | { | ||
462 | if (s390_noexec) { | ||
463 | printk("S390 execute protection active, "); | ||
464 | set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY); | ||
465 | return; | ||
466 | } | ||
467 | if (switch_amode) { | ||
468 | printk("S390 address spaces switched, "); | ||
469 | set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY); | ||
470 | } | ||
471 | } | ||
472 | |||
389 | static void __init | 473 | static void __init |
390 | setup_lowcore(void) | 474 | setup_lowcore(void) |
391 | { | 475 | { |
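To make the interaction of the two new early parameters concrete (each only exists when the corresponding config option is built in): booting with noexec=on, or any value not beginning with "off", sets both s390_noexec and switch_amode, so setup_addressing_mode() puts user space into the secondary address space; booting with just switch_amode enables only the mode switch and selects the primary space for user code; with neither parameter, the psw_kernel_bits/psw_user_bits defaults defined at the top of this hunk stay in effect and the uaccess functions are left untouched.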
@@ -402,19 +486,21 @@ setup_lowcore(void) | |||
402 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; | 486 | lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; |
403 | lc->restart_psw.addr = | 487 | lc->restart_psw.addr = |
404 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | 488 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; |
405 | lc->external_new_psw.mask = PSW_KERNEL_BITS; | 489 | if (switch_amode) |
490 | lc->restart_psw.mask |= PSW_ASC_HOME; | ||
491 | lc->external_new_psw.mask = psw_kernel_bits; | ||
406 | lc->external_new_psw.addr = | 492 | lc->external_new_psw.addr = |
407 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | 493 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; |
408 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT; | 494 | lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT; |
409 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 495 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; |
410 | lc->program_new_psw.mask = PSW_KERNEL_BITS; | 496 | lc->program_new_psw.mask = psw_kernel_bits; |
411 | lc->program_new_psw.addr = | 497 | lc->program_new_psw.addr = |
412 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; | 498 | PSW_ADDR_AMODE | (unsigned long)pgm_check_handler; |
413 | lc->mcck_new_psw.mask = | 499 | lc->mcck_new_psw.mask = |
414 | PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; | 500 | psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT; |
415 | lc->mcck_new_psw.addr = | 501 | lc->mcck_new_psw.addr = |
416 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | 502 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; |
417 | lc->io_new_psw.mask = PSW_KERNEL_BITS; | 503 | lc->io_new_psw.mask = psw_kernel_bits; |
418 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 504 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
419 | lc->ipl_device = S390_lowcore.ipl_device; | 505 | lc->ipl_device = S390_lowcore.ipl_device; |
420 | lc->jiffy_timer = -1LL; | 506 | lc->jiffy_timer = -1LL; |
@@ -439,7 +525,7 @@ setup_lowcore(void) | |||
439 | static void __init | 525 | static void __init |
440 | setup_resources(void) | 526 | setup_resources(void) |
441 | { | 527 | { |
442 | struct resource *res; | 528 | struct resource *res, *sub_res; |
443 | int i; | 529 | int i; |
444 | 530 | ||
445 | code_resource.start = (unsigned long) &_text; | 531 | code_resource.start = (unsigned long) &_text; |
@@ -464,8 +550,38 @@ setup_resources(void) | |||
464 | res->start = memory_chunk[i].addr; | 550 | res->start = memory_chunk[i].addr; |
465 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; | 551 | res->end = memory_chunk[i].addr + memory_chunk[i].size - 1; |
466 | request_resource(&iomem_resource, res); | 552 | request_resource(&iomem_resource, res); |
467 | request_resource(res, &code_resource); | 553 | |
468 | request_resource(res, &data_resource); | 554 | if (code_resource.start >= res->start && |
555 | code_resource.start <= res->end && | ||
556 | code_resource.end > res->end) { | ||
557 | sub_res = alloc_bootmem_low(sizeof(struct resource)); | ||
558 | memcpy(sub_res, &code_resource, | ||
559 | sizeof(struct resource)); | ||
560 | sub_res->end = res->end; | ||
561 | code_resource.start = res->end + 1; | ||
562 | request_resource(res, sub_res); | ||
563 | } | ||
564 | |||
565 | if (code_resource.start >= res->start && | ||
566 | code_resource.start <= res->end && | ||
567 | code_resource.end <= res->end) | ||
568 | request_resource(res, &code_resource); | ||
569 | |||
570 | if (data_resource.start >= res->start && | ||
571 | data_resource.start <= res->end && | ||
572 | data_resource.end > res->end) { | ||
573 | sub_res = alloc_bootmem_low(sizeof(struct resource)); | ||
574 | memcpy(sub_res, &data_resource, | ||
575 | sizeof(struct resource)); | ||
576 | sub_res->end = res->end; | ||
577 | data_resource.start = res->end + 1; | ||
578 | request_resource(res, sub_res); | ||
579 | } | ||
580 | |||
581 | if (data_resource.start >= res->start && | ||
582 | data_resource.start <= res->end && | ||
583 | data_resource.end <= res->end) | ||
584 | request_resource(res, &data_resource); | ||
469 | } | 585 | } |
470 | } | 586 | } |
471 | 587 | ||
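The reworked resource registration covers the case where the kernel text or data straddles a boundary between two reported memory chunks. As a purely hypothetical example: if the code resource covered 0x100000-0x4fffff while memory was reported as one chunk ending at 0x2fffff and another starting at 0x300000, the first loop iteration would allocate a sub-resource for 0x100000-0x2fffff under the first chunk and bump code_resource.start to 0x300000, so the next iteration can register the remaining 0x300000-0x4fffff piece under the second chunk; the same two checks are then repeated for the data resource.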
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void) | |||
495 | } | 611 | } |
496 | if (!memory_end) | 612 | if (!memory_end) |
497 | memory_end = memory_size; | 613 | memory_end = memory_size; |
498 | if (real_size > memory_end) | ||
499 | printk("More memory detected than supported. Unused: %luk\n", | ||
500 | (real_size - memory_end) >> 10); | ||
501 | } | 614 | } |
502 | 615 | ||
503 | static void __init | 616 | static void __init |
504 | setup_memory(void) | 617 | setup_memory(void) |
505 | { | 618 | { |
506 | unsigned long bootmap_size; | 619 | unsigned long bootmap_size; |
507 | unsigned long start_pfn, end_pfn, init_pfn; | 620 | unsigned long start_pfn, end_pfn; |
508 | int i; | 621 | int i; |
509 | 622 | ||
510 | /* | 623 | /* |
@@ -514,10 +627,6 @@ setup_memory(void) | |||
514 | start_pfn = PFN_UP(__pa(&_end)); | 627 | start_pfn = PFN_UP(__pa(&_end)); |
515 | end_pfn = max_pfn = PFN_DOWN(memory_end); | 628 | end_pfn = max_pfn = PFN_DOWN(memory_end); |
516 | 629 | ||
517 | /* Initialize storage key for kernel pages */ | ||
518 | for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) | ||
519 | page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); | ||
520 | |||
521 | #ifdef CONFIG_BLK_DEV_INITRD | 630 | #ifdef CONFIG_BLK_DEV_INITRD |
522 | /* | 631 | /* |
523 | * Move the initrd in case the bitmap of the bootmem allocator | 632 |
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p) | |||
651 | parse_early_param(); | 760 | parse_early_param(); |
652 | 761 | ||
653 | setup_memory_end(); | 762 | setup_memory_end(); |
763 | setup_addressing_mode(); | ||
654 | setup_memory(); | 764 | setup_memory(); |
655 | setup_resources(); | 765 | setup_resources(); |
656 | setup_lowcore(); | 766 | setup_lowcore(); |
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
694 | struct cpuinfo_S390 *cpuinfo; | 804 | struct cpuinfo_S390 *cpuinfo; |
695 | unsigned long n = (unsigned long) v - 1; | 805 | unsigned long n = (unsigned long) v - 1; |
696 | 806 | ||
807 | s390_adjust_jiffies(); | ||
697 | preempt_disable(); | 808 | preempt_disable(); |
698 | if (!n) { | 809 | if (!n) { |
699 | seq_printf(m, "vendor_id : IBM/S390\n" | 810 | seq_printf(m, "vendor_id : IBM/S390\n" |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 4c8a7954ef48..554f9cf7499c 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs) | |||
119 | 119 | ||
120 | /* Copy a 'clean' PSW mask to the user to avoid leaking | 120 | /* Copy a 'clean' PSW mask to the user to avoid leaking |
121 | information about whether PER is currently on. */ | 121 | information about whether PER is currently on. */ |
122 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask); | 122 | user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask); |
123 | user_sregs.regs.psw.addr = regs->psw.addr; | 123 | user_sregs.regs.psw.addr = regs->psw.addr; |
124 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); | 124 | memcpy(&user_sregs.regs.gprs, ®s->gprs, sizeof(sregs->regs.gprs)); |
125 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, | 125 | memcpy(&user_sregs.regs.acrs, current->thread.acrs, |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c0cd255fddbd..65b52320d145 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -22,23 +22,23 @@ | |||
22 | 22 | ||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | |||
26 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
27 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
28 | #include <linux/kernel_stat.h> | 27 | #include <linux/kernel_stat.h> |
29 | #include <linux/smp_lock.h> | 28 | #include <linux/smp_lock.h> |
30 | |||
31 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
32 | #include <linux/cache.h> | 30 | #include <linux/cache.h> |
33 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
34 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
35 | 33 | #include <linux/timex.h> | |
34 | #include <asm/setup.h> | ||
36 | #include <asm/sigp.h> | 35 | #include <asm/sigp.h> |
37 | #include <asm/pgalloc.h> | 36 | #include <asm/pgalloc.h> |
38 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
39 | #include <asm/s390_ext.h> | 38 | #include <asm/s390_ext.h> |
40 | #include <asm/cpcmd.h> | 39 | #include <asm/cpcmd.h> |
41 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | #include <asm/timer.h> | ||
42 | 42 | ||
43 | extern volatile int __cpu_logical_map[]; | 43 | extern volatile int __cpu_logical_map[]; |
44 | 44 | ||
@@ -53,12 +53,6 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE; | |||
53 | 53 | ||
54 | static struct task_struct *current_set[NR_CPUS]; | 54 | static struct task_struct *current_set[NR_CPUS]; |
55 | 55 | ||
56 | /* | ||
57 | * Reboot, halt and power_off routines for SMP. | ||
58 | */ | ||
59 | extern char vmhalt_cmd[]; | ||
60 | extern char vmpoff_cmd[]; | ||
61 | |||
62 | static void smp_ext_bitcall(int, ec_bit_sig); | 56 | static void smp_ext_bitcall(int, ec_bit_sig); |
63 | static void smp_ext_bitcall_others(ec_bit_sig); | 57 | static void smp_ext_bitcall_others(ec_bit_sig); |
64 | 58 | ||
@@ -200,7 +194,7 @@ int smp_call_function_on(void (*func) (void *info), void *info, | |||
200 | } | 194 | } |
201 | EXPORT_SYMBOL(smp_call_function_on); | 195 | EXPORT_SYMBOL(smp_call_function_on); |
202 | 196 | ||
203 | static inline void do_send_stop(void) | 197 | static void do_send_stop(void) |
204 | { | 198 | { |
205 | int cpu, rc; | 199 | int cpu, rc; |
206 | 200 | ||
@@ -214,7 +208,7 @@ static inline void do_send_stop(void) | |||
214 | } | 208 | } |
215 | } | 209 | } |
216 | 210 | ||
217 | static inline void do_store_status(void) | 211 | static void do_store_status(void) |
218 | { | 212 | { |
219 | int cpu, rc; | 213 | int cpu, rc; |
220 | 214 | ||
@@ -230,7 +224,7 @@ static inline void do_store_status(void) | |||
230 | } | 224 | } |
231 | } | 225 | } |
232 | 226 | ||
233 | static inline void do_wait_for_stop(void) | 227 | static void do_wait_for_stop(void) |
234 | { | 228 | { |
235 | int cpu; | 229 | int cpu; |
236 | 230 | ||
@@ -250,7 +244,7 @@ static inline void do_wait_for_stop(void) | |||
250 | void smp_send_stop(void) | 244 | void smp_send_stop(void) |
251 | { | 245 | { |
252 | /* Disable all interrupts/machine checks */ | 246 | /* Disable all interrupts/machine checks */ |
253 | __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); | 247 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); |
254 | 248 | ||
255 | /* write magic number to zero page (absolute 0) */ | 249 | /* write magic number to zero page (absolute 0) */ |
256 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; | 250 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; |
@@ -298,7 +292,7 @@ void machine_power_off_smp(void) | |||
298 | * cpus are handled. | 292 | * cpus are handled. |
299 | */ | 293 | */ |
300 | 294 | ||
301 | void do_ext_call_interrupt(__u16 code) | 295 | static void do_ext_call_interrupt(__u16 code) |
302 | { | 296 | { |
303 | unsigned long bits; | 297 | unsigned long bits; |
304 | 298 | ||
@@ -385,7 +379,7 @@ struct ec_creg_mask_parms { | |||
385 | /* | 379 | /* |
386 | * callback for setting/clearing control bits | 380 | * callback for setting/clearing control bits |
387 | */ | 381 | */ |
388 | void smp_ctl_bit_callback(void *info) { | 382 | static void smp_ctl_bit_callback(void *info) { |
389 | struct ec_creg_mask_parms *pp = info; | 383 | struct ec_creg_mask_parms *pp = info; |
390 | unsigned long cregs[16]; | 384 | unsigned long cregs[16]; |
391 | int i; | 385 | int i; |
@@ -458,17 +452,15 @@ __init smp_count_cpus(void) | |||
458 | /* | 452 | /* |
459 | * Activate a secondary processor. | 453 | * Activate a secondary processor. |
460 | */ | 454 | */ |
461 | extern void init_cpu_timer(void); | ||
462 | extern void init_cpu_vtimer(void); | ||
463 | |||
464 | int __devinit start_secondary(void *cpuvoid) | 455 | int __devinit start_secondary(void *cpuvoid) |
465 | { | 456 | { |
466 | /* Setup the cpu */ | 457 | /* Setup the cpu */ |
467 | cpu_init(); | 458 | cpu_init(); |
468 | preempt_disable(); | 459 | preempt_disable(); |
469 | /* init per CPU timer */ | 460 | /* Enable TOD clock interrupts on the secondary cpu. */ |
470 | init_cpu_timer(); | 461 | init_cpu_timer(); |
471 | #ifdef CONFIG_VIRT_TIMER | 462 | #ifdef CONFIG_VIRT_TIMER |
463 | /* Enable cpu timer interrupts on the secondary cpu. */ | ||
472 | init_cpu_vtimer(); | 464 | init_cpu_vtimer(); |
473 | #endif | 465 | #endif |
474 | /* Enable pfault pseudo page faults on this cpu. */ | 466 | /* Enable pfault pseudo page faults on this cpu. */ |
@@ -542,7 +534,7 @@ smp_put_cpu(int cpu) | |||
542 | spin_unlock_irqrestore(&smp_reserve_lock, flags); | 534 | spin_unlock_irqrestore(&smp_reserve_lock, flags); |
543 | } | 535 | } |
544 | 536 | ||
545 | static inline int | 537 | static int |
546 | cpu_stopped(int cpu) | 538 | cpu_stopped(int cpu) |
547 | { | 539 | { |
548 | __u32 status; | 540 | __u32 status; |
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 0d14a4789bf2..2e5c65a1863e 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c | |||
@@ -11,11 +11,11 @@ | |||
11 | #include <linux/stacktrace.h> | 11 | #include <linux/stacktrace.h> |
12 | #include <linux/kallsyms.h> | 12 | #include <linux/kallsyms.h> |
13 | 13 | ||
14 | static inline unsigned long save_context_stack(struct stack_trace *trace, | 14 | static unsigned long save_context_stack(struct stack_trace *trace, |
15 | unsigned int *skip, | 15 | unsigned int *skip, |
16 | unsigned long sp, | 16 | unsigned long sp, |
17 | unsigned long low, | 17 | unsigned long low, |
18 | unsigned long high) | 18 | unsigned long high) |
19 | { | 19 | { |
20 | struct stack_frame *sf; | 20 | struct stack_frame *sf; |
21 | struct pt_regs *regs; | 21 | struct pt_regs *regs; |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 6cceed4df73e..3b91f27ab202 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -37,11 +37,15 @@ | |||
37 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/irq_regs.h> | 38 | #include <asm/irq_regs.h> |
39 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
40 | #include <asm/etr.h> | ||
40 | 41 | ||
41 | /* change this if you have some constant time drift */ | 42 | /* change this if you have some constant time drift */ |
42 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 43 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
43 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) | 44 | #define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12) |
44 | 45 | ||
46 | /* The value of the TOD clock for 1.1.1970. */ | ||
47 | #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL | ||
48 | |||
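As an aside, the two constants above can be cross-checked from the TOD clock format: the clock counts from 1900-01-01 and bit 51 ticks once per microsecond, so a TOD value is just microseconds shifted left by 12 (with HZ=100, CLK_TICKS_PER_JIFFY works out to 10000 << 12 = 40,960,000 units per jiffy). The following stand-alone sketch, illustrative only and not part of the patch, recomputes TOD_UNIX_EPOCH:

/* Illustrative userspace check of TOD_UNIX_EPOCH; not kernel code. */
#include <stdio.h>

int main(void)
{
	/* 1900-01-01 to 1970-01-01: 70 years, 17 of them leap years. */
	unsigned long long secs = (70ULL * 365 + 17) * 86400;	/* 2208988800 */
	/* TOD format: bit 51 ticks once per microsecond, i.e. usecs << 12. */
	unsigned long long tod = secs * 1000000ULL << 12;

	printf("%#llx\n", tod);	/* prints 0x7d91048bca000000 */
	return 0;
}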
45 | /* | 49 | /* |
46 | * Create a small time difference between the timer interrupts | 50 | * Create a small time difference between the timer interrupts |
47 | * on the different cpus to avoid lock contention. | 51 | * on the different cpus to avoid lock contention. |
@@ -51,6 +55,7 @@ | |||
51 | #define TICK_SIZE tick | 55 | #define TICK_SIZE tick |
52 | 56 | ||
53 | static ext_int_info_t ext_int_info_cc; | 57 | static ext_int_info_t ext_int_info_cc; |
58 | static ext_int_info_t ext_int_etr_cc; | ||
54 | static u64 init_timer_cc; | 59 | static u64 init_timer_cc; |
55 | static u64 jiffies_timer_cc; | 60 | static u64 jiffies_timer_cc; |
56 | static u64 xtime_cc; | 61 | static u64 xtime_cc; |
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) | |||
89 | #define s390_do_profile() do { ; } while(0) | 94 | #define s390_do_profile() do { ; } while(0) |
90 | #endif /* CONFIG_PROFILING */ | 95 | #endif /* CONFIG_PROFILING */ |
91 | 96 | ||
92 | |||
93 | /* | 97 | /* |
94 | * timer_interrupt() needs to keep up the real-time clock, | 98 | * Advance the per cpu tick counter up to the time given with the |
95 | * as well as call the "do_timer()" routine every clocktick | 99 | * "time" argument. The per cpu update consists of accounting |
100 | * the virtual cpu time, calling update_process_times and calling | ||
101 | * the profiling hook. If xtime is before time it is advanced as well. | ||
96 | */ | 102 | */ |
97 | void account_ticks(void) | 103 | void account_ticks(u64 time) |
98 | { | 104 | { |
99 | __u64 tmp; | ||
100 | __u32 ticks; | 105 | __u32 ticks; |
106 | __u64 tmp; | ||
101 | 107 | ||
102 | /* Calculate how many ticks have passed. */ | 108 | /* Calculate how many ticks have passed. */ |
103 | if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) { | 109 | if (time < S390_lowcore.jiffy_timer) |
104 | /* | ||
105 | * We have to program the clock comparator even if | ||
106 | * no tick has passed. That happens if e.g. an i/o | ||
107 | * interrupt wakes up an idle processor that has | ||
108 | * switched off its hz timer. | ||
109 | */ | ||
110 | tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION; | ||
111 | asm volatile ("SCKC %0" : : "m" (tmp)); | ||
112 | return; | 110 | return; |
113 | } | 111 | tmp = time - S390_lowcore.jiffy_timer; |
114 | tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer; | ||
115 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ | 112 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ |
116 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; | 113 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; |
117 | S390_lowcore.jiffy_timer += | 114 | S390_lowcore.jiffy_timer += |
@@ -124,10 +121,6 @@ void account_ticks(void) | |||
124 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; | 121 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; |
125 | } | 122 | } |
126 | 123 | ||
127 | /* set clock comparator for next tick */ | ||
128 | tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION; | ||
129 | asm volatile ("SCKC %0" : : "m" (tmp)); | ||
130 | |||
131 | #ifdef CONFIG_SMP | 124 | #ifdef CONFIG_SMP |
132 | /* | 125 | /* |
133 | * Do not rely on the boot cpu to do the calls to do_timer. | 126 | * Do not rely on the boot cpu to do the calls to do_timer. |
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1; | |||
173 | * Stop the HZ tick on the current CPU. | 166 | * Stop the HZ tick on the current CPU. |
174 | * Only cpu_idle may call this function. | 167 | * Only cpu_idle may call this function. |
175 | */ | 168 | */ |
176 | static inline void stop_hz_timer(void) | 169 | static void stop_hz_timer(void) |
177 | { | 170 | { |
178 | unsigned long flags; | 171 | unsigned long flags; |
179 | unsigned long seq, next; | 172 | unsigned long seq, next; |
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void) | |||
210 | if (timer >= jiffies_timer_cc) | 203 | if (timer >= jiffies_timer_cc) |
211 | todval = timer; | 204 | todval = timer; |
212 | } | 205 | } |
213 | asm volatile ("SCKC %0" : : "m" (todval)); | 206 | set_clock_comparator(todval); |
214 | } | 207 | } |
215 | 208 | ||
216 | /* | 209 | /* |
217 | * Start the HZ tick on the current CPU. | 210 | * Start the HZ tick on the current CPU. |
218 | * Only cpu_idle may call this function. | 211 | * Only cpu_idle may call this function. |
219 | */ | 212 | */ |
220 | static inline void start_hz_timer(void) | 213 | static void start_hz_timer(void) |
221 | { | 214 | { |
222 | BUG_ON(!in_interrupt()); | 215 | BUG_ON(!in_interrupt()); |
223 | 216 | ||
224 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) | 217 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) |
225 | return; | 218 | return; |
226 | account_ticks(); | 219 | account_ticks(get_clock()); |
220 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
227 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | 221 | cpu_clear(smp_processor_id(), nohz_cpu_mask); |
228 | } | 222 | } |
229 | 223 | ||
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = { | |||
245 | .notifier_call = nohz_idle_notify, | 239 | .notifier_call = nohz_idle_notify, |
246 | }; | 240 | }; |
247 | 241 | ||
248 | void __init nohz_init(void) | 242 | static void __init nohz_init(void) |
249 | { | 243 | { |
250 | if (register_idle_notifier(&nohz_idle_nb)) | 244 | if (register_idle_notifier(&nohz_idle_nb)) |
251 | panic("Couldn't register idle notifier"); | 245 | panic("Couldn't register idle notifier"); |
@@ -254,24 +248,57 @@ void __init nohz_init(void) | |||
254 | #endif | 248 | #endif |
255 | 249 | ||
256 | /* | 250 | /* |
257 | * Start the clock comparator on the current CPU. | 251 | * Set up per cpu jiffy timer and set the clock comparator. |
252 | */ | ||
253 | static void setup_jiffy_timer(void) | ||
254 | { | ||
255 | /* Set up clock comparator to next jiffy. */ | ||
256 | S390_lowcore.jiffy_timer = | ||
257 | jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY; | ||
258 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Set up lowcore and control register of the current cpu to | ||
263 | * enable TOD clock and clock comparator interrupts. | ||
258 | */ | 264 | */ |
259 | void init_cpu_timer(void) | 265 | void init_cpu_timer(void) |
260 | { | 266 | { |
261 | unsigned long cr0; | 267 | setup_jiffy_timer(); |
262 | __u64 timer; | ||
263 | 268 | ||
264 | timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY; | 269 | /* Enable clock comparator timer interrupt. */ |
265 | S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY; | 270 | __ctl_set_bit(0,11); |
266 | timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION; | 271 | |
267 | asm volatile ("SCKC %0" : : "m" (timer)); | 272 | /* Always allow ETR external interrupts, even without an ETR. */ |
268 | /* allow clock comparator timer interrupt */ | 273 | __ctl_set_bit(0, 4); |
269 | __ctl_store(cr0, 0, 0); | ||
270 | cr0 |= 0x800; | ||
271 | __ctl_load(cr0, 0, 0); | ||
272 | } | 274 | } |
273 | 275 | ||
274 | extern void vtime_init(void); | 276 | static void clock_comparator_interrupt(__u16 code) |
277 | { | ||
278 | /* set clock comparator for next tick */ | ||
279 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
280 | } | ||
281 | |||
282 | static void etr_reset(void); | ||
283 | static void etr_init(void); | ||
284 | static void etr_ext_handler(__u16); | ||
285 | |||
286 | /* | ||
287 | * Get the TOD clock running. | ||
288 | */ | ||
289 | static u64 __init reset_tod_clock(void) | ||
290 | { | ||
291 | u64 time; | ||
292 | |||
293 | etr_reset(); | ||
294 | if (store_clock(&time) == 0) | ||
295 | return time; | ||
296 | /* TOD clock not running. Set the clock to Unix Epoch. */ | ||
297 | if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0) | ||
298 | panic("TOD clock not operational."); | ||
299 | |||
300 | return TOD_UNIX_EPOCH; | ||
301 | } | ||
275 | 302 | ||
276 | static cycle_t read_tod_clock(void) | 303 | static cycle_t read_tod_clock(void) |
277 | { | 304 | { |
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = { | |||
295 | */ | 322 | */ |
296 | void __init time_init(void) | 323 | void __init time_init(void) |
297 | { | 324 | { |
298 | __u64 set_time_cc; | 325 | init_timer_cc = reset_tod_clock(); |
299 | int cc; | 326 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; |
300 | |||
301 | /* kick the TOD clock */ | ||
302 | asm volatile( | ||
303 | " stck 0(%2)\n" | ||
304 | " ipm %0\n" | ||
305 | " srl %0,28" | ||
306 | : "=d" (cc), "=m" (init_timer_cc) | ||
307 | : "a" (&init_timer_cc) : "cc"); | ||
308 | switch (cc) { | ||
309 | case 0: /* clock in set state: all is fine */ | ||
310 | break; | ||
311 | case 1: /* clock in non-set state: FIXME */ | ||
312 | printk("time_init: TOD clock in non-set state\n"); | ||
313 | break; | ||
314 | case 2: /* clock in error state: FIXME */ | ||
315 | printk("time_init: TOD clock in error state\n"); | ||
316 | break; | ||
317 | case 3: /* clock in stopped or not-operational state: FIXME */ | ||
318 | printk("time_init: TOD clock stopped/non-operational\n"); | ||
319 | break; | ||
320 | } | ||
321 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; | 327 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; |
322 | 328 | ||
323 | /* set xtime */ | 329 | /* set xtime */ |
324 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; | 330 | tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime); |
325 | set_time_cc = init_timer_cc - 0x8126d60e46000000LL + | ||
326 | (0x3c26700LL*1000000*4096); | ||
327 | tod_to_timeval(set_time_cc, &xtime); | ||
328 | set_normalized_timespec(&wall_to_monotonic, | 331 | set_normalized_timespec(&wall_to_monotonic, |
329 | -xtime.tv_sec, -xtime.tv_nsec); | 332 | -xtime.tv_sec, -xtime.tv_nsec); |
330 | 333 | ||
331 | /* request the clock comparator external interrupt */ | 334 | /* request the clock comparator external interrupt */ |
332 | if (register_early_external_interrupt(0x1004, NULL, | 335 | if (register_early_external_interrupt(0x1004, |
336 | clock_comparator_interrupt, | ||
333 | &ext_int_info_cc) != 0) | 337 | &ext_int_info_cc) != 0) |
334 | panic("Couldn't request external interrupt 0x1004"); | 338 | panic("Couldn't request external interrupt 0x1004"); |
335 | 339 | ||
336 | if (clocksource_register(&clocksource_tod) != 0) | 340 | if (clocksource_register(&clocksource_tod) != 0) |
337 | panic("Could not register TOD clock source"); | 341 | panic("Could not register TOD clock source"); |
338 | 342 | ||
339 | init_cpu_timer(); | 343 | /* request the etr external interrupt */ |
344 | if (register_early_external_interrupt(0x1406, etr_ext_handler, | ||
345 | &ext_int_etr_cc) != 0) | ||
346 | panic("Couldn't request external interrupt 0x1406"); | ||
347 | |||
348 | /* Enable TOD clock interrupts on the boot cpu. */ | ||
349 | init_cpu_timer(); | ||
340 | 350 | ||
341 | #ifdef CONFIG_NO_IDLE_HZ | 351 | #ifdef CONFIG_NO_IDLE_HZ |
342 | nohz_init(); | 352 | nohz_init(); |
@@ -345,5 +355,1048 @@ void __init time_init(void) | |||
345 | #ifdef CONFIG_VIRT_TIMER | 355 | #ifdef CONFIG_VIRT_TIMER |
346 | vtime_init(); | 356 | vtime_init(); |
347 | #endif | 357 | #endif |
358 | etr_init(); | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * External Time Reference (ETR) code. | ||
363 | */ | ||
364 | static int etr_port0_online; | ||
365 | static int etr_port1_online; | ||
366 | |||
367 | static int __init early_parse_etr(char *p) | ||
368 | { | ||
369 | if (strncmp(p, "off", 3) == 0) | ||
370 | etr_port0_online = etr_port1_online = 0; | ||
371 | else if (strncmp(p, "port0", 5) == 0) | ||
372 | etr_port0_online = 1; | ||
373 | else if (strncmp(p, "port1", 5) == 0) | ||
374 | etr_port1_online = 1; | ||
375 | else if (strncmp(p, "on", 2) == 0) | ||
376 | etr_port0_online = etr_port1_online = 1; | ||
377 | return 0; | ||
378 | } | ||
379 | early_param("etr", early_parse_etr); | ||
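For illustration, the values accepted by the early parameter above (read straight from early_parse_etr; with no parameter both ports stay offline) are:

	etr=off		keep both ETR ports offline (the default)
	etr=port0	bring ETR port 0 online
	etr=port1	bring ETR port 1 online
	etr=on		bring both ETR ports online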
380 | |||
381 | enum etr_event { | ||
382 | ETR_EVENT_PORT0_CHANGE, | ||
383 | ETR_EVENT_PORT1_CHANGE, | ||
384 | ETR_EVENT_PORT_ALERT, | ||
385 | ETR_EVENT_SYNC_CHECK, | ||
386 | ETR_EVENT_SWITCH_LOCAL, | ||
387 | ETR_EVENT_UPDATE, | ||
388 | }; | ||
389 | |||
390 | enum etr_flags { | ||
391 | ETR_FLAG_ENOSYS, | ||
392 | ETR_FLAG_EACCES, | ||
393 | ETR_FLAG_STEAI, | ||
394 | }; | ||
395 | |||
396 | /* | ||
397 | * Valid bit combinations of the eacr register are (x = don't care): | ||
398 | * e0 e1 dp p0 p1 ea es sl | ||
399 | * 0 0 x 0 0 0 0 0 initial, disabled state | ||
400 | * 0 0 x 0 1 1 0 0 port 1 online | ||
401 | * 0 0 x 1 0 1 0 0 port 0 online | ||
402 | * 0 0 x 1 1 1 0 0 both ports online | ||
403 | * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode | ||
404 | * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode | ||
405 | * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync | ||
406 | * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync | ||
407 | * 0 1 x 1 1 1 0 0 both ports online, port 1 usable | ||
408 | * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync | ||
409 | * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync | ||
410 | * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode | ||
411 | * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode | ||
412 | * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync | ||
413 | * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync | ||
414 | * 1 0 x 1 1 1 0 0 both ports online, port 0 usable | ||
415 | * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync | ||
416 | * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync | ||
417 | * 1 1 x 1 1 1 1 0 both ports online & usable, ETR, in-sync | ||
418 | * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync | ||
419 | */ | ||
420 | static struct etr_eacr etr_eacr; | ||
421 | static u64 etr_tolec; /* time of last eacr update */ | ||
422 | static unsigned long etr_flags; | ||
423 | static struct etr_aib etr_port0; | ||
424 | static int etr_port0_uptodate; | ||
425 | static struct etr_aib etr_port1; | ||
426 | static int etr_port1_uptodate; | ||
427 | static unsigned long etr_events; | ||
428 | static struct timer_list etr_timer; | ||
429 | static struct tasklet_struct etr_tasklet; | ||
430 | static DEFINE_PER_CPU(atomic_t, etr_sync_word); | ||
431 | |||
432 | static void etr_timeout(unsigned long dummy); | ||
433 | static void etr_tasklet_fn(unsigned long dummy); | ||
434 | |||
435 | /* | ||
436 | * The etr get_sync_clock function. It writes the current clock value | ||
437 | * to the clock pointer and returns 0 if the clock is in sync with the | ||
438 | * external time source. It returns -ENOSYS if the clock runs in | ||
439 | * local mode and -EAGAIN if the clock is not yet in sync with the | ||
440 | * external reference. This function is what ETR is all about. | ||
441 | */ | ||
442 | int get_sync_clock(unsigned long long *clock) | ||
443 | { | ||
444 | atomic_t *sw_ptr; | ||
445 | unsigned int sw0, sw1; | ||
446 | |||
447 | sw_ptr = &get_cpu_var(etr_sync_word); | ||
448 | sw0 = atomic_read(sw_ptr); | ||
449 | *clock = get_clock(); | ||
450 | sw1 = atomic_read(sw_ptr); | ||
451 | put_cpu_var(etr_sync_word); | ||
452 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | ||
453 | /* Success: time is in sync. */ | ||
454 | return 0; | ||
455 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
456 | return -ENOSYS; | ||
457 | if (test_bit(ETR_FLAG_EACCES, &etr_flags)) | ||
458 | return -EACCES; | ||
459 | return -EAGAIN; | ||
460 | } | ||
461 | EXPORT_SYMBOL(get_sync_clock); | ||
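A minimal sketch of a possible caller of the newly exported interface (hypothetical code, not taken from this patch): prefer a timestamp that is in sync with the external reference and fall back to the plain TOD clock otherwise.

/* Hypothetical caller of get_sync_clock(); not part of this patch. */
static unsigned long long get_timestamp(void)
{
	unsigned long long clk;

	if (get_sync_clock(&clk) == 0)
		return clk;	/* TOD clock is in sync with the ETR */
	/* -ENOSYS (local mode), -EACCES or -EAGAIN: use the local TOD clock. */
	return get_clock();
}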
462 | |||
463 | /* | ||
464 | * Make get_sync_clock return -EAGAIN. | ||
465 | */ | ||
466 | static void etr_disable_sync_clock(void *dummy) | ||
467 | { | ||
468 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
469 | /* | ||
470 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | ||
471 | * fail until the sync bit is turned back on. In addition | ||
472 | * increase the "sequence" counter so that get_sync_clock can | ||
473 | * detect a racing etr event followed by a complete recovery. | ||
474 | */ | ||
475 | atomic_clear_mask(0x80000000, sw_ptr); | ||
476 | atomic_inc(sw_ptr); | ||
477 | } | ||
478 | |||
479 | /* | ||
480 | * Make get_sync_clock return 0 again. | ||
481 | * Needs to be called from a context disabled for preemption. | ||
482 | */ | ||
483 | static void etr_enable_sync_clock(void) | ||
484 | { | ||
485 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
486 | atomic_set_mask(0x80000000, sw_ptr); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Reset ETR attachment. | ||
491 | */ | ||
492 | static void etr_reset(void) | ||
493 | { | ||
494 | etr_eacr = (struct etr_eacr) { | ||
495 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, | ||
496 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, | ||
497 | .es = 0, .sl = 0 }; | ||
498 | if (etr_setr(&etr_eacr) == 0) | ||
499 | etr_tolec = get_clock(); | ||
500 | else { | ||
501 | set_bit(ETR_FLAG_ENOSYS, &etr_flags); | ||
502 | if (etr_port0_online || etr_port1_online) { | ||
503 | printk(KERN_WARNING "Running on non ETR capable " | ||
504 | "machine, only local mode available.\n"); | ||
505 | etr_port0_online = etr_port1_online = 0; | ||
506 | } | ||
507 | } | ||
508 | } | ||
509 | |||
510 | static void etr_init(void) | ||
511 | { | ||
512 | struct etr_aib aib; | ||
513 | |||
514 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
515 | return; | ||
516 | /* Check if this machine has the steai instruction. */ | ||
517 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | ||
518 | set_bit(ETR_FLAG_STEAI, &etr_flags); | ||
519 | setup_timer(&etr_timer, etr_timeout, 0UL); | ||
520 | tasklet_init(&etr_tasklet, etr_tasklet_fn, 0); | ||
521 | if (!etr_port0_online && !etr_port1_online) | ||
522 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
523 | if (etr_port0_online) { | ||
524 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | ||
525 | tasklet_hi_schedule(&etr_tasklet); | ||
526 | } | ||
527 | if (etr_port1_online) { | ||
528 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | ||
529 | tasklet_hi_schedule(&etr_tasklet); | ||
530 | } | ||
531 | } | ||
532 | |||
533 | /* | ||
534 | * Two sorts of ETR machine checks. The architecture reads: | ||
535 | * "When a machine-check niterruption occurs and if a switch-to-local or | ||
536 | * ETR-sync-check interrupt request is pending but disabled, this pending | ||
537 | * disabled interruption request is indicated and is cleared". | ||
538 | * Which means that we can get etr_switch_to_local events from the machine | ||
539 | * check handler although the interruption condition is disabled. Lovely.. | ||
540 | */ | ||
541 | |||
542 | /* | ||
543 | * Switch to local machine check. This is called when the last usable | ||
544 | * ETR port goes inactive. After switch to local the clock is not in sync. | ||
545 | */ | ||
546 | void etr_switch_to_local(void) | ||
547 | { | ||
548 | if (!etr_eacr.sl) | ||
549 | return; | ||
550 | etr_disable_sync_clock(NULL); | ||
551 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); | ||
552 | tasklet_hi_schedule(&etr_tasklet); | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * ETR sync check machine check. This is called when the ETR OTE and the | ||
557 | * local clock OTE are farther apart than the ETR sync check tolerance. | ||
558 | * After an ETR sync check the clock is not in sync. The machine check | ||
559 | * is broadcast to all cpus at the same time. | ||
560 | */ | ||
561 | void etr_sync_check(void) | ||
562 | { | ||
563 | if (!etr_eacr.es) | ||
564 | return; | ||
565 | etr_disable_sync_clock(NULL); | ||
566 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); | ||
567 | tasklet_hi_schedule(&etr_tasklet); | ||
568 | } | ||
569 | |||
570 | /* | ||
571 | * ETR external interrupt. There are two causes: | ||
572 | * 1) port state change, check the usability of the port | ||
573 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the | ||
574 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) | ||
575 | * or ETR-data word 4 (edf4) has changed. | ||
576 | */ | ||
577 | static void etr_ext_handler(__u16 code) | ||
578 | { | ||
579 | struct etr_interruption_parameter *intparm = | ||
580 | (struct etr_interruption_parameter *) &S390_lowcore.ext_params; | ||
581 | |||
582 | if (intparm->pc0) | ||
583 | /* ETR port 0 state change. */ | ||
584 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | ||
585 | if (intparm->pc1) | ||
586 | /* ETR port 1 state change. */ | ||
587 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | ||
588 | if (intparm->eai) | ||
589 | /* | ||
590 | * ETR port alert on either port 0, 1 or both. | ||
591 | * Both ports are not up-to-date now. | ||
592 | */ | ||
593 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); | ||
594 | tasklet_hi_schedule(&etr_tasklet); | ||
595 | } | ||
596 | |||
597 | static void etr_timeout(unsigned long dummy) | ||
598 | { | ||
599 | set_bit(ETR_EVENT_UPDATE, &etr_events); | ||
600 | tasklet_hi_schedule(&etr_tasklet); | ||
601 | } | ||
602 | |||
603 | /* | ||
604 | * Check if the etr mode is pps. | ||
605 | */ | ||
606 | static inline int etr_mode_is_pps(struct etr_eacr eacr) | ||
607 | { | ||
608 | return eacr.es && !eacr.sl; | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Check if the etr mode is etr. | ||
613 | */ | ||
614 | static inline int etr_mode_is_etr(struct etr_eacr eacr) | ||
615 | { | ||
616 | return eacr.es && eacr.sl; | ||
617 | } | ||
618 | |||
619 | /* | ||
620 | * Check if the port can be used for TOD synchronization. | ||
621 | * For PPS mode the port has to receive OTEs. For ETR mode | ||
622 | * the port has to receive OTEs, the ETR stepping bit has to | ||
623 | * be zero and the validity bits for data frame 1, 2, and 3 | ||
624 | * have to be 1. | ||
625 | */ | ||
626 | static int etr_port_valid(struct etr_aib *aib, int port) | ||
627 | { | ||
628 | unsigned int psc; | ||
629 | |||
630 | /* Check that this port is receiving OTEs. */ | ||
631 | if (aib->tsp == 0) | ||
632 | return 0; | ||
633 | |||
634 | psc = port ? aib->esw.psc1 : aib->esw.psc0; | ||
635 | if (psc == etr_lpsc_pps_mode) | ||
636 | return 1; | ||
637 | if (psc == etr_lpsc_operational_step) | ||
638 | return !aib->esw.y && aib->slsw.v1 && | ||
639 | aib->slsw.v2 && aib->slsw.v3; | ||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | /* | ||
644 | * Check if two ports are on the same network. | ||
645 | */ | ||
646 | static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2) | ||
647 | { | ||
648 | // FIXME: any other fields we have to compare? | ||
649 | return aib1->edf1.net_id == aib2->edf1.net_id; | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * Wrapper for etr_steai that converts physical port states | ||
654 | * to logical port states to be consistent with the output | ||
655 | * of stetr (see etr_psc vs. etr_lpsc). | ||
656 | */ | ||
657 | static void etr_steai_cv(struct etr_aib *aib, unsigned int func) | ||
658 | { | ||
659 | BUG_ON(etr_steai(aib, func) != 0); | ||
660 | /* Convert port state to logical port state. */ | ||
661 | if (aib->esw.psc0 == 1) | ||
662 | aib->esw.psc0 = 2; | ||
663 | else if (aib->esw.psc0 == 0 && aib->esw.p == 0) | ||
664 | aib->esw.psc0 = 1; | ||
665 | if (aib->esw.psc1 == 1) | ||
666 | aib->esw.psc1 = 2; | ||
667 | else if (aib->esw.psc1 == 0 && aib->esw.p == 1) | ||
668 | aib->esw.psc1 = 1; | ||
669 | } | ||
670 | |||
671 | /* | ||
672 | * Check if the aib a2 is still connected to the same attachment as | ||
673 | * aib a1, the etv values differ by one and a2 is valid. | ||
674 | */ | ||
675 | static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | ||
676 | { | ||
677 | int state_a1, state_a2; | ||
678 | |||
679 | /* Paranoia check: e0/e1 should better be the same. */ | ||
680 | if (a1->esw.eacr.e0 != a2->esw.eacr.e0 || | ||
681 | a1->esw.eacr.e1 != a2->esw.eacr.e1) | ||
682 | return 0; | ||
683 | |||
684 | /* Still connected to the same etr ? */ | ||
685 | state_a1 = p ? a1->esw.psc1 : a1->esw.psc0; | ||
686 | state_a2 = p ? a2->esw.psc1 : a2->esw.psc0; | ||
687 | if (state_a1 == etr_lpsc_operational_step) { | ||
688 | if (state_a2 != etr_lpsc_operational_step || | ||
689 | a1->edf1.net_id != a2->edf1.net_id || | ||
690 | a1->edf1.etr_id != a2->edf1.etr_id || | ||
691 | a1->edf1.etr_pn != a2->edf1.etr_pn) | ||
692 | return 0; | ||
693 | } else if (state_a2 != etr_lpsc_pps_mode) | ||
694 | return 0; | ||
695 | |||
696 | /* The ETV value of a2 needs to be ETV of a1 + 1. */ | ||
697 | if (a1->edf2.etv + 1 != a2->edf2.etv) | ||
698 | return 0; | ||
699 | |||
700 | if (!etr_port_valid(a2, p)) | ||
701 | return 0; | ||
702 | |||
703 | return 1; | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * The time is "clock". xtime is what we think the time is. | ||
708 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | ||
709 | * "delay" is an approximation how long the synchronization took. If | ||
710 | * the time correction is positive, then "delay" is subtracted from | ||
711 | * the time difference and only the remaining part is passed to ntp. | ||
712 | */ | ||
713 | static void etr_adjust_time(unsigned long long clock, unsigned long long delay) | ||
714 | { | ||
715 | unsigned long long delta, ticks; | ||
716 | struct timex adjust; | ||
717 | |||
718 | /* | ||
719 | * We don't have to take the xtime lock because the cpu | ||
720 | * executing etr_adjust_time is running disabled in | ||
721 | * tasklet context and all other cpus are looping in | ||
722 | * etr_sync_cpu_start. | ||
723 | */ | ||
724 | if (clock > xtime_cc) { | ||
725 | /* It is later than we thought. */ | ||
726 | delta = ticks = clock - xtime_cc; | ||
727 | delta = ticks = (delta < delay) ? 0 : delta - delay; | ||
728 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
729 | init_timer_cc = init_timer_cc + delta; | ||
730 | jiffies_timer_cc = jiffies_timer_cc + delta; | ||
731 | xtime_cc = xtime_cc + delta; | ||
732 | adjust.offset = ticks * (1000000 / HZ); | ||
733 | } else { | ||
734 | /* It is earlier than we thought. */ | ||
735 | delta = ticks = xtime_cc - clock; | ||
736 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
737 | init_timer_cc = init_timer_cc - delta; | ||
738 | jiffies_timer_cc = jiffies_timer_cc - delta; | ||
739 | xtime_cc = xtime_cc - delta; | ||
740 | adjust.offset = -ticks * (1000000 / HZ); | ||
741 | } | ||
742 | if (adjust.offset != 0) { | ||
743 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | ||
744 | adjust.offset); | ||
745 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | ||
746 | do_adjtimex(&adjust); | ||
747 | } | ||
748 | } | ||
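A rough worked example of the "later than we thought" branch, assuming HZ=100 so that CLK_TICKS_PER_JIFFY is 40,960,000 TOD units per 10 ms jiffy: if clock - xtime_cc is 1,024,000,000 TOD units (0.25 s) and delay is negligible, do_div leaves ticks = 25 and delta rounded down to a multiple of CLK_TICKS_PER_JIFFY; init_timer_cc, jiffies_timer_cc and xtime_cc all advance by that delta, and adjust.offset = 25 * 10000 = 250,000 microseconds is handed to do_adjtimex.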
749 | |||
750 | static void etr_sync_cpu_start(void *dummy) | ||
751 | { | ||
752 | int *in_sync = dummy; | ||
753 | |||
754 | etr_enable_sync_clock(); | ||
755 | /* | ||
756 | * This looks like a busy wait loop but it isn't. etr_sync_cpu_start | ||
757 | * is called on all other cpus while the TOD clock is stopped. | ||
758 | * __udelay will stop the cpu on an enabled wait psw until the | ||
759 | * TOD is running again. | ||
760 | */ | ||
761 | while (*in_sync == 0) | ||
762 | __udelay(1); | ||
763 | if (*in_sync != 1) | ||
764 | /* Didn't work. Clear per-cpu in sync bit again. */ | ||
765 | etr_disable_sync_clock(NULL); | ||
766 | /* | ||
767 | * This round of TOD syncing is done. Set the clock comparator | ||
768 | * to the next tick and let the processor continue. | ||
769 | */ | ||
770 | setup_jiffy_timer(); | ||
771 | } | ||
772 | |||
773 | static void etr_sync_cpu_end(void *dummy) | ||
774 | { | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * Sync the TOD clock using the port referred to by aib. This port | ||
779 | * has to be enabled and the other port has to be disabled. The | ||
780 | * last eacr update has to be more than 1.6 seconds in the past. | ||
781 | */ | ||
782 | static int etr_sync_clock(struct etr_aib *aib, int port) | ||
783 | { | ||
784 | struct etr_aib *sync_port; | ||
785 | unsigned long long clock, delay; | ||
786 | int in_sync, follows; | ||
787 | int rc; | ||
788 | |||
789 | /* Check if the current aib is adjacent to the sync port aib. */ | ||
790 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
791 | follows = etr_aib_follows(sync_port, aib, port); | ||
792 | memcpy(sync_port, aib, sizeof(*aib)); | ||
793 | if (!follows) | ||
794 | return -EAGAIN; | ||
795 | |||
796 | /* | ||
797 | * Catch all other cpus and make them wait until we have | ||
798 | * successfully synced the clock. smp_call_function will | ||
799 | * return after all other cpus are in etr_sync_cpu_start. | ||
800 | */ | ||
801 | in_sync = 0; | ||
802 | preempt_disable(); | ||
803 | smp_call_function(etr_sync_cpu_start, &in_sync, 0, 0); | ||
804 | local_irq_disable(); | ||
805 | etr_enable_sync_clock(); | ||
806 | |||
807 | /* Set clock to next OTE. */ | ||
808 | __ctl_set_bit(14, 21); | ||
809 | __ctl_set_bit(0, 29); | ||
810 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; | ||
811 | if (set_clock(clock) == 0) { | ||
812 | __udelay(1); /* Wait for the clock to start. */ | ||
813 | __ctl_clear_bit(0, 29); | ||
814 | __ctl_clear_bit(14, 21); | ||
815 | etr_stetr(aib); | ||
816 | /* Adjust Linux timing variables. */ | ||
817 | delay = (unsigned long long) | ||
818 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | ||
819 | etr_adjust_time(clock, delay); | ||
820 | setup_jiffy_timer(); | ||
821 | /* Verify that the clock is properly set. */ | ||
822 | if (!etr_aib_follows(sync_port, aib, port)) { | ||
823 | /* Didn't work. */ | ||
824 | etr_disable_sync_clock(NULL); | ||
825 | in_sync = -EAGAIN; | ||
826 | rc = -EAGAIN; | ||
827 | } else { | ||
828 | in_sync = 1; | ||
829 | rc = 0; | ||
830 | } | ||
831 | } else { | ||
832 | /* Could not set the clock ?!? */ | ||
833 | __ctl_clear_bit(0, 29); | ||
834 | __ctl_clear_bit(14, 21); | ||
835 | etr_disable_sync_clock(NULL); | ||
836 | in_sync = -EAGAIN; | ||
837 | rc = -EAGAIN; | ||
838 | } | ||
839 | local_irq_enable(); | ||
840 | smp_call_function(etr_sync_cpu_end, NULL, 0, 0); | ||
841 | preempt_enable(); | ||
842 | return rc; | ||
843 | } | ||
844 | |||
845 | /* | ||
846 | * Handle the immediate effects of the different events. | ||
847 | * The port change event is used for online/offline changes. | ||
848 | */ | ||
849 | static struct etr_eacr etr_handle_events(struct etr_eacr eacr) | ||
850 | { | ||
851 | if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) | ||
852 | eacr.es = 0; | ||
853 | if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) | ||
854 | eacr.es = eacr.sl = 0; | ||
855 | if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events)) | ||
856 | etr_port0_uptodate = etr_port1_uptodate = 0; | ||
857 | |||
858 | if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) { | ||
859 | if (eacr.e0) | ||
860 | /* | ||
861 | * Port change of an enabled port. We have to | ||
862 | * assume that this may have caused a stepping | ||
863 | * port switch. | ||
864 | */ | ||
865 | etr_tolec = get_clock(); | ||
866 | eacr.p0 = etr_port0_online; | ||
867 | if (!eacr.p0) | ||
868 | eacr.e0 = 0; | ||
869 | etr_port0_uptodate = 0; | ||
870 | } | ||
871 | if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) { | ||
872 | if (eacr.e1) | ||
873 | /* | ||
874 | * Port change of an enabled port. We have to | ||
875 | * assume that this may have caused a stepping | ||
876 | * port switch. | ||
877 | */ | ||
878 | etr_tolec = get_clock(); | ||
879 | eacr.p1 = etr_port1_online; | ||
880 | if (!eacr.p1) | ||
881 | eacr.e1 = 0; | ||
882 | etr_port1_uptodate = 0; | ||
883 | } | ||
884 | clear_bit(ETR_EVENT_UPDATE, &etr_events); | ||
885 | return eacr; | ||
886 | } | ||
887 | |||
888 | /* | ||
889 | * Set up a timer that expires after the etr_tolec + 1.6 seconds if | ||
890 | * one of the ports needs an update. | ||
891 | */ | ||
892 | static void etr_set_tolec_timeout(unsigned long long now) | ||
893 | { | ||
894 | unsigned long micros; | ||
895 | |||
896 | if ((!etr_eacr.p0 || etr_port0_uptodate) && | ||
897 | (!etr_eacr.p1 || etr_port1_uptodate)) | ||
898 | return; | ||
899 | micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0; | ||
900 | micros = (micros > 1600000) ? 0 : 1600000 - micros; | ||
901 | mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1); | ||
902 | } | ||
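Worked example, again assuming HZ=100: if the last eacr update was 0.5 seconds ago, (now - etr_tolec) >> 12 yields micros = 500,000, so 1,600,000 - 500,000 = 1,100,000 microseconds remain and mod_timer arms the timer roughly 1.1 seconds from now, i.e. 1.6 seconds after etr_tolec, matching the 1.6 second window checked in etr_tasklet_fn.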
903 | |||
904 | /* | ||
905 | * Set up a timer that expires after 1/2 second. | ||
906 | */ | ||
907 | static void etr_set_sync_timeout(void) | ||
908 | { | ||
909 | mod_timer(&etr_timer, jiffies + HZ/2); | ||
910 | } | ||
911 | |||
912 | /* | ||
913 | * Update the aib information for one or both ports. | ||
914 | */ | ||
915 | static struct etr_eacr etr_handle_update(struct etr_aib *aib, | ||
916 | struct etr_eacr eacr) | ||
917 | { | ||
918 | /* With both ports disabled the aib information is useless. */ | ||
919 | if (!eacr.e0 && !eacr.e1) | ||
920 | return eacr; | ||
921 | |||
922 | /* Update port0 or port1 with aib stored in etr_tasklet_fn. */ | ||
923 | if (aib->esw.q == 0) { | ||
924 | /* Information for port 0 stored. */ | ||
925 | if (eacr.p0 && !etr_port0_uptodate) { | ||
926 | etr_port0 = *aib; | ||
927 | if (etr_port0_online) | ||
928 | etr_port0_uptodate = 1; | ||
929 | } | ||
930 | } else { | ||
931 | /* Information for port 1 stored. */ | ||
932 | if (eacr.p1 && !etr_port1_uptodate) { | ||
933 | etr_port1 = *aib; | ||
934 | if (etr_port1_online) | ||
935 | etr_port1_uptodate = 1; | ||
936 | } | ||
937 | } | ||
938 | |||
939 | /* | ||
940 | * Do not try to get the alternate port aib if the clock | ||
941 | * is not in sync yet. | ||
942 | */ | ||
943 | if (!eacr.es) | ||
944 | return eacr; | ||
945 | |||
946 | /* | ||
947 | * If steai is available we can get the information about | ||
948 | * the other port immediately. If only stetr is available the | ||
949 | * data-port bit toggle has to be used. | ||
950 | */ | ||
951 | if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { | ||
952 | if (eacr.p0 && !etr_port0_uptodate) { | ||
953 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); | ||
954 | etr_port0_uptodate = 1; | ||
955 | } | ||
956 | if (eacr.p1 && !etr_port1_uptodate) { | ||
957 | etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1); | ||
958 | etr_port1_uptodate = 1; | ||
959 | } | ||
960 | } else { | ||
961 | /* | ||
962 | * One port was updated above, if the other | ||
963 | * port is not uptodate toggle dp bit. | ||
964 | */ | ||
965 | if ((eacr.p0 && !etr_port0_uptodate) || | ||
966 | (eacr.p1 && !etr_port1_uptodate)) | ||
967 | eacr.dp ^= 1; | ||
968 | else | ||
969 | eacr.dp = 0; | ||
970 | } | ||
971 | return eacr; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * Write new etr control register if it differs from the current one. | ||
976 | * Update etr_tolec if the write may have changed the data port. | ||
977 | */ | ||
978 | static void etr_update_eacr(struct etr_eacr eacr) | ||
979 | { | ||
980 | int dp_changed; | ||
981 | |||
982 | if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0) | ||
983 | /* No change, return. */ | ||
984 | return; | ||
985 | /* | ||
986 | * The disable of an active port or the change of the data port | ||
987 | * bit can/will cause a change in the data port. | ||
988 | */ | ||
989 | dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 || | ||
990 | (etr_eacr.dp ^ eacr.dp) != 0; | ||
991 | etr_eacr = eacr; | ||
992 | etr_setr(&etr_eacr); | ||
993 | if (dp_changed) | ||
994 | etr_tolec = get_clock(); | ||
995 | } | ||
996 | |||
997 | /* | ||
998 | * ETR tasklet. In this function you'll find the main logic. In | ||
999 | * particular this is the only function that calls etr_update_eacr(); | ||
1000 | * it "controls" the etr control register. | ||
1001 | */ | ||
1002 | static void etr_tasklet_fn(unsigned long dummy) | ||
1003 | { | ||
1004 | unsigned long long now; | ||
1005 | struct etr_eacr eacr; | ||
1006 | struct etr_aib aib; | ||
1007 | int sync_port; | ||
1008 | |||
1009 | /* Create working copy of etr_eacr. */ | ||
1010 | eacr = etr_eacr; | ||
1011 | |||
1012 | /* Check for the different events and their immediate effects. */ | ||
1013 | eacr = etr_handle_events(eacr); | ||
1014 | |||
1015 | /* Check if ETR is supposed to be active. */ | ||
1016 | eacr.ea = eacr.p0 || eacr.p1; | ||
1017 | if (!eacr.ea) { | ||
1018 | /* Both ports offline. Reset everything. */ | ||
1019 | eacr.dp = eacr.es = eacr.sl = 0; | ||
1020 | on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); | ||
1021 | del_timer_sync(&etr_timer); | ||
1022 | etr_update_eacr(eacr); | ||
1023 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1024 | return; | ||
1025 | } | ||
1026 | |||
1027 | /* Store aib to get the current ETR status word. */ | ||
1028 | BUG_ON(etr_stetr(&aib) != 0); | ||
1029 | etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */ | ||
1030 | now = get_clock(); | ||
1031 | |||
1032 | /* | ||
1033 | * Update the port information if the last stepping port change | ||
1034 | * or data port change is older than 1.6 seconds. | ||
1035 | */ | ||
1036 | if (now >= etr_tolec + (1600000 << 12)) | ||
1037 | eacr = etr_handle_update(&aib, eacr); | ||
1038 | |||
1039 | /* | ||
1040 | * Select ports to enable. The preferred synchronization mode is PPS. | ||
1041 | * Whether a port can be enabled depends on a number of things: | ||
1042 | * 1) The port needs to be online and uptodate. A port is not | ||
1043 | * disabled just because it is not uptodate, but it is only | ||
1044 | * enabled if it is uptodate. | ||
1045 | * 2) The port needs to have the same mode (pps / etr). | ||
1046 | * 3) The port needs to be usable -> etr_port_valid() == 1 | ||
1047 | * 4) To enable the second port the clock needs to be in sync. | ||
1048 | * 5) If both ports are usable and are ETR ports, the network id | ||
1049 | * has to be the same. | ||
1050 | * The eacr.sl bit is used to indicate etr mode vs. pps mode. | ||
1051 | */ | ||
1052 | if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) { | ||
1053 | eacr.sl = 0; | ||
1054 | eacr.e0 = 1; | ||
1055 | if (!etr_mode_is_pps(etr_eacr)) | ||
1056 | eacr.es = 0; | ||
1057 | if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode) | ||
1058 | eacr.e1 = 0; | ||
1059 | // FIXME: uptodate checks ? | ||
1060 | else if (etr_port0_uptodate && etr_port1_uptodate) | ||
1061 | eacr.e1 = 1; | ||
1062 | sync_port = (etr_port0_uptodate && | ||
1063 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | ||
1064 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1065 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { | ||
1066 | eacr.sl = 0; | ||
1067 | eacr.e0 = 0; | ||
1068 | eacr.e1 = 1; | ||
1069 | if (!etr_mode_is_pps(etr_eacr)) | ||
1070 | eacr.es = 0; | ||
1071 | sync_port = (etr_port1_uptodate && | ||
1072 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | ||
1073 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1074 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { | ||
1075 | eacr.sl = 1; | ||
1076 | eacr.e0 = 1; | ||
1077 | if (!etr_mode_is_etr(etr_eacr)) | ||
1078 | eacr.es = 0; | ||
1079 | if (!eacr.es || !eacr.p1 || | ||
1080 | aib.esw.psc1 != etr_lpsc_operational_alt) | ||
1081 | eacr.e1 = 0; | ||
1082 | else if (etr_port0_uptodate && etr_port1_uptodate && | ||
1083 | etr_compare_network(&etr_port0, &etr_port1)) | ||
1084 | eacr.e1 = 1; | ||
1085 | sync_port = (etr_port0_uptodate && | ||
1086 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | ||
1087 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1088 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { | ||
1089 | eacr.sl = 1; | ||
1090 | eacr.e0 = 0; | ||
1091 | eacr.e1 = 1; | ||
1092 | if (!etr_mode_is_etr(etr_eacr)) | ||
1093 | eacr.es = 0; | ||
1094 | sync_port = (etr_port1_uptodate && | ||
1095 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | ||
1096 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1097 | } else { | ||
1098 | /* Both ports not usable. */ | ||
1099 | eacr.es = eacr.sl = 0; | ||
1100 | sync_port = -1; | ||
1101 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
1102 | } | ||
1103 | |||
1104 | /* | ||
1105 | * If the clock is in sync just update the eacr and return. | ||
1106 | * If there is no valid sync port wait for a port update. | ||
1107 | */ | ||
1108 | if (eacr.es || sync_port < 0) { | ||
1109 | etr_update_eacr(eacr); | ||
1110 | etr_set_tolec_timeout(now); | ||
1111 | return; | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * Prepare control register for clock syncing | ||
1116 | * (reset data port bit, set sync check control. | ||
1117 | */ | ||
1118 | eacr.dp = 0; | ||
1119 | eacr.es = 1; | ||
1120 | |||
1121 | /* | ||
1122 | * Update eacr and try to synchronize the clock. If the update | ||
1123 | * of eacr caused a stepping port switch (or if we have to | ||
1124 | * assume that a stepping port switch has occurred) or the | ||
1125 | * clock syncing failed, reset the sync check control bit | ||
1126 | * and set up a timer to try again after 0.5 seconds | ||
1127 | */ | ||
1128 | etr_update_eacr(eacr); | ||
1129 | if (now < etr_tolec + (1600000 << 12) || | ||
1130 | etr_sync_clock(&aib, sync_port) != 0) { | ||
1131 | /* Sync failed. Try again in 1/2 second. */ | ||
1132 | eacr.es = 0; | ||
1133 | etr_update_eacr(eacr); | ||
1134 | etr_set_sync_timeout(); | ||
1135 | } else | ||
1136 | etr_set_tolec_timeout(now); | ||
1137 | } | ||
1138 | |||
1139 | /* | ||
1140 | * Sysfs interface functions | ||
1141 | */ | ||
1142 | static struct sysdev_class etr_sysclass = { | ||
1143 | set_kset_name("etr") | ||
1144 | }; | ||
1145 | |||
1146 | static struct sys_device etr_port0_dev = { | ||
1147 | .id = 0, | ||
1148 | .cls = &etr_sysclass, | ||
1149 | }; | ||
1150 | |||
1151 | static struct sys_device etr_port1_dev = { | ||
1152 | .id = 1, | ||
1153 | .cls = &etr_sysclass, | ||
1154 | }; | ||
1155 | |||
1156 | /* | ||
1157 | * ETR class attributes | ||
1158 | */ | ||
1159 | static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf) | ||
1160 | { | ||
1161 | return sprintf(buf, "%i\n", etr_port0.esw.p); | ||
1162 | } | ||
1163 | |||
1164 | static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL); | ||
1165 | |||
1166 | static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf) | ||
1167 | { | ||
1168 | char *mode_str; | ||
1169 | |||
1170 | if (etr_mode_is_pps(etr_eacr)) | ||
1171 | mode_str = "pps"; | ||
1172 | else if (etr_mode_is_etr(etr_eacr)) | ||
1173 | mode_str = "etr"; | ||
1174 | else | ||
1175 | mode_str = "local"; | ||
1176 | return sprintf(buf, "%s\n", mode_str); | ||
1177 | } | ||
1178 | |||
1179 | static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL); | ||
1180 | |||
1181 | /* | ||
1182 | * ETR port attributes | ||
1183 | */ | ||
1184 | static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev) | ||
1185 | { | ||
1186 | if (dev == &etr_port0_dev) | ||
1187 | return etr_port0_online ? &etr_port0 : NULL; | ||
1188 | else | ||
1189 | return etr_port1_online ? &etr_port1 : NULL; | ||
1190 | } | ||
1191 | |||
1192 | static ssize_t etr_online_show(struct sys_device *dev, char *buf) | ||
1193 | { | ||
1194 | unsigned int online; | ||
1195 | |||
1196 | online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online; | ||
1197 | return sprintf(buf, "%i\n", online); | ||
1198 | } | ||
1199 | |||
1200 | static ssize_t etr_online_store(struct sys_device *dev, | ||
1201 | const char *buf, size_t count) | ||
1202 | { | ||
1203 | unsigned int value; | ||
1204 | |||
1205 | value = simple_strtoul(buf, NULL, 0); | ||
1206 | if (value != 0 && value != 1) | ||
1207 | return -EINVAL; | ||
1208 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
1209 | return -ENOSYS; | ||
1210 | if (dev == &etr_port0_dev) { | ||
1211 | if (etr_port0_online == value) | ||
1212 | return count; /* Nothing to do. */ | ||
1213 | etr_port0_online = value; | ||
1214 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | ||
1215 | tasklet_hi_schedule(&etr_tasklet); | ||
1216 | } else { | ||
1217 | if (etr_port1_online == value) | ||
1218 | return count; /* Nothing to do. */ | ||
1219 | etr_port1_online = value; | ||
1220 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | ||
1221 | tasklet_hi_schedule(&etr_tasklet); | ||
1222 | } | ||
1223 | return count; | ||
1224 | } | ||
1225 | |||
1226 | static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store); | ||
1227 | |||
1228 | static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf) | ||
1229 | { | ||
1230 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? | ||
1231 | etr_eacr.e0 : etr_eacr.e1); | ||
1232 | } | ||
1233 | |||
1234 | static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); | ||
1235 | |||
1236 | static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf) | ||
1237 | { | ||
1238 | if (!etr_port0_online && !etr_port1_online) | ||
1239 | /* Status word is not uptodate if both ports are offline. */ | ||
1240 | return -ENODATA; | ||
1241 | return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? | ||
1242 | etr_port0.esw.psc0 : etr_port0.esw.psc1); | ||
1243 | } | ||
1244 | |||
1245 | static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL); | ||
1246 | |||
1247 | static ssize_t etr_untuned_show(struct sys_device *dev, char *buf) | ||
1248 | { | ||
1249 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1250 | |||
1251 | if (!aib || !aib->slsw.v1) | ||
1252 | return -ENODATA; | ||
1253 | return sprintf(buf, "%i\n", aib->edf1.u); | ||
1254 | } | ||
1255 | |||
1256 | static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL); | ||
1257 | |||
1258 | static ssize_t etr_network_id_show(struct sys_device *dev, char *buf) | ||
1259 | { | ||
1260 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1261 | |||
1262 | if (!aib || !aib->slsw.v1) | ||
1263 | return -ENODATA; | ||
1264 | return sprintf(buf, "%i\n", aib->edf1.net_id); | ||
1265 | } | ||
1266 | |||
1267 | static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL); | ||
1268 | |||
1269 | static ssize_t etr_id_show(struct sys_device *dev, char *buf) | ||
1270 | { | ||
1271 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1272 | |||
1273 | if (!aib || !aib->slsw.v1) | ||
1274 | return -ENODATA; | ||
1275 | return sprintf(buf, "%i\n", aib->edf1.etr_id); | ||
1276 | } | ||
1277 | |||
1278 | static SYSDEV_ATTR(id, 0400, etr_id_show, NULL); | ||
1279 | |||
1280 | static ssize_t etr_port_number_show(struct sys_device *dev, char *buf) | ||
1281 | { | ||
1282 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1283 | |||
1284 | if (!aib || !aib->slsw.v1) | ||
1285 | return -ENODATA; | ||
1286 | return sprintf(buf, "%i\n", aib->edf1.etr_pn); | ||
1287 | } | ||
1288 | |||
1289 | static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL); | ||
1290 | |||
1291 | static ssize_t etr_coupled_show(struct sys_device *dev, char *buf) | ||
1292 | { | ||
1293 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1294 | |||
1295 | if (!aib || !aib->slsw.v3) | ||
1296 | return -ENODATA; | ||
1297 | return sprintf(buf, "%i\n", aib->edf3.c); | ||
1298 | } | ||
1299 | |||
1300 | static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL); | ||
1301 | |||
1302 | static ssize_t etr_local_time_show(struct sys_device *dev, char *buf) | ||
1303 | { | ||
1304 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1305 | |||
1306 | if (!aib || !aib->slsw.v3) | ||
1307 | return -ENODATA; | ||
1308 | return sprintf(buf, "%i\n", aib->edf3.blto); | ||
1309 | } | ||
1310 | |||
1311 | static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL); | ||
1312 | |||
1313 | static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf) | ||
1314 | { | ||
1315 | struct etr_aib *aib = etr_aib_from_dev(dev); | ||
1316 | |||
1317 | if (!aib || !aib->slsw.v3) | ||
1318 | return -ENODATA; | ||
1319 | return sprintf(buf, "%i\n", aib->edf3.buo); | ||
1320 | } | ||
1321 | |||
1322 | static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL); | ||
1323 | |||
1324 | static struct sysdev_attribute *etr_port_attributes[] = { | ||
1325 | &attr_online, | ||
1326 | &attr_stepping_control, | ||
1327 | &attr_state_code, | ||
1328 | &attr_untuned, | ||
1329 | &attr_network, | ||
1330 | &attr_id, | ||
1331 | &attr_port, | ||
1332 | &attr_coupled, | ||
1333 | &attr_local_time, | ||
1334 | &attr_utc_offset, | ||
1335 | NULL | ||
1336 | }; | ||
1337 | |||
1338 | static int __init etr_register_port(struct sys_device *dev) | ||
1339 | { | ||
1340 | struct sysdev_attribute **attr; | ||
1341 | int rc; | ||
1342 | |||
1343 | rc = sysdev_register(dev); | ||
1344 | if (rc) | ||
1345 | goto out; | ||
1346 | for (attr = etr_port_attributes; *attr; attr++) { | ||
1347 | rc = sysdev_create_file(dev, *attr); | ||
1348 | if (rc) | ||
1349 | goto out_unreg; | ||
1350 | } | ||
1351 | return 0; | ||
1352 | out_unreg: | ||
1353 | for (; attr >= etr_port_attributes; attr--) | ||
1354 | sysdev_remove_file(dev, *attr); | ||
1355 | sysdev_unregister(dev); | ||
1356 | out: | ||
1357 | return rc; | ||
1358 | } | ||
1359 | |||
1360 | static void __init etr_unregister_port(struct sys_device *dev) | ||
1361 | { | ||
1362 | struct sysdev_attribute **attr; | ||
1363 | |||
1364 | for (attr = etr_port_attributes; *attr; attr++) | ||
1365 | sysdev_remove_file(dev, *attr); | ||
1366 | sysdev_unregister(dev); | ||
1367 | } | ||
1368 | |||
1369 | static int __init etr_init_sysfs(void) | ||
1370 | { | ||
1371 | int rc; | ||
1372 | |||
1373 | rc = sysdev_class_register(&etr_sysclass); | ||
1374 | if (rc) | ||
1375 | goto out; | ||
1376 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port); | ||
1377 | if (rc) | ||
1378 | goto out_unreg_class; | ||
1379 | rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode); | ||
1380 | if (rc) | ||
1381 | goto out_remove_stepping_port; | ||
1382 | rc = etr_register_port(&etr_port0_dev); | ||
1383 | if (rc) | ||
1384 | goto out_remove_stepping_mode; | ||
1385 | rc = etr_register_port(&etr_port1_dev); | ||
1386 | if (rc) | ||
1387 | goto out_remove_port0; | ||
1388 | return 0; | ||
1389 | |||
1390 | out_remove_port0: | ||
1391 | etr_unregister_port(&etr_port0_dev); | ||
1392 | out_remove_stepping_mode: | ||
1393 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode); | ||
1394 | out_remove_stepping_port: | ||
1395 | sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port); | ||
1396 | out_unreg_class: | ||
1397 | sysdev_class_unregister(&etr_sysclass); | ||
1398 | out: | ||
1399 | return rc; | ||
348 | } | 1400 | } |
349 | 1401 | ||
1402 | device_initcall(etr_init_sysfs); | ||
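Note on the ETR sysfs interface registered above: each port device carries the ten read-only (0400) attributes collected in etr_port_attributes, and the class itself gets the stepping_port and stepping_mode files. Assuming the sysdev class is named "etr" and the two port devices use ids 0 and 1 (their definitions are earlier in time.c and not part of this hunk), the resulting layout is roughly:

  /sys/devices/system/etr/
      stepping_port
      stepping_mode
      etr0/   online  stepping_control  state_code  untuned  network
              id  port  coupled  local_time  utc_offset
      etr1/   (same attribute set)

Each per-port attribute returns -ENODATA until an aib (ETR attachment information block) with the required slsw version bit has been retrieved for that port.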
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 3cbb0dcf1f1d..f0e5a320e2ec 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer) | |||
283 | return buffer; | 283 | return buffer; |
284 | } | 284 | } |
285 | 285 | ||
286 | DEFINE_SPINLOCK(die_lock); | 286 | static DEFINE_SPINLOCK(die_lock); |
287 | 287 | ||
288 | void die(const char * str, struct pt_regs * regs, long err) | 288 | void die(const char * str, struct pt_regs * regs, long err) |
289 | { | 289 | { |
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs) | |||
364 | force_sig(SIGTRAP, current); | 364 | force_sig(SIGTRAP, current); |
365 | } | 365 | } |
366 | 366 | ||
367 | asmlinkage void | 367 | static void default_trap_handler(struct pt_regs * regs, long interruption_code) |
368 | default_trap_handler(struct pt_regs * regs, long interruption_code) | ||
369 | { | 368 | { |
370 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 369 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
371 | local_irq_enable(); | 370 | local_irq_enable(); |
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code) | |||
376 | } | 375 | } |
377 | 376 | ||
378 | #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ | 377 | #define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \ |
379 | asmlinkage void name(struct pt_regs * regs, long interruption_code) \ | 378 | static void name(struct pt_regs * regs, long interruption_code) \ |
380 | { \ | 379 | { \ |
381 | siginfo_t info; \ | 380 | siginfo_t info; \ |
382 | info.si_signo = signr; \ | 381 | info.si_signo = signr; \ |
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location, | |||
442 | "floating point exception", regs, &si); | 441 | "floating point exception", regs, &si); |
443 | } | 442 | } |
444 | 443 | ||
445 | asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | 444 | static void illegal_op(struct pt_regs * regs, long interruption_code) |
446 | { | 445 | { |
447 | siginfo_t info; | 446 | siginfo_t info; |
448 | __u8 opcode[6]; | 447 | __u8 opcode[6]; |
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | |||
491 | #endif | 490 | #endif |
492 | } else | 491 | } else |
493 | signal = SIGILL; | 492 | signal = SIGILL; |
494 | } else | 493 | } else { |
495 | signal = SIGILL; | 494 | /* |
495 | * If we get an illegal op in kernel mode, send it through the | ||
496 | * kprobes notifier. If kprobes doesn't pick it up, SIGILL | ||
497 | */ | ||
498 | if (notify_die(DIE_BPT, "bpt", regs, interruption_code, | ||
499 | 3, SIGTRAP) != NOTIFY_STOP) | ||
500 | signal = SIGILL; | ||
501 | } | ||
496 | 502 | ||
497 | #ifdef CONFIG_MATHEMU | 503 | #ifdef CONFIG_MATHEMU |
498 | if (signal == SIGFPE) | 504 | if (signal == SIGFPE) |
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception, | |||
585 | ILL_ILLOPN, get_check_address(regs)); | 591 | ILL_ILLOPN, get_check_address(regs)); |
586 | #endif | 592 | #endif |
587 | 593 | ||
588 | asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) | 594 | static void data_exception(struct pt_regs * regs, long interruption_code) |
589 | { | 595 | { |
590 | __u16 __user *location; | 596 | __u16 __user *location; |
591 | int signal = 0; | 597 | int signal = 0; |
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code) | |||
675 | } | 681 | } |
676 | } | 682 | } |
677 | 683 | ||
678 | asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code) | 684 | static void space_switch_exception(struct pt_regs * regs, long int_code) |
679 | { | 685 | { |
680 | siginfo_t info; | 686 | siginfo_t info; |
681 | 687 | ||
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index fe0f2e97ba7b..a48907392522 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -31,18 +31,19 @@ SECTIONS | |||
31 | 31 | ||
32 | _etext = .; /* End of text section */ | 32 | _etext = .; /* End of text section */ |
33 | 33 | ||
34 | . = ALIGN(16); /* Exception table */ | ||
35 | __start___ex_table = .; | ||
36 | __ex_table : { *(__ex_table) } | ||
37 | __stop___ex_table = .; | ||
38 | |||
39 | RODATA | 34 | RODATA |
40 | 35 | ||
41 | #ifdef CONFIG_SHARED_KERNEL | 36 | #ifdef CONFIG_SHARED_KERNEL |
42 | . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ | 37 | . = ALIGN(1048576); /* VM shared segments are 1MB aligned */ |
38 | #endif | ||
43 | 39 | ||
40 | . = ALIGN(4096); | ||
44 | _eshared = .; /* End of shareable data */ | 41 | _eshared = .; /* End of shareable data */ |
45 | #endif | 42 | |
43 | . = ALIGN(16); /* Exception table */ | ||
44 | __start___ex_table = .; | ||
45 | __ex_table : { *(__ex_table) } | ||
46 | __stop___ex_table = .; | ||
46 | 47 | ||
47 | .data : { /* Data */ | 48 | .data : { /* Data */ |
48 | *(.data) | 49 | *(.data) |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 21baaf5496d6..9d5b02801b46 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/irq_regs.h> | 25 | #include <asm/irq_regs.h> |
26 | 26 | ||
27 | static ext_int_info_t ext_int_info_timer; | 27 | static ext_int_info_t ext_int_info_timer; |
28 | DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | 28 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); |
29 | 29 | ||
30 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 30 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
31 | /* | 31 | /* |
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer); | |||
524 | void init_cpu_vtimer(void) | 524 | void init_cpu_vtimer(void) |
525 | { | 525 | { |
526 | struct vtimer_queue *vt_list; | 526 | struct vtimer_queue *vt_list; |
527 | unsigned long cr0; | ||
528 | 527 | ||
529 | /* kick the virtual timer */ | 528 | /* kick the virtual timer */ |
530 | S390_lowcore.exit_timer = VTIMER_MAX_SLICE; | 529 | S390_lowcore.exit_timer = VTIMER_MAX_SLICE; |
531 | S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; | 530 | S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; |
532 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); | 531 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); |
533 | asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); | 532 | asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); |
534 | __ctl_store(cr0, 0, 0); | 533 | |
535 | cr0 |= 0x400; | 534 | /* enable cpu timer interrupts */ |
536 | __ctl_load(cr0, 0, 0); | 535 | __ctl_set_bit(0,10); |
537 | 536 | ||
538 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); | 537 | vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); |
539 | INIT_LIST_HEAD(&vt_list->list); | 538 | INIT_LIST_HEAD(&vt_list->list); |
@@ -572,6 +571,7 @@ void __init vtime_init(void) | |||
572 | if (register_idle_notifier(&vtimer_idle_nb)) | 571 | if (register_idle_notifier(&vtimer_idle_nb)) |
573 | panic("Couldn't register idle notifier"); | 572 | panic("Couldn't register idle notifier"); |
574 | 573 | ||
574 | /* Enable cpu timer interrupts on the boot cpu. */ | ||
575 | init_cpu_vtimer(); | 575 | init_cpu_vtimer(); |
576 | } | 576 | } |
577 | 577 | ||
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index b5f94cf3bde8..7a44fed21b35 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o | 7 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o qrnnd.o |
8 | lib-$(CONFIG_32BIT) += div64.o | 8 | lib-$(CONFIG_32BIT) += div64.o |
9 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o | 9 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o |
10 | lib-$(CONFIG_SMP) += spinlock.o | 10 | lib-$(CONFIG_SMP) += spinlock.o |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 027c4742a001..02854449b74b 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/delay.c | 2 | * arch/s390/lib/delay.c |
3 | * Precise Delay Loops for S390 | 3 | * Precise Delay Loops for S390 |
4 | * | 4 | * |
5 | * S390 version | 5 | * S390 version |
@@ -13,10 +13,8 @@ | |||
13 | 13 | ||
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | 16 | #include <linux/timex.h> | |
17 | #ifdef CONFIG_SMP | 17 | #include <linux/irqflags.h> |
18 | #include <asm/smp.h> | ||
19 | #endif | ||
20 | 18 | ||
21 | void __delay(unsigned long loops) | 19 | void __delay(unsigned long loops) |
22 | { | 20 | { |
@@ -31,17 +29,39 @@ void __delay(unsigned long loops) | |||
31 | } | 29 | } |
32 | 30 | ||
33 | /* | 31 | /* |
34 | * Waits for 'usecs' microseconds using the tod clock, giving up the time slice | 32 | * Waits for 'usecs' microseconds using the TOD clock comparator. |
35 | * of the virtual PU inbetween to avoid congestion. | ||
36 | */ | 33 | */ |
37 | void __udelay(unsigned long usecs) | 34 | void __udelay(unsigned long usecs) |
38 | { | 35 | { |
39 | uint64_t start_cc; | 36 | u64 end, time, jiffy_timer = 0; |
37 | unsigned long flags, cr0, mask, dummy; | ||
38 | |||
39 | local_irq_save(flags); | ||
40 | if (raw_irqs_disabled_flags(flags)) { | ||
41 | jiffy_timer = S390_lowcore.jiffy_timer; | ||
42 | S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); | ||
43 | __ctl_store(cr0, 0, 0); | ||
44 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | ||
45 | __ctl_load(dummy , 0, 0); | ||
46 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | ||
47 | } else | ||
48 | mask = psw_kernel_bits | PSW_MASK_WAIT | | ||
49 | PSW_MASK_EXT | PSW_MASK_IO; | ||
50 | |||
51 | end = get_clock() + ((u64) usecs << 12); | ||
52 | do { | ||
53 | time = end < S390_lowcore.jiffy_timer ? | ||
54 | end : S390_lowcore.jiffy_timer; | ||
55 | set_clock_comparator(time); | ||
56 | trace_hardirqs_on(); | ||
57 | __load_psw_mask(mask); | ||
58 | local_irq_disable(); | ||
59 | } while (get_clock() < end); | ||
40 | 60 | ||
41 | if (usecs == 0) | 61 | if (raw_irqs_disabled_flags(flags)) { |
42 | return; | 62 | __ctl_load(cr0, 0, 0); |
43 | start_cc = get_clock(); | 63 | S390_lowcore.jiffy_timer = jiffy_timer; |
44 | do { | 64 | } |
45 | cpu_relax(); | 65 | set_clock_comparator(S390_lowcore.jiffy_timer); |
46 | } while (((get_clock() - start_cc)/4096) < usecs); | 66 | local_irq_restore(flags); |
47 | } | 67 | } |
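The rewritten __udelay() above no longer spins on cpu_relax(); it programs the clock comparator, waits in an enabled-wait PSW until either the requested time or the pending jiffy timer is due, and restores the old comparator value afterwards. The only arithmetic involved is the microsecond-to-TOD conversion: the s390 TOD clock advances 4096 units per microsecond (bit 51 of the clock is roughly 1 microsecond), hence the "usecs << 12". A minimal user-space sketch of that conversion and of the waiting condition, with get_clock() as a stand-in for the real TOD accessor:

#include <stdint.h>

/* Sketch: 1 microsecond == 4096 TOD clock units, mirroring "usecs << 12". */
static inline uint64_t usecs_to_tod(uint64_t usecs)
{
	return usecs << 12;
}

/*
 * Waiting logic only; the real code sets the clock comparator and loads a
 * wait PSW instead of busy-polling, and clamps the wakeup to the pending
 * jiffy timer so regular timer interrupts are not lost.
 */
static void udelay_sketch(uint64_t usecs, uint64_t (*get_clock)(void))
{
	uint64_t end = get_clock() + usecs_to_tod(usecs);

	while (get_clock() < end)
		;	/* stand-in for: set_clock_comparator(end); wait */
}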
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S new file mode 100644 index 000000000000..eb1df632e749 --- /dev/null +++ b/arch/s390/lib/qrnnd.S | |||
@@ -0,0 +1,77 @@ | |||
1 | # S/390 __udiv_qrnnd | ||
2 | |||
3 | # r2 : &__r | ||
4 | # r3 : upper half of 64 bit word n | ||
5 | # r4 : lower half of 64 bit word n | ||
6 | # r5 : divisor d | ||
7 | # the remainder r of the division is to be stored to &__r and | ||
8 | # the quotient q is to be returned | ||
9 | |||
10 | .text | ||
11 | .globl __udiv_qrnnd | ||
12 | __udiv_qrnnd: | ||
13 | st %r2,24(%r15) # store pointer to remainder for later | ||
14 | lr %r0,%r3 # reload n | ||
15 | lr %r1,%r4 | ||
16 | ltr %r2,%r5 # reload and test divisor | ||
17 | jp 5f | ||
18 | # divisor >= 0x80000000 | ||
19 | srdl %r0,2 # n/4 | ||
20 | srl %r2,1 # d/2 | ||
21 | slr %r1,%r2 # special case if last bit of d is set | ||
22 | brc 3,0f # (n/4) div (n/2) can overflow by 1 | ||
23 | ahi %r0,-1 # trick: subtract n/2, then divide | ||
24 | 0: dr %r0,%r2 # signed division | ||
25 | ahi %r1,1 # trick part 2: add 1 to the quotient | ||
26 | # now (n >> 2) = (d >> 1) * %r1 + %r0 | ||
27 | lhi %r3,1 | ||
28 | nr %r3,%r1 # test last bit of q | ||
29 | jz 1f | ||
30 | alr %r0,%r2 # add (d>>1) to r | ||
31 | 1: srl %r1,1 # q >>= 1 | ||
32 | # now (n >> 2) = (d&-2) * %r1 + %r0 | ||
33 | lhi %r3,1 | ||
34 | nr %r3,%r5 # test last bit of d | ||
35 | jz 2f | ||
36 | slr %r0,%r1 # r -= q | ||
37 | brc 3,2f # borrow ? | ||
38 | alr %r0,%r5 # r += d | ||
39 | ahi %r1,-1 | ||
40 | 2: # now (n >> 2) = d * %r1 + %r0 | ||
41 | alr %r1,%r1 # q <<= 1 | ||
42 | alr %r0,%r0 # r <<= 1 | ||
43 | brc 12,3f # overflow on r ? | ||
44 | slr %r0,%r5 # r -= d | ||
45 | ahi %r1,1 # q += 1 | ||
46 | 3: lhi %r3,2 | ||
47 | nr %r3,%r4 # test next to last bit of n | ||
48 | jz 4f | ||
49 | ahi %r0,1 # r += 1 | ||
50 | 4: clr %r0,%r5 # r >= d ? | ||
51 | jl 6f | ||
52 | slr %r0,%r5 # r -= d | ||
53 | ahi %r1,1 # q += 1 | ||
54 | # now (n >> 1) = d * %r1 + %r0 | ||
55 | j 6f | ||
56 | 5: # divisor < 0x80000000 | ||
57 | srdl %r0,1 | ||
58 | dr %r0,%r2 # signed division | ||
59 | # now (n >> 1) = d * %r1 + %r0 | ||
60 | 6: alr %r1,%r1 # q <<= 1 | ||
61 | alr %r0,%r0 # r <<= 1 | ||
62 | brc 12,7f # overflow on r ? | ||
63 | slr %r0,%r5 # r -= d | ||
64 | ahi %r1,1 # q += 1 | ||
65 | 7: lhi %r3,1 | ||
66 | nr %r3,%r4 # isolate last bit of n | ||
67 | alr %r0,%r3 # r += (n & 1) | ||
68 | clr %r0,%r5 # r >= d ? | ||
69 | jl 8f | ||
70 | slr %r0,%r5 # r -= d | ||
71 | ahi %r1,1 # q += 1 | ||
72 | 8: # now n = d * %r1 + %r0 | ||
73 | l %r2,24(%r15) | ||
74 | st %r0,0(%r2) | ||
75 | lr %r2,%r1 | ||
76 | br %r14 | ||
77 | .end __udiv_qrnnd | ||
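The new lib/qrnnd.S is the math-emu version (deleted further down) moved verbatim; it provides the __udiv_qrnnd() primitive used via the udiv_qrnnd() macro in sfp-util.h: divide the 64-bit value formed by r3 (high word) and r4 (low word) by the 32-bit divisor in r5, return the 32-bit quotient, and store the remainder through the pointer originally passed in r2. A plain-C model of that contract (assuming, as the soft-fp callers guarantee, that the high word of the dividend is smaller than the divisor so the quotient fits in 32 bits):

#include <stdint.h>

/* C model of __udiv_qrnnd: q = (n1:n0) / d, *r = (n1:n0) % d.
 * Precondition: n1 < d, so the quotient fits in 32 bits. */
static uint32_t udiv_qrnnd_model(uint32_t *r, uint32_t n1, uint32_t n0,
				 uint32_t d)
{
	uint64_t n = ((uint64_t) n1 << 32) | n0;

	*r = (uint32_t) (n % d);
	return (uint32_t) (n / d);
}

The shift-and-fixup sequence in the assembly exists because, as its comments note, the available dr instruction is a signed 64-by-32 divide; halving the operands first keeps the intermediate quotient in signed range, and the remaining steps reconstruct the exact unsigned quotient and remainder.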
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h new file mode 100644 index 000000000000..126011df14f1 --- /dev/null +++ b/arch/s390/lib/uaccess.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * arch/s390/lib/uaccess.h | ||
3 | * | ||
4 | * Copyright IBM Corp. 2007 | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef __ARCH_S390_LIB_UACCESS_H | ||
9 | #define __ARCH_S390_LIB_UACCESS_H | ||
10 | |||
11 | extern size_t copy_from_user_std(size_t, const void __user *, void *); | ||
12 | extern size_t copy_to_user_std(size_t, void __user *, const void *); | ||
13 | extern size_t strnlen_user_std(size_t, const char __user *); | ||
14 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); | ||
15 | extern int futex_atomic_cmpxchg_std(int __user *, int, int); | ||
16 | extern int futex_atomic_op_std(int, int __user *, int, int *); | ||
17 | |||
18 | extern size_t copy_from_user_pt(size_t, const void __user *, void *); | ||
19 | extern size_t copy_to_user_pt(size_t, void __user *, const void *); | ||
20 | extern int futex_atomic_op_pt(int, int __user *, int, int *); | ||
21 | extern int futex_atomic_cmpxchg_pt(int __user *, int, int); | ||
22 | |||
23 | #endif /* __ARCH_S390_LIB_UACCESS_H */ | ||
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index f9a23d57eb79..6d8772339d76 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | #include <asm/futex.h> | 14 | #include <asm/futex.h> |
15 | #include "uaccess.h" | ||
15 | 16 | ||
16 | #ifndef __s390x__ | 17 | #ifndef __s390x__ |
17 | #define AHI "ahi" | 18 | #define AHI "ahi" |
@@ -27,10 +28,7 @@ | |||
27 | #define SLR "slgr" | 28 | #define SLR "slgr" |
28 | #endif | 29 | #endif |
29 | 30 | ||
30 | extern size_t copy_from_user_std(size_t, const void __user *, void *); | 31 | static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) |
31 | extern size_t copy_to_user_std(size_t, void __user *, const void *); | ||
32 | |||
33 | size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) | ||
34 | { | 32 | { |
35 | register unsigned long reg0 asm("0") = 0x81UL; | 33 | register unsigned long reg0 asm("0") = 0x81UL; |
36 | unsigned long tmp1, tmp2; | 34 | unsigned long tmp1, tmp2; |
@@ -69,14 +67,14 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) | |||
69 | return size; | 67 | return size; |
70 | } | 68 | } |
71 | 69 | ||
72 | size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) | 70 | static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) |
73 | { | 71 | { |
74 | if (size <= 256) | 72 | if (size <= 256) |
75 | return copy_from_user_std(size, ptr, x); | 73 | return copy_from_user_std(size, ptr, x); |
76 | return copy_from_user_mvcos(size, ptr, x); | 74 | return copy_from_user_mvcos(size, ptr, x); |
77 | } | 75 | } |
78 | 76 | ||
79 | size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) | 77 | static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) |
80 | { | 78 | { |
81 | register unsigned long reg0 asm("0") = 0x810000UL; | 79 | register unsigned long reg0 asm("0") = 0x810000UL; |
82 | unsigned long tmp1, tmp2; | 80 | unsigned long tmp1, tmp2; |
@@ -105,14 +103,16 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) | |||
105 | return size; | 103 | return size; |
106 | } | 104 | } |
107 | 105 | ||
108 | size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) | 106 | static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, |
107 | const void *x) | ||
109 | { | 108 | { |
110 | if (size <= 256) | 109 | if (size <= 256) |
111 | return copy_to_user_std(size, ptr, x); | 110 | return copy_to_user_std(size, ptr, x); |
112 | return copy_to_user_mvcos(size, ptr, x); | 111 | return copy_to_user_mvcos(size, ptr, x); |
113 | } | 112 | } |
114 | 113 | ||
115 | size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) | 114 | static size_t copy_in_user_mvcos(size_t size, void __user *to, |
115 | const void __user *from) | ||
116 | { | 116 | { |
117 | register unsigned long reg0 asm("0") = 0x810081UL; | 117 | register unsigned long reg0 asm("0") = 0x810081UL; |
118 | unsigned long tmp1, tmp2; | 118 | unsigned long tmp1, tmp2; |
@@ -134,7 +134,7 @@ size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) | |||
134 | return size; | 134 | return size; |
135 | } | 135 | } |
136 | 136 | ||
137 | size_t clear_user_mvcos(size_t size, void __user *to) | 137 | static size_t clear_user_mvcos(size_t size, void __user *to) |
138 | { | 138 | { |
139 | register unsigned long reg0 asm("0") = 0x810000UL; | 139 | register unsigned long reg0 asm("0") = 0x810000UL; |
140 | unsigned long tmp1, tmp2; | 140 | unsigned long tmp1, tmp2; |
@@ -162,10 +162,43 @@ size_t clear_user_mvcos(size_t size, void __user *to) | |||
162 | return size; | 162 | return size; |
163 | } | 163 | } |
164 | 164 | ||
165 | extern size_t strnlen_user_std(size_t, const char __user *); | 165 | static size_t strnlen_user_mvcos(size_t count, const char __user *src) |
166 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); | 166 | { |
167 | extern int futex_atomic_op(int, int __user *, int, int *); | 167 | char buf[256]; |
168 | extern int futex_atomic_cmpxchg(int __user *, int, int); | 168 | int rc; |
169 | size_t done, len, len_str; | ||
170 | |||
171 | done = 0; | ||
172 | do { | ||
173 | len = min(count - done, (size_t) 256); | ||
174 | rc = uaccess.copy_from_user(len, src + done, buf); | ||
175 | if (unlikely(rc == len)) | ||
176 | return 0; | ||
177 | len -= rc; | ||
178 | len_str = strnlen(buf, len); | ||
179 | done += len_str; | ||
180 | } while ((len_str == len) && (done < count)); | ||
181 | return done + 1; | ||
182 | } | ||
183 | |||
184 | static size_t strncpy_from_user_mvcos(size_t count, const char __user *src, | ||
185 | char *dst) | ||
186 | { | ||
187 | int rc; | ||
188 | size_t done, len, len_str; | ||
189 | |||
190 | done = 0; | ||
191 | do { | ||
192 | len = min(count - done, (size_t) 4096); | ||
193 | rc = uaccess.copy_from_user(len, src + done, dst); | ||
194 | if (unlikely(rc == len)) | ||
195 | return -EFAULT; | ||
196 | len -= rc; | ||
197 | len_str = strnlen(dst, len); | ||
198 | done += len_str; | ||
199 | } while ((len_str == len) && (done < count)); | ||
200 | return done; | ||
201 | } | ||
169 | 202 | ||
170 | struct uaccess_ops uaccess_mvcos = { | 203 | struct uaccess_ops uaccess_mvcos = { |
171 | .copy_from_user = copy_from_user_mvcos_check, | 204 | .copy_from_user = copy_from_user_mvcos_check, |
@@ -176,6 +209,21 @@ struct uaccess_ops uaccess_mvcos = { | |||
176 | .clear_user = clear_user_mvcos, | 209 | .clear_user = clear_user_mvcos, |
177 | .strnlen_user = strnlen_user_std, | 210 | .strnlen_user = strnlen_user_std, |
178 | .strncpy_from_user = strncpy_from_user_std, | 211 | .strncpy_from_user = strncpy_from_user_std, |
179 | .futex_atomic_op = futex_atomic_op, | 212 | .futex_atomic_op = futex_atomic_op_std, |
180 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg, | 213 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, |
214 | }; | ||
215 | |||
216 | #ifdef CONFIG_S390_SWITCH_AMODE | ||
217 | struct uaccess_ops uaccess_mvcos_switch = { | ||
218 | .copy_from_user = copy_from_user_mvcos, | ||
219 | .copy_from_user_small = copy_from_user_mvcos, | ||
220 | .copy_to_user = copy_to_user_mvcos, | ||
221 | .copy_to_user_small = copy_to_user_mvcos, | ||
222 | .copy_in_user = copy_in_user_mvcos, | ||
223 | .clear_user = clear_user_mvcos, | ||
224 | .strnlen_user = strnlen_user_mvcos, | ||
225 | .strncpy_from_user = strncpy_from_user_mvcos, | ||
226 | .futex_atomic_op = futex_atomic_op_pt, | ||
227 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, | ||
181 | }; | 228 | }; |
229 | #endif | ||
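The two new helpers strnlen_user_mvcos() and strncpy_from_user_mvcos() avoid a page-table walk entirely: they pull the user string into kernel memory in chunks via uaccess.copy_from_user() and run strnlen() on the kernel-side copy, stopping at the first chunk that contains a NUL byte or cannot be copied. The same chunked-scan logic, stripped of the uaccess plumbing (copy_chunk() is a hypothetical stand-in for uaccess.copy_from_user() and returns the number of bytes it could NOT copy):

#include <stddef.h>
#include <string.h>

/* Returns the string length including the terminating NUL, or 0 if a whole
 * chunk could not be copied (fault) -- mirroring strnlen_user semantics. */
static size_t strnlen_user_model(size_t count, const char *src,
		size_t (*copy_chunk)(size_t, const char *, char *))
{
	char buf[256];
	size_t done = 0, len, len_str, rc;

	do {
		len = count - done < sizeof(buf) ? count - done : sizeof(buf);
		rc = copy_chunk(len, src + done, buf);
		if (rc == len)		/* nothing usable copied: fault */
			return 0;
		len -= rc;
		len_str = strnlen(buf, len);
		done += len_str;
	} while (len_str == len && done < count);
	return done + 1;
}

strncpy_from_user_mvcos() follows the same pattern but copies directly into the destination buffer in 4096-byte chunks and reports -EFAULT instead of 0 on a fault.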
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 49c3e46b4065..63181671e3e3 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/lib/uaccess_pt.c | 2 | * arch/s390/lib/uaccess_pt.c |
3 | * | 3 | * |
4 | * User access functions based on page table walks. | 4 | * User access functions based on page table walks for enhanced |
5 | * system layout without hardware support. | ||
5 | * | 6 | * |
6 | * Copyright IBM Corp. 2006 | 7 | * Copyright IBM Corp. 2006 |
7 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) | 8 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) |
@@ -12,9 +13,10 @@ | |||
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
13 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
14 | #include <asm/futex.h> | 15 | #include <asm/futex.h> |
16 | #include "uaccess.h" | ||
15 | 17 | ||
16 | static inline int __handle_fault(struct mm_struct *mm, unsigned long address, | 18 | static int __handle_fault(struct mm_struct *mm, unsigned long address, |
17 | int write_access) | 19 | int write_access) |
18 | { | 20 | { |
19 | struct vm_area_struct *vma; | 21 | struct vm_area_struct *vma; |
20 | int ret = -EFAULT; | 22 | int ret = -EFAULT; |
@@ -79,8 +81,8 @@ out_sigbus: | |||
79 | return ret; | 81 | return ret; |
80 | } | 82 | } |
81 | 83 | ||
82 | static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, | 84 | static size_t __user_copy_pt(unsigned long uaddr, void *kptr, |
83 | size_t n, int write_user) | 85 | size_t n, int write_user) |
84 | { | 86 | { |
85 | struct mm_struct *mm = current->mm; | 87 | struct mm_struct *mm = current->mm; |
86 | unsigned long offset, pfn, done, size; | 88 | unsigned long offset, pfn, done, size; |
@@ -133,6 +135,49 @@ fault: | |||
133 | goto retry; | 135 | goto retry; |
134 | } | 136 | } |
135 | 137 | ||
138 | /* | ||
139 | * Do DAT for user address by page table walk, return kernel address. | ||
140 | * This function needs to be called with current->mm->page_table_lock held. | ||
141 | */ | ||
142 | static unsigned long __dat_user_addr(unsigned long uaddr) | ||
143 | { | ||
144 | struct mm_struct *mm = current->mm; | ||
145 | unsigned long pfn, ret; | ||
146 | pgd_t *pgd; | ||
147 | pmd_t *pmd; | ||
148 | pte_t *pte; | ||
149 | int rc; | ||
150 | |||
151 | ret = 0; | ||
152 | retry: | ||
153 | pgd = pgd_offset(mm, uaddr); | ||
154 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
155 | goto fault; | ||
156 | |||
157 | pmd = pmd_offset(pgd, uaddr); | ||
158 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
159 | goto fault; | ||
160 | |||
161 | pte = pte_offset_map(pmd, uaddr); | ||
162 | if (!pte || !pte_present(*pte)) | ||
163 | goto fault; | ||
164 | |||
165 | pfn = pte_pfn(*pte); | ||
166 | if (!pfn_valid(pfn)) | ||
167 | goto out; | ||
168 | |||
169 | ret = (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); | ||
170 | out: | ||
171 | return ret; | ||
172 | fault: | ||
173 | spin_unlock(&mm->page_table_lock); | ||
174 | rc = __handle_fault(mm, uaddr, 0); | ||
175 | spin_lock(&mm->page_table_lock); | ||
176 | if (rc) | ||
177 | goto out; | ||
178 | goto retry; | ||
179 | } | ||
180 | |||
136 | size_t copy_from_user_pt(size_t n, const void __user *from, void *to) | 181 | size_t copy_from_user_pt(size_t n, const void __user *from, void *to) |
137 | { | 182 | { |
138 | size_t rc; | 183 | size_t rc; |
@@ -155,3 +200,277 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) | |||
155 | } | 200 | } |
156 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); | 201 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); |
157 | } | 202 | } |
203 | |||
204 | static size_t clear_user_pt(size_t n, void __user *to) | ||
205 | { | ||
206 | long done, size, ret; | ||
207 | |||
208 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
209 | memset((void __kernel __force *) to, 0, n); | ||
210 | return 0; | ||
211 | } | ||
212 | done = 0; | ||
213 | do { | ||
214 | if (n - done > PAGE_SIZE) | ||
215 | size = PAGE_SIZE; | ||
216 | else | ||
217 | size = n - done; | ||
218 | ret = __user_copy_pt((unsigned long) to + done, | ||
219 | &empty_zero_page, size, 1); | ||
220 | done += size; | ||
221 | if (ret) | ||
222 | return ret + n - done; | ||
223 | } while (done < n); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static size_t strnlen_user_pt(size_t count, const char __user *src) | ||
228 | { | ||
229 | char *addr; | ||
230 | unsigned long uaddr = (unsigned long) src; | ||
231 | struct mm_struct *mm = current->mm; | ||
232 | unsigned long offset, pfn, done, len; | ||
233 | pgd_t *pgd; | ||
234 | pmd_t *pmd; | ||
235 | pte_t *pte; | ||
236 | size_t len_str; | ||
237 | |||
238 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
239 | return strnlen((const char __kernel __force *) src, count) + 1; | ||
240 | done = 0; | ||
241 | retry: | ||
242 | spin_lock(&mm->page_table_lock); | ||
243 | do { | ||
244 | pgd = pgd_offset(mm, uaddr); | ||
245 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
246 | goto fault; | ||
247 | |||
248 | pmd = pmd_offset(pgd, uaddr); | ||
249 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
250 | goto fault; | ||
251 | |||
252 | pte = pte_offset_map(pmd, uaddr); | ||
253 | if (!pte || !pte_present(*pte)) | ||
254 | goto fault; | ||
255 | |||
256 | pfn = pte_pfn(*pte); | ||
257 | if (!pfn_valid(pfn)) { | ||
258 | done = -1; | ||
259 | goto out; | ||
260 | } | ||
261 | |||
262 | offset = uaddr & (PAGE_SIZE-1); | ||
263 | addr = (char *)(pfn << PAGE_SHIFT) + offset; | ||
264 | len = min(count - done, PAGE_SIZE - offset); | ||
265 | len_str = strnlen(addr, len); | ||
266 | done += len_str; | ||
267 | uaddr += len_str; | ||
268 | } while ((len_str == len) && (done < count)); | ||
269 | out: | ||
270 | spin_unlock(&mm->page_table_lock); | ||
271 | return done + 1; | ||
272 | fault: | ||
273 | spin_unlock(&mm->page_table_lock); | ||
274 | if (__handle_fault(mm, uaddr, 0)) { | ||
275 | return 0; | ||
276 | } | ||
277 | goto retry; | ||
278 | } | ||
279 | |||
280 | static size_t strncpy_from_user_pt(size_t count, const char __user *src, | ||
281 | char *dst) | ||
282 | { | ||
283 | size_t n = strnlen_user_pt(count, src); | ||
284 | |||
285 | if (!n) | ||
286 | return -EFAULT; | ||
287 | if (n > count) | ||
288 | n = count; | ||
289 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
290 | memcpy(dst, (const char __kernel __force *) src, n); | ||
291 | if (dst[n-1] == '\0') | ||
292 | return n-1; | ||
293 | else | ||
294 | return n; | ||
295 | } | ||
296 | if (__user_copy_pt((unsigned long) src, dst, n, 0)) | ||
297 | return -EFAULT; | ||
298 | if (dst[n-1] == '\0') | ||
299 | return n-1; | ||
300 | else | ||
301 | return n; | ||
302 | } | ||
303 | |||
304 | static size_t copy_in_user_pt(size_t n, void __user *to, | ||
305 | const void __user *from) | ||
306 | { | ||
307 | struct mm_struct *mm = current->mm; | ||
308 | unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to, | ||
309 | uaddr, done, size; | ||
310 | unsigned long uaddr_from = (unsigned long) from; | ||
311 | unsigned long uaddr_to = (unsigned long) to; | ||
312 | pgd_t *pgd_from, *pgd_to; | ||
313 | pmd_t *pmd_from, *pmd_to; | ||
314 | pte_t *pte_from, *pte_to; | ||
315 | int write_user; | ||
316 | |||
317 | done = 0; | ||
318 | retry: | ||
319 | spin_lock(&mm->page_table_lock); | ||
320 | do { | ||
321 | pgd_from = pgd_offset(mm, uaddr_from); | ||
322 | if (pgd_none(*pgd_from) || unlikely(pgd_bad(*pgd_from))) { | ||
323 | uaddr = uaddr_from; | ||
324 | write_user = 0; | ||
325 | goto fault; | ||
326 | } | ||
327 | pgd_to = pgd_offset(mm, uaddr_to); | ||
328 | if (pgd_none(*pgd_to) || unlikely(pgd_bad(*pgd_to))) { | ||
329 | uaddr = uaddr_to; | ||
330 | write_user = 1; | ||
331 | goto fault; | ||
332 | } | ||
333 | |||
334 | pmd_from = pmd_offset(pgd_from, uaddr_from); | ||
335 | if (pmd_none(*pmd_from) || unlikely(pmd_bad(*pmd_from))) { | ||
336 | uaddr = uaddr_from; | ||
337 | write_user = 0; | ||
338 | goto fault; | ||
339 | } | ||
340 | pmd_to = pmd_offset(pgd_to, uaddr_to); | ||
341 | if (pmd_none(*pmd_to) || unlikely(pmd_bad(*pmd_to))) { | ||
342 | uaddr = uaddr_to; | ||
343 | write_user = 1; | ||
344 | goto fault; | ||
345 | } | ||
346 | |||
347 | pte_from = pte_offset_map(pmd_from, uaddr_from); | ||
348 | if (!pte_from || !pte_present(*pte_from)) { | ||
349 | uaddr = uaddr_from; | ||
350 | write_user = 0; | ||
351 | goto fault; | ||
352 | } | ||
353 | pte_to = pte_offset_map(pmd_to, uaddr_to); | ||
354 | if (!pte_to || !pte_present(*pte_to) || !pte_write(*pte_to)) { | ||
355 | uaddr = uaddr_to; | ||
356 | write_user = 1; | ||
357 | goto fault; | ||
358 | } | ||
359 | |||
360 | pfn_from = pte_pfn(*pte_from); | ||
361 | if (!pfn_valid(pfn_from)) | ||
362 | goto out; | ||
363 | pfn_to = pte_pfn(*pte_to); | ||
364 | if (!pfn_valid(pfn_to)) | ||
365 | goto out; | ||
366 | |||
367 | offset_from = uaddr_from & (PAGE_SIZE-1); | ||
368 | offset_to = uaddr_to & (PAGE_SIZE-1); | ||
369 | offset_max = max(offset_from, offset_to); | ||
370 | size = min(n - done, PAGE_SIZE - offset_max); | ||
371 | |||
372 | memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to, | ||
373 | (void *)(pfn_from << PAGE_SHIFT) + offset_from, size); | ||
374 | done += size; | ||
375 | uaddr_from += size; | ||
376 | uaddr_to += size; | ||
377 | } while (done < n); | ||
378 | out: | ||
379 | spin_unlock(&mm->page_table_lock); | ||
380 | return n - done; | ||
381 | fault: | ||
382 | spin_unlock(&mm->page_table_lock); | ||
383 | if (__handle_fault(mm, uaddr, write_user)) | ||
384 | return n - done; | ||
385 | goto retry; | ||
386 | } | ||
387 | |||
388 | #define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \ | ||
389 | asm volatile("0: l %1,0(%6)\n" \ | ||
390 | "1: " insn \ | ||
391 | "2: cs %1,%2,0(%6)\n" \ | ||
392 | "3: jl 1b\n" \ | ||
393 | " lhi %0,0\n" \ | ||
394 | "4:\n" \ | ||
395 | EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \ | ||
396 | : "=d" (ret), "=&d" (oldval), "=&d" (newval), \ | ||
397 | "=m" (*uaddr) \ | ||
398 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | ||
399 | "m" (*uaddr) : "cc" ); | ||
400 | |||
401 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | ||
402 | { | ||
403 | int oldval = 0, newval, ret; | ||
404 | |||
405 | spin_lock(¤t->mm->page_table_lock); | ||
406 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
407 | if (!uaddr) { | ||
408 | spin_unlock(¤t->mm->page_table_lock); | ||
409 | return -EFAULT; | ||
410 | } | ||
411 | get_page(virt_to_page(uaddr)); | ||
412 | spin_unlock(¤t->mm->page_table_lock); | ||
413 | switch (op) { | ||
414 | case FUTEX_OP_SET: | ||
415 | __futex_atomic_op("lr %2,%5\n", | ||
416 | ret, oldval, newval, uaddr, oparg); | ||
417 | break; | ||
418 | case FUTEX_OP_ADD: | ||
419 | __futex_atomic_op("lr %2,%1\nar %2,%5\n", | ||
420 | ret, oldval, newval, uaddr, oparg); | ||
421 | break; | ||
422 | case FUTEX_OP_OR: | ||
423 | __futex_atomic_op("lr %2,%1\nor %2,%5\n", | ||
424 | ret, oldval, newval, uaddr, oparg); | ||
425 | break; | ||
426 | case FUTEX_OP_ANDN: | ||
427 | __futex_atomic_op("lr %2,%1\nnr %2,%5\n", | ||
428 | ret, oldval, newval, uaddr, oparg); | ||
429 | break; | ||
430 | case FUTEX_OP_XOR: | ||
431 | __futex_atomic_op("lr %2,%1\nxr %2,%5\n", | ||
432 | ret, oldval, newval, uaddr, oparg); | ||
433 | break; | ||
434 | default: | ||
435 | ret = -ENOSYS; | ||
436 | } | ||
437 | put_page(virt_to_page(uaddr)); | ||
438 | *old = oldval; | ||
439 | return ret; | ||
440 | } | ||
441 | |||
442 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | ||
443 | { | ||
444 | int ret; | ||
445 | |||
446 | spin_lock(¤t->mm->page_table_lock); | ||
447 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
448 | if (!uaddr) { | ||
449 | spin_unlock(¤t->mm->page_table_lock); | ||
450 | return -EFAULT; | ||
451 | } | ||
452 | get_page(virt_to_page(uaddr)); | ||
453 | spin_unlock(¤t->mm->page_table_lock); | ||
454 | asm volatile(" cs %1,%4,0(%5)\n" | ||
455 | "0: lr %0,%1\n" | ||
456 | "1:\n" | ||
457 | EX_TABLE(0b,1b) | ||
458 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | ||
459 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | ||
460 | : "cc", "memory" ); | ||
461 | put_page(virt_to_page(uaddr)); | ||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | struct uaccess_ops uaccess_pt = { | ||
466 | .copy_from_user = copy_from_user_pt, | ||
467 | .copy_from_user_small = copy_from_user_pt, | ||
468 | .copy_to_user = copy_to_user_pt, | ||
469 | .copy_to_user_small = copy_to_user_pt, | ||
470 | .copy_in_user = copy_in_user_pt, | ||
471 | .clear_user = clear_user_pt, | ||
472 | .strnlen_user = strnlen_user_pt, | ||
473 | .strncpy_from_user = strncpy_from_user_pt, | ||
474 | .futex_atomic_op = futex_atomic_op_pt, | ||
475 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt, | ||
476 | }; | ||
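The __futex_atomic_op() macro above wraps the futex operation in a load / compute / compare-and-swap loop: the old value is (re)loaded, the new value is derived from it and oparg, and cs is retried until the word was not changed concurrently; the EX_TABLE entries turn a fault in any of the accesses into -EFAULT. A plain-C model of that loop, with __sync_val_compare_and_swap standing in for the cs instruction and faults ignored:

/* Sketch only: op_new() computes the new value from the old one and oparg,
 * e.g. for FUTEX_OP_ADD it would return old + oparg. */
static int futex_op_model(int (*op_new)(int old, int oparg),
			  int *uaddr, int oparg, int *old)
{
	int oldval, newval, cur = *uaddr;

	do {
		oldval = cur;
		newval = op_new(oldval, oparg);
		cur = __sync_val_compare_and_swap(uaddr, oldval, newval);
	} while (cur != oldval);

	*old = oldval;
	return 0;
}

The _pt variants differ from the _std ones only in how the user word is made accessible: __dat_user_addr() resolves it to its kernel mapping under page_table_lock, the page is pinned with get_page() for the duration of the operation, and released again with put_page() afterwards.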
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index 56a0214e9928..28c4500a58d0 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <asm/futex.h> | 15 | #include <asm/futex.h> |
16 | #include "uaccess.h" | ||
16 | 17 | ||
17 | #ifndef __s390x__ | 18 | #ifndef __s390x__ |
18 | #define AHI "ahi" | 19 | #define AHI "ahi" |
@@ -28,9 +29,6 @@ | |||
28 | #define SLR "slgr" | 29 | #define SLR "slgr" |
29 | #endif | 30 | #endif |
30 | 31 | ||
31 | extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to); | ||
32 | extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from); | ||
33 | |||
34 | size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) | 32 | size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) |
35 | { | 33 | { |
36 | unsigned long tmp1, tmp2; | 34 | unsigned long tmp1, tmp2; |
@@ -72,7 +70,8 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) | |||
72 | return size; | 70 | return size; |
73 | } | 71 | } |
74 | 72 | ||
75 | size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) | 73 | static size_t copy_from_user_std_check(size_t size, const void __user *ptr, |
74 | void *x) | ||
76 | { | 75 | { |
77 | if (size <= 1024) | 76 | if (size <= 1024) |
78 | return copy_from_user_std(size, ptr, x); | 77 | return copy_from_user_std(size, ptr, x); |
@@ -110,14 +109,16 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) | |||
110 | return size; | 109 | return size; |
111 | } | 110 | } |
112 | 111 | ||
113 | size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) | 112 | static size_t copy_to_user_std_check(size_t size, void __user *ptr, |
113 | const void *x) | ||
114 | { | 114 | { |
115 | if (size <= 1024) | 115 | if (size <= 1024) |
116 | return copy_to_user_std(size, ptr, x); | 116 | return copy_to_user_std(size, ptr, x); |
117 | return copy_to_user_pt(size, ptr, x); | 117 | return copy_to_user_pt(size, ptr, x); |
118 | } | 118 | } |
119 | 119 | ||
120 | size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) | 120 | static size_t copy_in_user_std(size_t size, void __user *to, |
121 | const void __user *from) | ||
121 | { | 122 | { |
122 | unsigned long tmp1; | 123 | unsigned long tmp1; |
123 | 124 | ||
@@ -148,7 +149,7 @@ size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) | |||
148 | return size; | 149 | return size; |
149 | } | 150 | } |
150 | 151 | ||
151 | size_t clear_user_std(size_t size, void __user *to) | 152 | static size_t clear_user_std(size_t size, void __user *to) |
152 | { | 153 | { |
153 | unsigned long tmp1, tmp2; | 154 | unsigned long tmp1, tmp2; |
154 | 155 | ||
@@ -254,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) | |||
254 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | 255 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
255 | "m" (*uaddr) : "cc"); | 256 | "m" (*uaddr) : "cc"); |
256 | 257 | ||
257 | int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | 258 | int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) |
258 | { | 259 | { |
259 | int oldval = 0, newval, ret; | 260 | int oldval = 0, newval, ret; |
260 | 261 | ||
@@ -286,7 +287,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | |||
286 | return ret; | 287 | return ret; |
287 | } | 288 | } |
288 | 289 | ||
289 | int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) | 290 | int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) |
290 | { | 291 | { |
291 | int ret; | 292 | int ret; |
292 | 293 | ||
@@ -311,6 +312,6 @@ struct uaccess_ops uaccess_std = { | |||
311 | .clear_user = clear_user_std, | 312 | .clear_user = clear_user_std, |
312 | .strnlen_user = strnlen_user_std, | 313 | .strnlen_user = strnlen_user_std, |
313 | .strncpy_from_user = strncpy_from_user_std, | 314 | .strncpy_from_user = strncpy_from_user_std, |
314 | .futex_atomic_op = futex_atomic_op, | 315 | .futex_atomic_op = futex_atomic_op_std, |
315 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg, | 316 | .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std, |
316 | }; | 317 | }; |
diff --git a/arch/s390/math-emu/Makefile b/arch/s390/math-emu/Makefile index c10df144f2ab..73b3e72efc46 100644 --- a/arch/s390/math-emu/Makefile +++ b/arch/s390/math-emu/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the FPU instruction emulation. | 2 | # Makefile for the FPU instruction emulation. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_MATHEMU) := math.o qrnnd.o | 5 | obj-$(CONFIG_MATHEMU) := math.o |
6 | 6 | ||
7 | EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w | 7 | EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w |
8 | EXTRA_AFLAGS := -traditional | 8 | EXTRA_AFLAGS := -traditional |
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c index 6b9aec5a2c18..3ee78ccb617d 100644 --- a/arch/s390/math-emu/math.c +++ b/arch/s390/math-emu/math.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
16 | #include <asm/lowcore.h> | 16 | #include <asm/lowcore.h> |
17 | 17 | ||
18 | #include "sfp-util.h" | 18 | #include <asm/sfp-util.h> |
19 | #include <math-emu/soft-fp.h> | 19 | #include <math-emu/soft-fp.h> |
20 | #include <math-emu/single.h> | 20 | #include <math-emu/single.h> |
21 | #include <math-emu/double.h> | 21 | #include <math-emu/double.h> |
diff --git a/arch/s390/math-emu/qrnnd.S b/arch/s390/math-emu/qrnnd.S deleted file mode 100644 index b01c2b648e22..000000000000 --- a/arch/s390/math-emu/qrnnd.S +++ /dev/null | |||
@@ -1,77 +0,0 @@ | |||
1 | # S/390 __udiv_qrnnd | ||
2 | |||
3 | # r2 : &__r | ||
4 | # r3 : upper half of 64 bit word n | ||
5 | # r4 : lower half of 64 bit word n | ||
6 | # r5 : divisor d | ||
7 | # the reminder r of the division is to be stored to &__r and | ||
8 | # the quotient q is to be returned | ||
9 | |||
10 | .text | ||
11 | .globl __udiv_qrnnd | ||
12 | __udiv_qrnnd: | ||
13 | st %r2,24(%r15) # store pointer to reminder for later | ||
14 | lr %r0,%r3 # reload n | ||
15 | lr %r1,%r4 | ||
16 | ltr %r2,%r5 # reload and test divisor | ||
17 | jp 5f | ||
18 | # divisor >= 0x80000000 | ||
19 | srdl %r0,2 # n/4 | ||
20 | srl %r2,1 # d/2 | ||
21 | slr %r1,%r2 # special case if last bit of d is set | ||
22 | brc 3,0f # (n/4) div (n/2) can overflow by 1 | ||
23 | ahi %r0,-1 # trick: subtract n/2, then divide | ||
24 | 0: dr %r0,%r2 # signed division | ||
25 | ahi %r1,1 # trick part 2: add 1 to the quotient | ||
26 | # now (n >> 2) = (d >> 1) * %r1 + %r0 | ||
27 | lhi %r3,1 | ||
28 | nr %r3,%r1 # test last bit of q | ||
29 | jz 1f | ||
30 | alr %r0,%r2 # add (d>>1) to r | ||
31 | 1: srl %r1,1 # q >>= 1 | ||
32 | # now (n >> 2) = (d&-2) * %r1 + %r0 | ||
33 | lhi %r3,1 | ||
34 | nr %r3,%r5 # test last bit of d | ||
35 | jz 2f | ||
36 | slr %r0,%r1 # r -= q | ||
37 | brc 3,2f # borrow ? | ||
38 | alr %r0,%r5 # r += d | ||
39 | ahi %r1,-1 | ||
40 | 2: # now (n >> 2) = d * %r1 + %r0 | ||
41 | alr %r1,%r1 # q <<= 1 | ||
42 | alr %r0,%r0 # r <<= 1 | ||
43 | brc 12,3f # overflow on r ? | ||
44 | slr %r0,%r5 # r -= d | ||
45 | ahi %r1,1 # q += 1 | ||
46 | 3: lhi %r3,2 | ||
47 | nr %r3,%r4 # test next to last bit of n | ||
48 | jz 4f | ||
49 | ahi %r0,1 # r += 1 | ||
50 | 4: clr %r0,%r5 # r >= d ? | ||
51 | jl 6f | ||
52 | slr %r0,%r5 # r -= d | ||
53 | ahi %r1,1 # q += 1 | ||
54 | # now (n >> 1) = d * %r1 + %r0 | ||
55 | j 6f | ||
56 | 5: # divisor < 0x80000000 | ||
57 | srdl %r0,1 | ||
58 | dr %r0,%r2 # signed division | ||
59 | # now (n >> 1) = d * %r1 + %r0 | ||
60 | 6: alr %r1,%r1 # q <<= 1 | ||
61 | alr %r0,%r0 # r <<= 1 | ||
62 | brc 12,7f # overflow on r ? | ||
63 | slr %r0,%r5 # r -= d | ||
64 | ahi %r1,1 # q += 1 | ||
65 | 7: lhi %r3,1 | ||
66 | nr %r3,%r4 # isolate last bit of n | ||
67 | alr %r0,%r3 # r += (n & 1) | ||
68 | clr %r0,%r5 # r >= d ? | ||
69 | jl 8f | ||
70 | slr %r0,%r5 # r -= d | ||
71 | ahi %r1,1 # q += 1 | ||
72 | 8: # now n = d * %r1 + %r0 | ||
73 | l %r2,24(%r15) | ||
74 | st %r0,0(%r2) | ||
75 | lr %r2,%r1 | ||
76 | br %r14 | ||
77 | .end __udiv_qrnnd | ||
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h deleted file mode 100644 index 5b6ca4570ea4..000000000000 --- a/arch/s390/math-emu/sfp-util.h +++ /dev/null | |||
@@ -1,66 +0,0 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/sched.h> | ||
3 | #include <linux/types.h> | ||
4 | #include <asm/byteorder.h> | ||
5 | |||
6 | #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({ \ | ||
7 | unsigned int __sh = (ah); \ | ||
8 | unsigned int __sl = (al); \ | ||
9 | asm volatile( \ | ||
10 | " alr %1,%3\n" \ | ||
11 | " brc 12,0f\n" \ | ||
12 | " ahi %0,1\n" \ | ||
13 | "0: alr %0,%2" \ | ||
14 | : "+&d" (__sh), "+d" (__sl) \ | ||
15 | : "d" (bh), "d" (bl) : "cc"); \ | ||
16 | (sh) = __sh; \ | ||
17 | (sl) = __sl; \ | ||
18 | }) | ||
19 | |||
20 | #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({ \ | ||
21 | unsigned int __sh = (ah); \ | ||
22 | unsigned int __sl = (al); \ | ||
23 | asm volatile( \ | ||
24 | " slr %1,%3\n" \ | ||
25 | " brc 3,0f\n" \ | ||
26 | " ahi %0,-1\n" \ | ||
27 | "0: slr %0,%2" \ | ||
28 | : "+&d" (__sh), "+d" (__sl) \ | ||
29 | : "d" (bh), "d" (bl) : "cc"); \ | ||
30 | (sh) = __sh; \ | ||
31 | (sl) = __sl; \ | ||
32 | }) | ||
33 | |||
34 | /* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */ | ||
35 | #define umul_ppmm(wh, wl, u, v) ({ \ | ||
36 | unsigned int __wh = u; \ | ||
37 | unsigned int __wl = v; \ | ||
38 | asm volatile( \ | ||
39 | " ltr 1,%0\n" \ | ||
40 | " mr 0,%1\n" \ | ||
41 | " jnm 0f\n" \ | ||
42 | " alr 0,%1\n" \ | ||
43 | "0: ltr %1,%1\n" \ | ||
44 | " jnm 1f\n" \ | ||
45 | " alr 0,%0\n" \ | ||
46 | "1: lr %0,0\n" \ | ||
47 | " lr %1,1\n" \ | ||
48 | : "+d" (__wh), "+d" (__wl) \ | ||
49 | : : "0", "1", "cc"); \ | ||
50 | wh = __wh; \ | ||
51 | wl = __wl; \ | ||
52 | }) | ||
53 | |||
54 | #define udiv_qrnnd(q, r, n1, n0, d) \ | ||
55 | do { unsigned long __r; \ | ||
56 | (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ | ||
57 | (r) = __r; \ | ||
58 | } while (0) | ||
59 | extern unsigned long __udiv_qrnnd (unsigned long *, unsigned long, | ||
60 | unsigned long , unsigned long); | ||
61 | |||
62 | #define UDIV_NEEDS_NORMALIZATION 0 | ||
63 | |||
64 | #define abort() return 0 | ||
65 | |||
66 | #define __BYTE_ORDER __BIG_ENDIAN | ||
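(The header is removed here in favour of <asm/sfp-util.h>, which math.c now includes -- see the hunk above -- so the helpers remain available now that qrnnd.S lives in arch/s390/lib.) For reference, add_ssaaaa()/sub_ddmmss() implement 64-bit add/subtract on (high,low) pairs of 32-bit words with explicit carry/borrow propagation; the equivalent in plain C:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t ah = 0x00000001, al = 0xffffffff;	/* a = 0x1ffffffff */
	uint32_t bh = 0x00000000, bl = 0x00000001;	/* b = 0x000000001 */
	uint32_t sh, sl;

	/* what add_ssaaaa(sh, sl, ah, al, bh, bl) computes: */
	sl = al + bl;
	sh = ah + bh + (sl < al);		/* carry out of the low word */

	assert((((uint64_t) sh << 32) | sl) ==
	       ((((uint64_t) ah << 32) | al) + (((uint64_t) bh << 32) | bl)));
	return 0;
}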
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index 607f50ead1fd..f93a056869bc 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c | |||
@@ -245,7 +245,7 @@ cmm_set_timeout(long nr, long seconds) | |||
245 | cmm_set_timer(); | 245 | cmm_set_timer(); |
246 | } | 246 | } |
247 | 247 | ||
248 | static inline int | 248 | static int |
249 | cmm_skip_blanks(char *cp, char **endp) | 249 | cmm_skip_blanks(char *cp, char **endp) |
250 | { | 250 | { |
251 | char *str; | 251 | char *str; |
@@ -414,7 +414,7 @@ cmm_smsg_target(char *from, char *msg) | |||
414 | } | 414 | } |
415 | #endif | 415 | #endif |
416 | 416 | ||
417 | struct ctl_table_header *cmm_sysctl_header; | 417 | static struct ctl_table_header *cmm_sysctl_header; |
418 | 418 | ||
419 | static int | 419 | static int |
420 | cmm_init (void) | 420 | cmm_init (void) |
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 775bf19e742b..394980b05e6f 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
17 | #include <linux/ctype.h> | 17 | #include <linux/ctype.h> |
18 | #include <linux/ioport.h> | ||
18 | #include <asm/page.h> | 19 | #include <asm/page.h> |
19 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
20 | #include <asm/ebcdic.h> | 21 | #include <asm/ebcdic.h> |
@@ -70,6 +71,7 @@ struct qin64 { | |||
70 | struct dcss_segment { | 71 | struct dcss_segment { |
71 | struct list_head list; | 72 | struct list_head list; |
72 | char dcss_name[8]; | 73 | char dcss_name[8]; |
74 | char res_name[16]; | ||
73 | unsigned long start_addr; | 75 | unsigned long start_addr; |
74 | unsigned long end; | 76 | unsigned long end; |
75 | atomic_t ref_count; | 77 | atomic_t ref_count; |
@@ -77,6 +79,7 @@ struct dcss_segment { | |||
77 | unsigned int vm_segtype; | 79 | unsigned int vm_segtype; |
78 | struct qrange range[6]; | 80 | struct qrange range[6]; |
79 | int segcnt; | 81 | int segcnt; |
82 | struct resource *res; | ||
80 | }; | 83 | }; |
81 | 84 | ||
82 | static DEFINE_MUTEX(dcss_lock); | 85 | static DEFINE_MUTEX(dcss_lock); |
@@ -88,7 +91,7 @@ static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", | |||
88 | * Create the 8 bytes, ebcdic VM segment name from | 91 | * Create the 8 bytes, ebcdic VM segment name from |
89 | * an ascii name. | 92 | * an ascii name. |
90 | */ | 93 | */ |
91 | static void inline | 94 | static void |
92 | dcss_mkname(char *name, char *dcss_name) | 95 | dcss_mkname(char *name, char *dcss_name) |
93 | { | 96 | { |
94 | int i; | 97 | int i; |
@@ -303,6 +306,29 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
303 | goto out_free; | 306 | goto out_free; |
304 | } | 307 | } |
305 | 308 | ||
309 | seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); | ||
310 | if (seg->res == NULL) { | ||
311 | rc = -ENOMEM; | ||
312 | goto out_shared; | ||
313 | } | ||
314 | seg->res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
315 | seg->res->start = seg->start_addr; | ||
316 | seg->res->end = seg->end; | ||
317 | memcpy(&seg->res_name, seg->dcss_name, 8); | ||
318 | EBCASC(seg->res_name, 8); | ||
319 | seg->res_name[8] = '\0'; | ||
320 | strncat(seg->res_name, " (DCSS)", 7); | ||
321 | seg->res->name = seg->res_name; | ||
322 | rc = seg->vm_segtype; | ||
323 | if (rc == SEG_TYPE_SC || | ||
324 | ((rc == SEG_TYPE_SR || rc == SEG_TYPE_ER) && !do_nonshared)) | ||
325 | seg->res->flags |= IORESOURCE_READONLY; | ||
326 | if (request_resource(&iomem_resource, seg->res)) { | ||
327 | rc = -EBUSY; | ||
328 | kfree(seg->res); | ||
329 | goto out_shared; | ||
330 | } | ||
331 | |||
306 | if (do_nonshared) | 332 | if (do_nonshared) |
307 | dcss_command = DCSS_LOADNSR; | 333 | dcss_command = DCSS_LOADNSR; |
308 | else | 334 | else |
@@ -316,12 +342,11 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
316 | rc = dcss_diag_translate_rc (seg->end); | 342 | rc = dcss_diag_translate_rc (seg->end); |
317 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 343 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, |
318 | &seg->start_addr, &seg->end); | 344 | &seg->start_addr, &seg->end); |
319 | goto out_shared; | 345 | goto out_resource; |
320 | } | 346 | } |
321 | seg->do_nonshared = do_nonshared; | 347 | seg->do_nonshared = do_nonshared; |
322 | atomic_set(&seg->ref_count, 1); | 348 | atomic_set(&seg->ref_count, 1); |
323 | list_add(&seg->list, &dcss_list); | 349 | list_add(&seg->list, &dcss_list); |
324 | rc = seg->vm_segtype; | ||
325 | *addr = seg->start_addr; | 350 | *addr = seg->start_addr; |
326 | *end = seg->end; | 351 | *end = seg->end; |
327 | if (do_nonshared) | 352 | if (do_nonshared) |
@@ -329,12 +354,16 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
329 | "type %s in non-shared mode\n", name, | 354 | "type %s in non-shared mode\n", name, |
330 | (void*)seg->start_addr, (void*)seg->end, | 355 | (void*)seg->start_addr, (void*)seg->end, |
331 | segtype_string[seg->vm_segtype]); | 356 | segtype_string[seg->vm_segtype]); |
332 | else | 357 | else { |
333 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | 358 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " |
334 | "type %s in shared mode\n", name, | 359 | "type %s in shared mode\n", name, |
335 | (void*)seg->start_addr, (void*)seg->end, | 360 | (void*)seg->start_addr, (void*)seg->end, |
336 | segtype_string[seg->vm_segtype]); | 361 | segtype_string[seg->vm_segtype]); |
362 | } | ||
337 | goto out; | 363 | goto out; |
364 | out_resource: | ||
365 | release_resource(seg->res); | ||
366 | kfree(seg->res); | ||
338 | out_shared: | 367 | out_shared: |
339 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | 368 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
340 | out_free: | 369 | out_free: |
@@ -401,6 +430,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr, | |||
401 | * -ENOENT : no such segment (segment gone!) | 430 | * -ENOENT : no such segment (segment gone!) |
402 | * -EAGAIN : segment is in use by other exploiters, try later | 431 | * -EAGAIN : segment is in use by other exploiters, try later |
403 | * -EINVAL : no segment with the given name is currently loaded - name invalid | 432 | * -EINVAL : no segment with the given name is currently loaded - name invalid |
433 | * -EBUSY : segment can temporarily not be used (overlaps with dcss) | ||
404 | * 0 : operation succeeded | 434 | * 0 : operation succeeded |
405 | */ | 435 | */ |
406 | int | 436 | int |
@@ -428,12 +458,24 @@ segment_modify_shared (char *name, int do_nonshared) | |||
428 | rc = -EAGAIN; | 458 | rc = -EAGAIN; |
429 | goto out_unlock; | 459 | goto out_unlock; |
430 | } | 460 | } |
431 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 461 | release_resource(seg->res); |
432 | &dummy, &dummy); | 462 | if (do_nonshared) { |
433 | if (do_nonshared) | ||
434 | dcss_command = DCSS_LOADNSR; | 463 | dcss_command = DCSS_LOADNSR; |
435 | else | 464 | seg->res->flags &= ~IORESOURCE_READONLY; |
436 | dcss_command = DCSS_LOADNOLY; | 465 | } else { |
466 | dcss_command = DCSS_LOADNOLY; | ||
467 | if (seg->vm_segtype == SEG_TYPE_SR || | ||
468 | seg->vm_segtype == SEG_TYPE_ER) | ||
469 | seg->res->flags |= IORESOURCE_READONLY; | ||
470 | } | ||
471 | if (request_resource(&iomem_resource, seg->res)) { | ||
472 | PRINT_WARN("segment_modify_shared: could not reload segment %s" | ||
473 | " - overlapping resources\n", name); | ||
474 | rc = -EBUSY; | ||
475 | kfree(seg->res); | ||
476 | goto out_del; | ||
477 | } | ||
478 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); | ||
437 | diag_cc = dcss_diag(dcss_command, seg->dcss_name, | 479 | diag_cc = dcss_diag(dcss_command, seg->dcss_name, |
438 | &seg->start_addr, &seg->end); | 480 | &seg->start_addr, &seg->end); |
439 | if (diag_cc > 1) { | 481 | if (diag_cc > 1) { |
@@ -446,9 +488,9 @@ segment_modify_shared (char *name, int do_nonshared) | |||
446 | rc = 0; | 488 | rc = 0; |
447 | goto out_unlock; | 489 | goto out_unlock; |
448 | out_del: | 490 | out_del: |
491 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | ||
449 | list_del(&seg->list); | 492 | list_del(&seg->list); |
450 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 493 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); |
451 | &dummy, &dummy); | ||
452 | kfree(seg); | 494 | kfree(seg); |
453 | out_unlock: | 495 | out_unlock: |
454 | mutex_unlock(&dcss_lock); | 496 | mutex_unlock(&dcss_lock); |
@@ -478,6 +520,8 @@ segment_unload(char *name) | |||
478 | } | 520 | } |
479 | if (atomic_dec_return(&seg->ref_count) != 0) | 521 | if (atomic_dec_return(&seg->ref_count) != 0) |
480 | goto out_unlock; | 522 | goto out_unlock; |
523 | release_resource(seg->res); | ||
524 | kfree(seg->res); | ||
481 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | 525 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
482 | list_del(&seg->list); | 526 | list_del(&seg->list); |
483 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); | 527 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); |
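With the extmem.c changes above, every loaded DCSS now claims its address range in the iomem resource tree, so overlaps with other memory users are refused with -EBUSY and the segments become visible through the usual resource reporting. Purely for illustration (segment name and addresses are made up), such an entry would appear in /proc/iomem along the lines of:

  20000000-2fffffff : MYDCSS (DCSS)

The IORESOURCE_READONLY flag is set for shared SR/ER segments and for SC segments, and cleared again when a segment is reloaded in exclusive-writable mode via segment_modify_shared().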
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index cd85e34d8703..9ff143e87746 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -52,7 +52,7 @@ extern int sysctl_userprocess_debug; | |||
52 | extern void die(const char *,struct pt_regs *,long); | 52 | extern void die(const char *,struct pt_regs *,long); |
53 | 53 | ||
54 | #ifdef CONFIG_KPROBES | 54 | #ifdef CONFIG_KPROBES |
55 | ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); | 55 | static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain); |
56 | int register_page_fault_notifier(struct notifier_block *nb) | 56 | int register_page_fault_notifier(struct notifier_block *nb) |
57 | { | 57 | { |
58 | return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); | 58 | return atomic_notifier_chain_register(¬ify_page_fault_chain, nb); |
@@ -137,7 +137,9 @@ static int __check_access_register(struct pt_regs *regs, int error_code) | |||
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Check which address space the address belongs to. | 139 | * Check which address space the address belongs to. |
140 | * Returns 1 for user space and 0 for kernel space. | 140 | * May return 1 or 2 for user space and 0 for kernel space. |
141 | * Returns 2 for user space in primary addressing mode with | ||
142 | * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on. | ||
141 | */ | 143 | */ |
142 | static inline int check_user_space(struct pt_regs *regs, int error_code) | 144 | static inline int check_user_space(struct pt_regs *regs, int error_code) |
143 | { | 145 | { |
@@ -154,7 +156,7 @@ static inline int check_user_space(struct pt_regs *regs, int error_code) | |||
154 | return __check_access_register(regs, error_code); | 156 | return __check_access_register(regs, error_code); |
155 | if (descriptor == 2) | 157 | if (descriptor == 2) |
156 | return current->thread.mm_segment.ar4; | 158 | return current->thread.mm_segment.ar4; |
157 | return descriptor != 0; | 159 | return ((descriptor != 0) ^ (switch_amode)) << s390_noexec; |
158 | } | 160 | } |
159 | 161 | ||
160 | /* | 162 | /* |
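The one-line change to check_user_space() encodes two kernel parameters in the return value: switch_amode inverts which translation descriptor counts as user space, and s390_noexec shifts a user-space result from 1 to 2, which is the value the exec-protect path in do_exception() later tests for. A small standalone model of just that expression; the two globals are stand-ins for the kernel variables, and descriptor values 1 and 2 never reach this statement because they are handled earlier in the function:

    #include <stdio.h>

    static int switch_amode;    /* models the switch_amode= kernel parameter */
    static int s390_noexec;     /* models the noexec= kernel parameter */

    /* Same expression as the patched return statement in check_user_space(). */
    static int user_space_code(int descriptor)
    {
            return ((descriptor != 0) ^ switch_amode) << s390_noexec;
    }

    int main(void)
    {
            /* noexec=on also selects switch_amode, so (amode=0, noexec=1) is omitted. */
            int configs[3][2] = { {0, 0}, {1, 0}, {1, 1} };
            int descriptors[2] = { 0, 3 };  /* only 0 and 3 fall through to the expression */
            int i, j;

            for (i = 0; i < 3; i++) {
                    switch_amode = configs[i][0];
                    s390_noexec = configs[i][1];
                    for (j = 0; j < 2; j++)
                            printf("amode=%d noexec=%d descriptor=%d -> %d\n",
                                   switch_amode, s390_noexec, descriptors[j],
                                   user_space_code(descriptors[j]));
            }
            return 0;
    }

With both parameters off the expression degenerates to the old "descriptor != 0"; with noexec=on a user-space fault reports 2, matching the updated comment in the previous hunk.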
@@ -183,6 +185,77 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code, | |||
183 | force_sig_info(SIGSEGV, &si, current); | 185 | force_sig_info(SIGSEGV, &si, current); |
184 | } | 186 | } |
185 | 187 | ||
188 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
189 | extern long sys_sigreturn(struct pt_regs *regs); | ||
190 | extern long sys_rt_sigreturn(struct pt_regs *regs); | ||
191 | extern long sys32_sigreturn(struct pt_regs *regs); | ||
192 | extern long sys32_rt_sigreturn(struct pt_regs *regs); | ||
193 | |||
194 | static inline void do_sigreturn(struct mm_struct *mm, struct pt_regs *regs, | ||
195 | int rt) | ||
196 | { | ||
197 | up_read(&mm->mmap_sem); | ||
198 | clear_tsk_thread_flag(current, TIF_SINGLE_STEP); | ||
199 | #ifdef CONFIG_COMPAT | ||
200 | if (test_tsk_thread_flag(current, TIF_31BIT)) { | ||
201 | if (rt) | ||
202 | sys32_rt_sigreturn(regs); | ||
203 | else | ||
204 | sys32_sigreturn(regs); | ||
205 | return; | ||
206 | } | ||
207 | #endif /* CONFIG_COMPAT */ | ||
208 | if (rt) | ||
209 | sys_rt_sigreturn(regs); | ||
210 | else | ||
211 | sys_sigreturn(regs); | ||
212 | return; | ||
213 | } | ||
214 | |||
215 | static int signal_return(struct mm_struct *mm, struct pt_regs *regs, | ||
216 | unsigned long address, unsigned long error_code) | ||
217 | { | ||
218 | pgd_t *pgd; | ||
219 | pmd_t *pmd; | ||
220 | pte_t *pte; | ||
221 | u16 *instruction; | ||
222 | unsigned long pfn, uaddr = regs->psw.addr; | ||
223 | |||
224 | spin_lock(&mm->page_table_lock); | ||
225 | pgd = pgd_offset(mm, uaddr); | ||
226 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
227 | goto out_fault; | ||
228 | pmd = pmd_offset(pgd, uaddr); | ||
229 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
230 | goto out_fault; | ||
231 | pte = pte_offset_map(pmd_offset(pgd_offset(mm, uaddr), uaddr), uaddr); | ||
232 | if (!pte || !pte_present(*pte)) | ||
233 | goto out_fault; | ||
234 | pfn = pte_pfn(*pte); | ||
235 | if (!pfn_valid(pfn)) | ||
236 | goto out_fault; | ||
237 | spin_unlock(&mm->page_table_lock); | ||
238 | |||
239 | instruction = (u16 *) ((pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE-1))); | ||
240 | if (*instruction == 0x0a77) | ||
241 | do_sigreturn(mm, regs, 0); | ||
242 | else if (*instruction == 0x0aad) | ||
243 | do_sigreturn(mm, regs, 1); | ||
244 | else { | ||
245 | printk("- XXX - do_exception: task = %s, primary, NO EXEC " | ||
246 | "-> SIGSEGV\n", current->comm); | ||
247 | up_read(&mm->mmap_sem); | ||
248 | current->thread.prot_addr = address; | ||
249 | current->thread.trap_no = error_code; | ||
250 | do_sigsegv(regs, error_code, SEGV_MAPERR, address); | ||
251 | } | ||
252 | return 0; | ||
253 | out_fault: | ||
254 | spin_unlock(&mm->page_table_lock); | ||
255 | return -EFAULT; | ||
256 | } | ||
257 | #endif /* CONFIG_S390_EXEC_PROTECT */ | ||
258 | |||
186 | /* | 259 | /* |
187 | * This routine handles page faults. It determines the address, | 260 | * This routine handles page faults. It determines the address, |
188 | * and the problem, and then passes it off to one of the appropriate | 261 | * and the problem, and then passes it off to one of the appropriate |
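With execute protection enabled, the signal return trampoline on the user stack is no longer executable, so an instruction fetch from it faults. signal_return() above therefore looks up the halfword the PSW points to and only lets the two SVC encodings for sigreturn and rt_sigreturn complete; anything else gets SIGSEGV. A small decode of the two accepted constants (0x0a is the s390 SVC opcode; 119 and 173 are the s390 sigreturn/rt_sigreturn system call numbers):

    #include <stdio.h>

    /* 0x0a is the SVC opcode; the low byte is the system call number. */
    static void decode(unsigned int insn)
    {
            unsigned int opcode = insn >> 8;
            unsigned int svc_nr = insn & 0xff;

            printf("0x%04x: opcode 0x%02x, svc %u%s\n", insn, opcode, svc_nr,
                   insn == 0x0a77 ? " (sys_sigreturn)" :
                   insn == 0x0aad ? " (sys_rt_sigreturn)" : "");
    }

    int main(void)
    {
            decode(0x0a77);         /* svc 119 */
            decode(0x0aad);         /* svc 173 */
            return 0;
    }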
@@ -260,6 +333,17 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) | |||
260 | vma = find_vma(mm, address); | 333 | vma = find_vma(mm, address); |
261 | if (!vma) | 334 | if (!vma) |
262 | goto bad_area; | 335 | goto bad_area; |
336 | |||
337 | #ifdef CONFIG_S390_EXEC_PROTECT | ||
338 | if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC))) | ||
339 | if (!signal_return(mm, regs, address, error_code)) | ||
340 | /* | ||
341 | * signal_return() has done an up_read(&mm->mmap_sem) | ||
342 | * if it returns 0. | ||
343 | */ | ||
344 | return; | ||
345 | #endif | ||
346 | |||
263 | if (vma->vm_start <= address) | 347 | if (vma->vm_start <= address) |
264 | goto good_area; | 348 | goto good_area; |
265 | if (!(vma->vm_flags & VM_GROWSDOWN)) | 349 | if (!(vma->vm_flags & VM_GROWSDOWN)) |
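The new check in do_exception() ties the pieces together: only faults that check_user_space() classified as 2 (user space with noexec enabled) and that hit a VMA without VM_EXEC take the signal_return() detour. If the instruction cannot even be looked up, that helper returns -EFAULT and the normal fault handling continues; otherwise it either performs the sigreturn or raises SIGSEGV itself, and do_exception() returns. A one-line predicate for the gate, with VM_EXEC as an illustrative stand-in value:

    #include <stdio.h>

    #define VM_EXEC 0x00000004UL    /* stand-in for the kernel's vm_flags bit */

    /* user_address == 2 only occurs with exec protection on, see check_user_space(). */
    static int needs_exec_protect_check(int user_address, unsigned long vm_flags)
    {
            return user_address == 2 && !(vm_flags & VM_EXEC);
    }

    int main(void)
    {
            printf("noexec fault, non-exec vma: %d\n", needs_exec_protect_check(2, 0));
            printf("noexec fault, exec vma:     %d\n", needs_exec_protect_check(2, VM_EXEC));
            printf("ordinary user fault:        %d\n", needs_exec_protect_check(1, 0));
            return 0;
    }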
@@ -452,8 +536,7 @@ void pfault_fini(void) | |||
452 | : : "a" (&refbk), "m" (refbk) : "cc"); | 536 | : : "a" (&refbk), "m" (refbk) : "cc"); |
453 | } | 537 | } |
454 | 538 | ||
455 | asmlinkage void | 539 | static void pfault_interrupt(__u16 error_code) |
456 | pfault_interrupt(__u16 error_code) | ||
457 | { | 540 | { |
458 | struct task_struct *tsk; | 541 | struct task_struct *tsk; |
459 | __u16 subcode; | 542 | __u16 subcode; |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 4bb21be3b007..b3e7c45efb63 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
27 | #include <linux/poison.h> | 27 | #include <linux/poison.h> |
28 | 28 | #include <linux/initrd.h> | |
29 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
30 | #include <asm/system.h> | 30 | #include <asm/system.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void) | |||
95 | pte_t new_pte; | 95 | pte_t new_pte; |
96 | unsigned long address, end; | 96 | unsigned long address, end; |
97 | 97 | ||
98 | address = ((unsigned long)&__start_rodata) & PAGE_MASK; | 98 | address = ((unsigned long)&_stext) & PAGE_MASK; |
99 | end = PFN_ALIGN((unsigned long)&__end_rodata); | 99 | end = PFN_ALIGN((unsigned long)&_eshared); |
100 | 100 | ||
101 | for (; address < end; address += PAGE_SIZE) { | 101 | for (; address < end; address += PAGE_SIZE) { |
102 | pgd = pgd_offset_k(address); | 102 | pgd = pgd_offset_k(address); |
103 | pmd = pmd_offset(pgd, address); | 103 | pmd = pmd_offset(pgd, address); |
104 | pte = pte_offset_kernel(pmd, address); | 104 | pte = pte_offset_kernel(pmd, address); |
105 | new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); | 105 | new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); |
106 | set_pte(pte, new_pte); | 106 | *pte = new_pte; |
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
110 | extern void vmem_map_init(void); | ||
111 | |||
112 | /* | 110 | /* |
113 | * paging_init() sets up the page tables | 111 | * paging_init() sets up the page tables |
114 | */ | 112 | */ |
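setup_ro_region() now write-protects everything from _stext up to the _eshared marker instead of just the __start_rodata..__end_rodata window, walking the range one page at a time and rewriting each PTE with _PAGE_RO. The bounds use the usual page arithmetic: the start is truncated with PAGE_MASK, the end rounded up with PFN_ALIGN. A quick userspace illustration of that arithmetic, with made-up addresses standing in for &_stext and &_eshared:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define PFN_ALIGN(x)    (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            /* Made-up addresses standing in for &_stext and &_eshared. */
            unsigned long stext = 0x100248, eshared = 0x3c5f10;
            unsigned long address = stext & PAGE_MASK;
            unsigned long end = PFN_ALIGN(eshared);
            unsigned long pages = 0;

            for (; address < end; address += PAGE_SIZE)
                    pages++;        /* the kernel marks each of these pages _PAGE_RO */

            printf("read-only: %#lx - %#lx (%lu pages)\n",
                   stext & PAGE_MASK, end - 1, pages);
            return 0;
    }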
@@ -125,11 +123,11 @@ void __init paging_init(void) | |||
125 | #ifdef CONFIG_64BIT | 123 | #ifdef CONFIG_64BIT |
126 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; | 124 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; |
127 | for (i = 0; i < PTRS_PER_PGD; i++) | 125 | for (i = 0; i < PTRS_PER_PGD; i++) |
128 | pgd_clear(pg_dir + i); | 126 | pgd_clear_kernel(pg_dir + i); |
129 | #else | 127 | #else |
130 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; | 128 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; |
131 | for (i = 0; i < PTRS_PER_PGD; i++) | 129 | for (i = 0; i < PTRS_PER_PGD; i++) |
132 | pmd_clear((pmd_t *)(pg_dir + i)); | 130 | pmd_clear_kernel((pmd_t *)(pg_dir + i)); |
133 | #endif | 131 | #endif |
134 | vmem_map_init(); | 132 | vmem_map_init(); |
135 | setup_ro_region(); | 133 | setup_ro_region(); |
@@ -174,10 +172,8 @@ void __init mem_init(void) | |||
174 | datasize >>10, | 172 | datasize >>10, |
175 | initsize >> 10); | 173 | initsize >> 10); |
176 | printk("Write protected kernel read-only data: %#lx - %#lx\n", | 174 | printk("Write protected kernel read-only data: %#lx - %#lx\n", |
177 | (unsigned long)&__start_rodata, | 175 | (unsigned long)&_stext, |
178 | PFN_ALIGN((unsigned long)&__end_rodata) - 1); | 176 | PFN_ALIGN((unsigned long)&_eshared) - 1); |
179 | printk("Virtual memmap size: %ldk\n", | ||
180 | (max_pfn * sizeof(struct page)) >> 10); | ||
181 | } | 177 | } |
182 | 178 | ||
183 | void free_initmem(void) | 179 | void free_initmem(void) |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index cd3d93e8c211..92a565190028 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -82,7 +82,7 @@ static inline pmd_t *vmem_pmd_alloc(void) | |||
82 | if (!pmd) | 82 | if (!pmd) |
83 | return NULL; | 83 | return NULL; |
84 | for (i = 0; i < PTRS_PER_PMD; i++) | 84 | for (i = 0; i < PTRS_PER_PMD; i++) |
85 | pmd_clear(pmd + i); | 85 | pmd_clear_kernel(pmd + i); |
86 | return pmd; | 86 | return pmd; |
87 | } | 87 | } |
88 | 88 | ||
@@ -97,7 +97,7 @@ static inline pte_t *vmem_pte_alloc(void) | |||
97 | return NULL; | 97 | return NULL; |
98 | pte_val(empty_pte) = _PAGE_TYPE_EMPTY; | 98 | pte_val(empty_pte) = _PAGE_TYPE_EMPTY; |
99 | for (i = 0; i < PTRS_PER_PTE; i++) | 99 | for (i = 0; i < PTRS_PER_PTE; i++) |
100 | set_pte(pte + i, empty_pte); | 100 | pte[i] = empty_pte; |
101 | return pte; | 101 | return pte; |
102 | } | 102 | } |
103 | 103 | ||
@@ -119,7 +119,7 @@ static int vmem_add_range(unsigned long start, unsigned long size) | |||
119 | pm_dir = vmem_pmd_alloc(); | 119 | pm_dir = vmem_pmd_alloc(); |
120 | if (!pm_dir) | 120 | if (!pm_dir) |
121 | goto out; | 121 | goto out; |
122 | pgd_populate(&init_mm, pg_dir, pm_dir); | 122 | pgd_populate_kernel(&init_mm, pg_dir, pm_dir); |
123 | } | 123 | } |
124 | 124 | ||
125 | pm_dir = pmd_offset(pg_dir, address); | 125 | pm_dir = pmd_offset(pg_dir, address); |
@@ -132,7 +132,7 @@ static int vmem_add_range(unsigned long start, unsigned long size) | |||
132 | 132 | ||
133 | pt_dir = pte_offset_kernel(pm_dir, address); | 133 | pt_dir = pte_offset_kernel(pm_dir, address); |
134 | pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); | 134 | pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); |
135 | set_pte(pt_dir, pte); | 135 | *pt_dir = pte; |
136 | } | 136 | } |
137 | ret = 0; | 137 | ret = 0; |
138 | out: | 138 | out: |
@@ -161,7 +161,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size) | |||
161 | if (pmd_none(*pm_dir)) | 161 | if (pmd_none(*pm_dir)) |
162 | continue; | 162 | continue; |
163 | pt_dir = pte_offset_kernel(pm_dir, address); | 163 | pt_dir = pte_offset_kernel(pm_dir, address); |
164 | set_pte(pt_dir, pte); | 164 | *pt_dir = pte; |
165 | } | 165 | } |
166 | flush_tlb_kernel_range(start, start + size); | 166 | flush_tlb_kernel_range(start, start + size); |
167 | } | 167 | } |
@@ -191,7 +191,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size) | |||
191 | pm_dir = vmem_pmd_alloc(); | 191 | pm_dir = vmem_pmd_alloc(); |
192 | if (!pm_dir) | 192 | if (!pm_dir) |
193 | goto out; | 193 | goto out; |
194 | pgd_populate(&init_mm, pg_dir, pm_dir); | 194 | pgd_populate_kernel(&init_mm, pg_dir, pm_dir); |
195 | } | 195 | } |
196 | 196 | ||
197 | pm_dir = pmd_offset(pg_dir, address); | 197 | pm_dir = pmd_offset(pg_dir, address); |
@@ -210,7 +210,7 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size) | |||
210 | if (!new_page) | 210 | if (!new_page) |
211 | goto out; | 211 | goto out; |
212 | pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); | 212 | pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); |
213 | set_pte(pt_dir, pte); | 213 | *pt_dir = pte; |
214 | } | 214 | } |
215 | } | 215 | } |
216 | ret = 0; | 216 | ret = 0; |
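A pattern repeats through the init.c and vmem.c hunks: kernel page-table updates stop going through set_pte(), pmd_clear(), pgd_clear() and pgd_populate() and use direct assignment or the new *_kernel variants instead. A plausible reading, consistent with the exec-protect machinery added in this series but not spelled out in these hunks, is that the generic helpers now also maintain a separate user shadow table, which kernel mappings must bypass. The userspace model below is purely illustrative of that split; the names and the two-array layout are assumptions, not the kernel's implementation:

    #include <stdio.h>

    #define TABLE_ENTRIES 4

    /* Hypothetical model: one table for the real mapping, one shadow table
     * maintained only when exec protection is enabled. */
    static unsigned long table[TABLE_ENTRIES];
    static unsigned long shadow[TABLE_ENTRIES];
    static int noexec_enabled = 1;

    /* Models the generic helper: keeps the shadow table in sync. */
    static void set_entry(int idx, unsigned long val)
    {
            table[idx] = val;
            if (noexec_enabled)
                    shadow[idx] = val;
    }

    /* Models the *_kernel variant / direct assignment: kernel mappings have
     * no shadow, so only the primary table is written. */
    static void set_entry_kernel(int idx, unsigned long val)
    {
            table[idx] = val;
    }

    int main(void)
    {
            set_entry(0, 0x1000);           /* user mapping: both tables updated */
            set_entry_kernel(1, 0x2000);    /* kernel mapping: shadow untouched */
            printf("table:  %#lx %#lx\n", table[0], table[1]);
            printf("shadow: %#lx %#lx\n", shadow[0], shadow[1]);
            return 0;
    }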