author     Rusty Russell <rusty@rustcorp.com.au>   2008-12-29 16:32:35 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>   2008-12-29 16:32:35 -0500
commit     33edcf133ba93ecba2e4b6472e97b689895d805c (patch)
tree       327d7a20acef64005e7c5ccbfa1265be28aeb6ac /arch/s390
parent     be4d638c1597580ed2294d899d9f1a2cd10e462c (diff)
parent     3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/s390')
62 files changed, 1943 insertions, 830 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b4aa5869c7f9..19577aeffd7b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -43,6 +43,9 @@ config GENERIC_HWEIGHT
 config GENERIC_TIME
 	def_bool y
 
+config GENERIC_TIME_VSYSCALL
+	def_bool y
+
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
@@ -66,10 +69,15 @@ config PGSTE
 	bool
 	default y if KVM
 
+config VIRT_CPU_ACCOUNTING
+	def_bool y
+
 mainmenu "Linux Kernel Configuration"
 
 config S390
 	def_bool y
+	select USE_GENERIC_SMP_HELPERS if SMP
+	select HAVE_FUNCTION_TRACER
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
@@ -226,6 +234,14 @@ config MARCH_Z9_109
 	  Class (z9 BC). The kernel will be slightly faster but will not
 	  work on older machines such as the z990, z890, z900, and z800.
 
+config MARCH_Z10
+	bool "IBM System z10"
+	help
+	  Select this to enable optimizations for IBM System z10. The
+	  kernel will be slightly faster but will not work on older
+	  machines such as the z990, z890, z900, z800, z9-109, z9-ec
+	  and z9-bc.
+
 endchoice
 
 config PACK_STACK
@@ -344,16 +360,6 @@ config QDIO
 
 	  If unsure, say Y.
 
-config QDIO_DEBUG
-	bool "Extended debugging information"
-	depends on QDIO
-	help
-	  Say Y here to get extended debugging output in
-	  /sys/kernel/debug/s390dbf/qdio...
-	  Warning: this option reduces the performance of the QDIO module.
-
-	  If unsure, say N.
-
 config CHSC_SCH
 	tristate "Support for CHSC subchannels"
 	help
@@ -467,22 +473,9 @@ config PAGE_STATES
 	  hypervisor. The ESSA instruction is used to do the states
 	  changes between a page that has content and the unused state.
 
-config VIRT_TIMER
-	bool "Virtual CPU timer support"
-	help
-	  This provides a kernel interface for virtual CPU timers.
-	  Default is disabled.
-
-config VIRT_CPU_ACCOUNTING
-	bool "Base user process accounting on virtual cpu timer"
-	depends on VIRT_TIMER
-	help
-	  Select this option to use CPU timer deltas to do user
-	  process accounting.
-
 config APPLDATA_BASE
 	bool "Linux - VM Monitor Stream, base infrastructure"
-	depends on PROC_FS && VIRT_TIMER=y
+	depends on PROC_FS
 	help
 	  This provides a kernel interface for creating and updating z/VM APPLDATA
 	  monitor records. The monitor records are updated at certain time
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 792a4e7743ce..578c61f15a4b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -34,6 +34,7 @@ cflags-$(CONFIG_MARCH_G5) += $(call cc-option,-march=g5)
 cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
 cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
 cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
+cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
 
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index 17a2636fec0a..f0b23fc759ba 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -26,10 +26,6 @@
 #define CTL_APPLDATA_NET_SUM	2125
 #define CTL_APPLDATA_PROC	2126
 
-#define P_INFO(x...)	printk(KERN_INFO MY_PRINT_NAME " info: " x)
-#define P_ERROR(x...)	printk(KERN_ERR MY_PRINT_NAME " error: " x)
-#define P_WARNING(x...)	printk(KERN_WARNING MY_PRINT_NAME " status: " x)
-
 struct appldata_ops {
 	struct list_head list;
 	struct ctl_table_header *sysctl_header;
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a06a47cdd5e0..27b70d8a359c 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -10,6 +10,9 @@
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -32,7 +35,6 @@
 #include "appldata.h"
 
 
-#define MY_PRINT_NAME	"appldata"	/* for debug messages, etc. */
 #define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
 					   sampling interval in
 					   milliseconds */
@@ -390,8 +392,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 					(unsigned long) ops->data, ops->size,
 					ops->mod_lvl);
 		if (rc != 0) {
-			P_ERROR("START DIAG 0xDC for %s failed, "
-				"return code: %d\n", ops->name, rc);
+			pr_err("Starting the data collection for %s "
+			       "failed with rc=%d\n", ops->name, rc);
 			module_put(ops->owner);
 		} else
 			ops->active = 1;
@@ -401,8 +403,8 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 				(unsigned long) ops->data, ops->size,
 				ops->mod_lvl);
 		if (rc != 0)
-			P_ERROR("STOP DIAG 0xDC for %s failed, "
-				"return code: %d\n", ops->name, rc);
+			pr_err("Stopping the data collection for %s "
+			       "failed with rc=%d\n", ops->name, rc);
 		module_put(ops->owner);
 	}
 	spin_unlock(&appldata_ops_lock);
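The appldata, aes_s390, hypfs, cpcmd and s390dbf hunks in this merge all follow the same message-cleanup pattern: define KMSG_COMPONENT and pr_fmt() before any header is included, then call pr_err()/pr_info() without repeating the prefix by hand. A minimal sketch of how the pattern behaves; the component name "foo" and the init function are illustrative, not part of this merge:

	/* pr_fmt() must be defined before printk.h is pulled in indirectly. */
	#define KMSG_COMPONENT "foo"
	#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/module.h>

	static int __init foo_init(void)
	{
		/* pr_err() expands to printk(KERN_ERR pr_fmt(...)), so this
		 * prints "foo: setup failed with rc=-5" */
		pr_err("setup failed with rc=%d\n", -EIO);
		return 0;
	}
	module_init(foo_init);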
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 3b746556e1a3..fa741f84c5b9 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -67,7 +67,6 @@ static void appldata_get_net_sum_data(void *data)
 	int i;
 	struct appldata_net_sum_data *net_data;
 	struct net_device *dev;
-	struct net_device_stats *stats;
 	unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors,
 		tx_errors, rx_dropped, tx_dropped, collisions;
 
@@ -86,7 +85,8 @@ static void appldata_get_net_sum_data(void *data)
 	collisions = 0;
 	read_lock(&dev_base_lock);
 	for_each_netdev(&init_net, dev) {
-		stats = dev->get_stats(dev);
+		const struct net_device_stats *stats = dev_get_stats(dev);
+
 		rx_packets += stats->rx_packets;
 		tx_packets += stats->tx_packets;
 		rx_bytes += stats->rx_bytes;
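The appldata_net_sum change replaces the per-driver dev->get_stats() hook with dev_get_stats(), which also covers drivers that keep their counters in dev->stats. A hedged sketch of the resulting accumulation loop, using the one-argument dev_get_stats() form that appears in this diff (later kernels changed this interface); the helper name is made up:

	#include <linux/netdevice.h>
	#include <net/net_namespace.h>

	/* Sum RX/TX packet counters over all devices in the initial namespace.
	 * Locking mirrors the read_lock(&dev_base_lock) used above. */
	static void sum_packets(unsigned long *rx, unsigned long *tx)
	{
		struct net_device *dev;

		*rx = 0;
		*tx = 0;
		read_lock(&dev_base_lock);
		for_each_netdev(&init_net, dev) {
			const struct net_device_stats *stats = dev_get_stats(dev);

			*rx += stats->rx_packets;
			*tx += stats->tx_packets;
		}
		read_unlock(&dev_base_lock);
	}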
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index eb44f9f8ab91..55c80ffd42b9 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -9,6 +9,9 @@
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -22,7 +25,6 @@
 #include "appldata.h"
 
 
-#define MY_PRINT_NAME	"appldata_os"	/* for debug messages, etc. */
 #define LOAD_INT(x) ((x) >> FSHIFT)
 #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
 
@@ -143,21 +145,16 @@ static void appldata_get_os_data(void *data)
 					(unsigned long) ops.data, new_size,
 					ops.mod_lvl);
 			if (rc != 0)
-				P_ERROR("os: START NEW DIAG 0xDC failed, "
-					"return code: %d, new size = %i\n", rc,
-					new_size);
+				pr_err("Starting a new OS data collection "
+				       "failed with rc=%d\n", rc);
 
 			rc = appldata_diag(APPLDATA_RECORD_OS_ID,
 					   APPLDATA_STOP_REC,
 					   (unsigned long) ops.data, ops.size,
 					   ops.mod_lvl);
 			if (rc != 0)
-				P_ERROR("os: STOP OLD DIAG 0xDC failed, "
-					"return code: %d, old size = %i\n", rc,
-					ops.size);
-			else
-				P_INFO("os: old record size = %i stopped\n",
-				       ops.size);
+				pr_err("Stopping a faulty OS data "
+				       "collection failed with rc=%d\n", rc);
 		}
 		ops.size = new_size;
 	}
@@ -178,8 +175,8 @@ static int __init appldata_os_init(void)
 	max_size = sizeof(struct appldata_os_data) +
 		   (NR_CPUS * sizeof(struct appldata_os_per_cpu));
 	if (max_size > APPLDATA_MAX_REC_SIZE) {
-		P_ERROR("Max. size of OS record = %i, bigger than maximum "
-			"record size (%i)\n", max_size, APPLDATA_MAX_REC_SIZE);
+		pr_err("Maximum OS record size %i exceeds the maximum "
+		       "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
 		rc = -ENOMEM;
 		goto out;
 	}
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index e33f32b54c08..c42cd898f68b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -17,6 +17,9 @@
  *
  */
 
+#define KMSG_COMPONENT "aes_s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
 #include <linux/err.h>
@@ -169,7 +172,8 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(sctx->fallback.cip)) {
-		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		pr_err("Allocating AES fallback algorithm %s failed\n",
+		       name);
 		return PTR_ERR(sctx->fallback.blk);
 	}
 
@@ -349,7 +353,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(sctx->fallback.blk)) {
-		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+		pr_err("Allocating AES fallback algorithm %s failed\n",
+		       name);
 		return PTR_ERR(sctx->fallback.blk);
 	}
 
@@ -515,9 +520,8 @@ static int __init aes_s390_init(void)
 
 	/* z9 109 and z9 BC/EC only support 128 bit key length */
 	if (keylen_flag == AES_KEYLEN_128)
-		printk(KERN_INFO
-		       "aes_s390: hardware acceleration only available for "
-		       "128 bit keys\n");
+		pr_info("AES hardware acceleration is only available for"
+			" 128-bit keys\n");
 
 	ret = crypto_register_alg(&aes_alg);
 	if (ret)
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index b9a1ce1f28e4..b1e892a43816 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -3,10 +3,13 @@
  * Hypervisor filesystem for Linux on s390. Diag 204 and 224
  * implementation.
  *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2008
  * Author(s): Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -527,13 +530,14 @@ __init int hypfs_diag_init(void)
 	int rc;
 
 	if (diag204_probe()) {
-		printk(KERN_ERR "hypfs: diag 204 not working.");
+		pr_err("The hardware system does not support hypfs\n");
 		return -ENODATA;
 	}
 	rc = diag224_get_name_table();
 	if (rc) {
 		diag204_free_buffer();
-		printk(KERN_ERR "hypfs: could not get name table.\n");
+		pr_err("The hardware system does not provide all "
+		       "functions required by hypfs\n");
 	}
 	return rc;
 }
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 36313801cd5c..9d4f8e6c0800 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -2,10 +2,13 @@
  *  arch/s390/hypfs/inode.c
  *    Hypervisor filesystem for Linux on s390.
  *
- *    Copyright (C) IBM Corp. 2006
+ *    Copyright IBM Corp. 2006, 2008
  *    Author(s): Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#define KMSG_COMPONENT "hypfs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -200,7 +203,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	else
 		rc = hypfs_diag_create_files(sb, sb->s_root);
 	if (rc) {
-		printk(KERN_ERR "hypfs: Update failed\n");
+		pr_err("Updating the hypfs tree failed\n");
 		hypfs_delete_tree(sb->s_root);
 		goto out;
 	}
@@ -252,8 +255,7 @@ static int hypfs_parse_options(char *options, struct super_block *sb)
 			break;
 		case opt_err:
 		default:
-			printk(KERN_ERR "hypfs: Unrecognized mount option "
-			       "\"%s\" or missing value\n", str);
+			pr_err("%s is not a valid mount option\n", str);
 			return -EINVAL;
 		}
 	}
@@ -280,8 +282,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbi)
 		return -ENOMEM;
 	mutex_init(&sbi->lock);
-	sbi->uid = current->uid;
-	sbi->gid = current->gid;
+	sbi->uid = current_uid();
+	sbi->gid = current_gid();
 	sb->s_fs_info = sbi;
 	sb->s_blocksize = PAGE_CACHE_SIZE;
 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
@@ -317,7 +319,7 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 	}
 	hypfs_update_update(sb);
 	sb->s_root = root_dentry;
-	printk(KERN_INFO "hypfs: Hypervisor filesystem mounted\n");
+	pr_info("Hypervisor filesystem mounted\n");
 	return 0;
 
 err_tree:
@@ -513,7 +515,7 @@ fail_sysfs:
 	if (!MACHINE_IS_VM)
 		hypfs_diag_exit();
 fail_diag:
-	printk(KERN_ERR "hypfs: Initialization failed with rc = %i.\n", rc);
+	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
 
diff --git a/arch/s390/include/asm/auxvec.h b/arch/s390/include/asm/auxvec.h
index 0d340720fd99..a1f153e89133 100644
--- a/arch/s390/include/asm/auxvec.h
+++ b/arch/s390/include/asm/auxvec.h
@@ -1,4 +1,6 @@
 #ifndef __ASMS390_AUXVEC_H
 #define __ASMS390_AUXVEC_H
 
+#define AT_SYSINFO_EHDR		33
+
 #endif
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index 384e3621e341..7efd0abe8887 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -47,7 +47,10 @@
 
 #endif /* CONFIG_DEBUG_BUGVERBOSE */
 
-#define BUG()	__EMIT_BUG(0)
+#define BUG() do {					\
+	__EMIT_BUG(0);					\
+	for (;;);					\
+} while (0)
 
 #define WARN_ON(x) ({					\
 	int __ret_warn_on = !!(x);			\
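The new BUG() body pairs the trap-emitting __EMIT_BUG(0) with an endless loop so the compiler treats BUG() as non-returning: it stops warning about, and generating code for, the unreachable paths behind it. A generic illustration of why that matters; the example function is made up:

	/* Without a provably non-returning BUG(), the compiler would demand a
	 * return value after the if-branch; with the for(;;) it knows control
	 * never falls through. */
	int pick(int idx, const int *table, int len)
	{
		if (idx < 0 || idx >= len)
			BUG();		/* emits the trap, then loops forever */
		return table[idx];
	}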
diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h
index 1fe2492baa8d..8bcf277c8468 100644
--- a/arch/s390/include/asm/byteorder.h
+++ b/arch/s390/include/asm/byteorder.h
@@ -11,32 +11,39 @@
 
 #include <asm/types.h>
 
-#ifdef __GNUC__
+#define __BIG_ENDIAN
+
+#ifndef __s390x__
+# define __SWAB_64_THRU_32__
+#endif
 
 #ifdef __s390x__
-static inline __u64 ___arch__swab64p(const __u64 *x)
+static inline __u64 __arch_swab64p(const __u64 *x)
 {
 	__u64 result;
 
 	asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
 	return result;
 }
+#define __arch_swab64p __arch_swab64p
 
-static inline __u64 ___arch__swab64(__u64 x)
+static inline __u64 __arch_swab64(__u64 x)
 {
 	__u64 result;
 
 	asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
 	return result;
 }
+#define __arch_swab64 __arch_swab64
 
-static inline void ___arch__swab64s(__u64 *x)
+static inline void __arch_swab64s(__u64 *x)
 {
-	*x = ___arch__swab64p(x);
+	*x = __arch_swab64p(x);
 }
+#define __arch_swab64s __arch_swab64s
 #endif /* __s390x__ */
 
-static inline __u32 ___arch__swab32p(const __u32 *x)
+static inline __u32 __arch_swab32p(const __u32 *x)
 {
 	__u32 result;
 
@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x)
 #endif /* __s390x__ */
 	return result;
 }
+#define __arch_swab32p __arch_swab32p
 
-static inline __u32 ___arch__swab32(__u32 x)
+#ifdef __s390x__
+static inline __u32 __arch_swab32(__u32 x)
 {
-#ifndef __s390x__
-	return ___arch__swab32p(&x);
-#else /* __s390x__ */
 	__u32 result;
 
 	asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
 	return result;
-#endif /* __s390x__ */
-}
-
-static __inline__ void ___arch__swab32s(__u32 *x)
-{
-	*x = ___arch__swab32p(x);
 }
+#define __arch_swab32 __arch_swab32
+#endif /* __s390x__ */
 
-static __inline__ __u16 ___arch__swab16p(const __u16 *x)
+static inline __u16 __arch_swab16p(const __u16 *x)
 {
 	__u16 result;
 
@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
 #endif /* __s390x__ */
 	return result;
 }
+#define __arch_swab16p __arch_swab16p
 
-static __inline__ __u16 ___arch__swab16(__u16 x)
-{
-	return ___arch__swab16p(&x);
-}
-
-static __inline__ void ___arch__swab16s(__u16 *x)
-{
-	*x = ___arch__swab16p(x);
-}
-
-#ifdef __s390x__
-#define __arch__swab64(x) ___arch__swab64(x)
-#define __arch__swab64p(x) ___arch__swab64p(x)
-#define __arch__swab64s(x) ___arch__swab64s(x)
-#endif /* __s390x__ */
-#define __arch__swab32(x) ___arch__swab32(x)
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32p(x) ___arch__swab32p(x)
-#define __arch__swab16p(x) ___arch__swab16p(x)
-#define __arch__swab32s(x) ___arch__swab32s(x)
-#define __arch__swab16s(x) ___arch__swab16s(x)
-
-#ifndef __s390x__
-#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __BYTEORDER_HAS_U64__
-# define __SWAB_64_THRU_32__
-#endif
-#else /* __s390x__ */
-#define __BYTEORDER_HAS_U64__
-#endif /* __s390x__ */
-
-#endif /* __GNUC__ */
-
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _S390_BYTEORDER_H */
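The byteorder.h rework moves to the newer __arch_swab* override convention of <linux/byteorder.h>: the architecture defines only the helpers it can accelerate with the load-reversed instructions (lrvg/lrvgr/lrv/lrvr) and the generic header supplies the rest. For reference, a plain-C equivalent of what the 64-bit swap does; this is a sketch for illustration, not the kernel's implementation:

	#include <linux/types.h>

	/* Portable byte reversal equivalent to "lrvgr":
	 * 0x0102030405060708 becomes 0x0807060504030201. */
	static inline __u64 swab64_by_hand(__u64 x)
	{
		return  ((x & 0x00000000000000ffULL) << 56) |
			((x & 0x000000000000ff00ULL) << 40) |
			((x & 0x0000000000ff0000ULL) << 24) |
			((x & 0x00000000ff000000ULL) <<  8) |
			((x & 0x000000ff00000000ULL) >>  8) |
			((x & 0x0000ff0000000000ULL) >> 24) |
			((x & 0x00ff000000000000ULL) >> 40) |
			((x & 0xff00000000000000ULL) >> 56);
	}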
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 261785ab5b22..d480f39d65e6 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -120,6 +120,10 @@ typedef s390_compat_regs compat_elf_gregset_t;
 #include <asm/system.h>		/* for save_access_regs */
 #include <asm/mmu_context.h>
 
+#include <asm/vdso.h>
+
+extern unsigned int vdso_enabled;
+
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -191,4 +195,16 @@ do {							\
 	current->mm->context.noexec == 0;		\
 })
 
+#define ARCH_DLINFO							    \
+do {									    \
+	if (vdso_enabled)						    \
+		NEW_AUX_ENT(AT_SYSINFO_EHDR,				    \
+			    (unsigned long)current->mm->context.vdso_base); \
+} while (0)
+
+struct linux_binprm;
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+int arch_setup_additional_pages(struct linux_binprm *, int);
+
 #endif
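ARCH_DLINFO exports the vDSO's load address to user space through the ELF auxiliary vector, using the AT_SYSINFO_EHDR entry defined as 33 in auxvec.h above. A small user-space sketch showing how a process can read that entry; getauxval() is a glibc facility (glibc 2.16+) and is independent of this kernel change:

	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		/* Address of the vDSO ELF header mapped into this process,
		 * or 0 if the kernel did not provide one. */
		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

		printf("vDSO mapped at %#lx\n", vdso);
		return 0;
	}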
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 8be1f3a58042..ef6170995076 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -248,8 +248,8 @@ struct dcw {
 #define TCCB_MAX_SIZE		(sizeof(struct tccb_tcah) + \
 				 TCCB_MAX_DCW * sizeof(struct dcw) + \
 				 sizeof(struct tccb_tcat))
-#define TCCB_SAC_DEFAULT	0xf901
-#define TCCB_SAC_INTRG		0xf902
+#define TCCB_SAC_DEFAULT	0x1ffe
+#define TCCB_SAC_INTRG		0x1fff
 
 /**
  * struct tccb_tcah - Transport-Command-Area Header (TCAH)
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
new file mode 100644
index 000000000000..5a5bc75e19d4
--- /dev/null
+++ b/arch/s390/include/asm/ftrace.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_FTRACE_H
+#define _ASM_S390_FTRACE_H
+
+#ifndef __ASSEMBLY__
+extern void _mcount(void);
+#endif
+
+#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 34bb8916db4f..1420a1115948 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -17,6 +17,7 @@
 #define CHSC_SCH_ISC 7			/* CHSC subchannels */
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC	/* I/O subchannel in qdio mode */
+#define AP_ISC 6			/* adjunct processor (crypto) devices */
 
 /* Functions for registration of I/O interruption subclasses */
 void isc_register(unsigned int isc);
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d2b4ff831477..3b59216e6284 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
 	struct list_head pgtable_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
+	unsigned long vdso_base;
 	int noexec;
 	int has_pgste;	 /* The mmu context has extended page tables */
 	int alloc_pgste; /* cloned contexts will have extended page tables */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 991ba939408c..32e8f6aa4384 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -152,4 +152,6 @@ void arch_alloc_page(struct page *page, int order);
 #include <asm-generic/memory_model.h>
 #include <asm-generic/page.h>
 
+#define __HAVE_ARCH_GATE_AREA 1
+
 #endif /* _S390_PAGE_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f5b2bf3d7c1d..b2658b9220fe 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -28,6 +28,8 @@ void disable_noexec(struct mm_struct *, struct task_struct *);
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
+	typedef struct { char _[n]; } addrtype;
+
 	*s = val;
 	n = (n / 256) - 1;
 	asm volatile(
@@ -39,7 +41,8 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 		"0: mvc	256(256,%0),0(%0)\n"
 		"   la	%0,256(%0)\n"
 		"   brct %1,0b\n"
-		: "+a" (s), "+d" (n));
+		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
+		: "m" (*(addrtype *) s));
 }
 
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
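The clear_table() change here and the __stfle() helper added to system.h further down use the same idiom: a local typedef struct { char _[n]; } addrtype lets the inline asm name the whole buffer as an "m"/"=m" operand, so the compiler knows exactly which memory the instruction touches instead of needing a blanket "memory" clobber. A generic sketch of the constraint technique (the empty asm below does nothing except act as a compiler barrier for that one buffer; the helper name is made up):

	#include <stddef.h>

	/* Tell the compiler that something may have read and written all of
	 * buf[0..len-1]. The variable-length struct member is a GCC extension,
	 * used the same way as in clear_table() above. */
	static inline void touch_buffer(void *buf, size_t len)
	{
		typedef struct { char _[len]; } addrtype;

		asm volatile("" : "=m" (*(addrtype *) buf)
			     : "m" (*(addrtype *) buf));
	}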
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 4af80af2a88f..066b99502e09 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -13,6 +13,7 @@
 #ifndef __ASM_S390_PROCESSOR_H
 #define __ASM_S390_PROCESSOR_H
 
+#include <linux/linkage.h>
 #include <asm/ptrace.h>
 
 #ifdef __KERNEL__
@@ -258,7 +259,7 @@ static inline void enabled_wait(void)
  * Function to drop a processor into disabled wait state
  */
 
-static inline void disabled_wait(unsigned long code)
+static inline void ATTRIB_NORET disabled_wait(unsigned long code)
 {
 	unsigned long ctl_buf;
 	psw_t dw_psw;
@@ -322,6 +323,7 @@ static inline void disabled_wait(unsigned long code)
 		: "=m" (ctl_buf)
 		: "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0");
 #endif /* __s390x__ */
+	while (1);
 }
 
 /*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 4734c3f05354..27fc1746de15 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -373,16 +373,16 @@ struct qdio_initialize {
 #define QDIO_FLAG_SYNC_OUTPUT		0x02
 #define QDIO_FLAG_PCI_OUT		0x10
 
-extern int qdio_initialize(struct qdio_initialize *init_data);
-extern int qdio_allocate(struct qdio_initialize *init_data);
-extern int qdio_establish(struct qdio_initialize *init_data);
+extern int qdio_initialize(struct qdio_initialize *);
+extern int qdio_allocate(struct qdio_initialize *);
+extern int qdio_establish(struct qdio_initialize *);
 extern int qdio_activate(struct ccw_device *);
 
-extern int do_QDIO(struct ccw_device*, unsigned int flags,
-		   int q_nr, int qidx, int count);
-extern int qdio_cleanup(struct ccw_device*, int how);
-extern int qdio_shutdown(struct ccw_device*, int how);
+extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
+		   int q_nr, int bufnr, int count);
+extern int qdio_cleanup(struct ccw_device*, int);
+extern int qdio_shutdown(struct ccw_device*, int);
 extern int qdio_free(struct ccw_device *);
-extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev);
+extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
 
 #endif /* __QDIO_H__ */
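Note the changed contract of qdio_get_ssqd_desc(): instead of returning a pointer into qdio-owned storage it now fills a caller-provided structure and returns an error code. A hedged sketch of the new call pattern from a driver's point of view (function name and error handling are illustrative only):

	#include <asm/qdio.h>

	/* cdev is the ccw device the qdio subchannel belongs to. */
	static int query_ssqd(struct ccw_device *cdev)
	{
		struct qdio_ssqd_desc ssqd;
		int rc;

		rc = qdio_get_ssqd_desc(cdev, &ssqd);
		if (rc)
			return rc;	/* query failed, descriptor not valid */
		/* ... inspect the ssqd fields here ... */
		return 0;
	}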
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index e16d56f8dfe1..ec403d4304f8 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -61,6 +61,7 @@ typedef enum
 {
 	ec_schedule=0,
 	ec_call_function,
+	ec_call_function_single,
 	ec_bit_last
 } ec_bit_sig;
 
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ae89cf2478fc..024b91e06239 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -91,8 +91,9 @@ extern int __cpu_up (unsigned int cpu);
 extern struct mutex smp_cpu_state_mutex;
 extern int smp_cpu_polarization[];
 
-extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
-				  void *info, int wait);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi(cpumask_t mask);
+
 #endif
 
 #ifndef CONFIG_SMP
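With USE_GENERIC_SMP_HELPERS selected in the Kconfig hunk above, the architecture only has to provide the two IPI senders declared here; the cross-CPU calls themselves go through the generic kernel/smp.c API. A sketch of a caller using that generic interface, assuming the smp_call_function_single(cpu, func, info, wait) form of this kernel generation; the callback is invented for illustration:

	#include <linux/kernel.h>
	#include <linux/smp.h>

	static void remote_hello(void *info)
	{
		/* runs on the target CPU, in interrupt context */
		pr_info("hello from CPU %d\n", smp_processor_id());
	}

	static void poke_cpu(int cpu)
	{
		/* wait=1: return only after remote_hello() has completed.
		 * The generic code ends up invoking
		 * arch_send_call_function_single_ipi() declared above. */
		smp_call_function_single(cpu, remote_hello, NULL, 1);
	}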
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 79d01343f8b0..ad93212d9e16 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -118,4 +118,15 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
 	return r0;
 }
 
+/*
+ * Service level reporting interface.
+ */
+struct service_level {
+	struct list_head list;
+	void (*seq_print)(struct seq_file *, struct service_level *);
+};
+
+int register_service_level(struct service_level *);
+int unregister_service_level(struct service_level *);
+
 #endif /* __ASM_S390_SYSINFO_H */
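The new service-level interface lets subsystems publish their service/microcode levels through a common seq_file on the kernel side of this merge. A hedged sketch of how a driver might register an entry, using only the structure and the two functions declared above; the component name and level value are made up:

	#include <linux/module.h>
	#include <linux/seq_file.h>
	#include <asm/sysinfo.h>

	static void frob_print_service_level(struct seq_file *m,
					     struct service_level *slr)
	{
		/* one line per registered component */
		seq_printf(m, "Frobnicator microcode level: %d\n", 42);
	}

	static struct service_level frob_service_level = {
		.seq_print = frob_print_service_level,
	};

	static int __init frob_init(void)
	{
		return register_service_level(&frob_service_level);
	}
	module_init(frob_init);

	static void __exit frob_exit(void)
	{
		unregister_service_level(&frob_service_level);
	}
	module_exit(frob_exit);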
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 819e7d99ca0c..024ef42ed6d7 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -12,6 +12,7 @@
 #define __ASM_SYSTEM_H
 
 #include <linux/kernel.h>
+#include <linux/errno.h>
 #include <asm/types.h>
 #include <asm/ptrace.h>
 #include <asm/setup.h>
@@ -98,13 +99,9 @@ static inline void restore_access_regs(unsigned int *acrs)
 	prev = __switch_to(prev,next);					\
 } while (0)
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_vtime(struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
-#else
-#define account_vtime(x) do { /* empty */ } while (0)
-#endif
 
 #ifdef CONFIG_PFAULT
 extern void pfault_irq_init(void);
@@ -413,8 +410,6 @@ __set_psw_mask(unsigned long mask)
 #define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
 #define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
 
-int stfle(unsigned long long *list, int doublewords);
-
 #ifdef CONFIG_SMP
 
 extern void smp_ctl_set_bit(int cr, int bit);
@@ -438,6 +433,23 @@ static inline unsigned int stfl(void)
 	return S390_lowcore.stfl_fac_list;
 }
 
+static inline int __stfle(unsigned long long *list, int doublewords)
+{
+	typedef struct { unsigned long long _[doublewords]; } addrtype;
+	register unsigned long __nr asm("0") = doublewords - 1;
+
+	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
+		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
+	return __nr + 1;
+}
+
+static inline int stfle(unsigned long long *list, int doublewords)
+{
+	if (!(stfl() & (1UL << 24)))
+		return -EOPNOTSUPP;
+	return __stfle(list, doublewords);
+}
+
 static inline unsigned short stap(void)
 {
 	unsigned short cpu_address;
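stfle() now lives here as an inline: it first checks the STFL word (the 1UL << 24 test) for the facility that indicates the store-facility-list-extended instruction is installed, then stores `doublewords` worth of facility bits. A sketch of how a caller might test an individual facility bit with it; the helper is illustrative and not part of this merge:

	/* Returns nonzero if facility bit `nr` is set, with bits numbered from
	 * the leftmost bit of the facility list as in the Principles of
	 * Operation. */
	static inline int test_facility_bit(int nr)
	{
		unsigned long long list[4] = { 0 };

		if (stfle(list, 4) < 0)
			return 0;	/* no extended facility list available */
		return (list[nr / 64] >> (63 - (nr % 64))) & 1;
	}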
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index d98d79e35cd6..61705d60f995 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -48,18 +48,9 @@ extern int del_virt_timer(struct vtimer_list *timer);
 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);
 
-#ifdef CONFIG_VIRT_TIMER
-
 extern void vtime_start_cpu_timer(void);
 extern void vtime_stop_cpu_timer(void);
 
-#else
-
-static inline void vtime_start_cpu_timer(void) { }
-static inline void vtime_stop_cpu_timer(void) { }
-
-#endif /* CONFIG_VIRT_TIMER */
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
new file mode 100644
index 000000000000..a44f4fe16a35
--- /dev/null
+++ b/arch/s390/include/asm/vdso.h
@@ -0,0 +1,39 @@
+#ifndef __S390_VDSO_H__
+#define __S390_VDSO_H__
+
+#ifdef __KERNEL__
+
+/* Default link addresses for the vDSOs */
+#define VDSO32_LBASE	0
+#define VDSO64_LBASE	0
+
+#define VDSO_VERSION_STRING	LINUX_2.6.26
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Note about this structure:
+ *
+ * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
+ * structure is supposed to be known only to the function in the vdso
+ * itself and may change without notice.
+ */
+
+struct vdso_data {
+	__u64 tb_update_count;		/* Timebase atomicity ctr	0x00 */
+	__u64 xtime_tod_stamp;		/* TOD clock for xtime		0x08 */
+	__u64 xtime_clock_sec;		/* Kernel time			0x10 */
+	__u64 xtime_clock_nsec;		/*				0x18 */
+	__u64 wtom_clock_sec;		/* Wall to monotonic clock	0x20 */
+	__u64 wtom_clock_nsec;		/*				0x28 */
+	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
+	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
+};
+
+extern struct vdso_data *vdso_data;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* __S390_VDSO_H__ */
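vdso_data is the kernel/user shared page the new vDSO reads its time from; tb_update_count is bumped to an odd value while the other fields are rewritten and back to an even value afterwards, so the vDSO can retry if it raced with an update (the asm-offsets.c hunk below exports the field offsets for the vDSO assembly). A hedged sketch of the writer-side protocol with invented parameter names; the real update lives in the s390 timekeeping code, not in this header:

	#include <linux/types.h>
	#include <asm/system.h>
	#include <asm/vdso.h>

	/* Sketch of a seqcount-style update of the shared page. Readers in
	 * the vDSO retry while tb_update_count is odd or has changed. */
	static void vdso_update_time(u64 tod_stamp, u64 sec, u64 nsec,
				     u64 wtom_sec, u64 wtom_nsec)
	{
		vdso_data->tb_update_count++;	/* odd: update in progress */
		smp_wmb();
		vdso_data->xtime_tod_stamp = tod_stamp;
		vdso_data->xtime_clock_sec = sec;
		vdso_data->xtime_clock_nsec = nsec;
		vdso_data->wtom_clock_sec = wtom_sec;
		vdso_data->wtom_clock_nsec = wtom_nsec;
		smp_wmb();
		vdso_data->tb_update_count++;	/* even again: update done */
	}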
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 50f657e77344..3edc6c6f258b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -2,6 +2,11 @@ | |||
2 | # Makefile for the linux kernel. | 2 | # Makefile for the linux kernel. |
3 | # | 3 | # |
4 | 4 | ||
5 | ifdef CONFIG_FUNCTION_TRACER | ||
6 | # Do not trace early boot code | ||
7 | CFLAGS_REMOVE_early.o = -pg | ||
8 | endif | ||
9 | |||
5 | # | 10 | # |
6 | # Passing null pointers is ok for smp code, since we access the lowcore here. | 11 | # Passing null pointers is ok for smp code, since we access the lowcore here. |
7 | # | 12 | # |
@@ -12,9 +17,10 @@ CFLAGS_smp.o := -Wno-nonnull | |||
12 | # | 17 | # |
13 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | 18 | CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' |
14 | 19 | ||
15 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ | 20 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ |
16 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 21 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
17 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o | 22 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ |
23 | vdso.o vtime.o | ||
18 | 24 | ||
19 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 25 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
20 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 26 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
@@ -30,12 +36,16 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ | |||
30 | compat_wrapper.o compat_exec_domain.o \ | 36 | compat_wrapper.o compat_exec_domain.o \ |
31 | $(compat-obj-y) | 37 | $(compat-obj-y) |
32 | 38 | ||
33 | obj-$(CONFIG_VIRT_TIMER) += vtime.o | ||
34 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 39 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
35 | obj-$(CONFIG_KPROBES) += kprobes.o | 40 | obj-$(CONFIG_KPROBES) += kprobes.o |
41 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o | ||
36 | 42 | ||
37 | # Kexec part | 43 | # Kexec part |
38 | S390_KEXEC_OBJS := machine_kexec.o crash.o | 44 | S390_KEXEC_OBJS := machine_kexec.o crash.o |
39 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) | 45 | S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o) |
40 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) | 46 | obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS) |
41 | 47 | ||
48 | # vdso | ||
49 | obj-$(CONFIG_64BIT) += vdso64/ | ||
50 | obj-$(CONFIG_32BIT) += vdso32/ | ||
51 | obj-$(CONFIG_COMPAT) += vdso32/ | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 3d144e6020c6..e641f60bac99 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/kbuild.h> | 8 | #include <linux/kbuild.h> |
9 | #include <asm/vdso.h> | ||
9 | 10 | ||
10 | int main(void) | 11 | int main(void) |
11 | { | 12 | { |
@@ -38,5 +39,19 @@ int main(void) | |||
38 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); | 39 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); |
39 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); | 40 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); |
40 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); | 41 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); |
42 | BLANK(); | ||
43 | /* timeval/timezone offsets for use by vdso */ | ||
44 | DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count)); | ||
45 | DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); | ||
46 | DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); | ||
47 | DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); | ||
48 | DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); | ||
49 | DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); | ||
50 | DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); | ||
51 | /* constants used by the vdso */ | ||
52 | DEFINE(CLOCK_REALTIME, CLOCK_REALTIME); | ||
53 | DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC); | ||
54 | DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC); | ||
55 | |||
41 | return 0; | 56 | return 0; |
42 | } | 57 | } |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index 4646382af34f..6cc87d8c8682 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -148,9 +148,9 @@ asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user | |||
148 | { | 148 | { |
149 | int retval; | 149 | int retval; |
150 | 150 | ||
151 | if (!(retval = put_user(high2lowuid(current->uid), ruid)) && | 151 | if (!(retval = put_user(high2lowuid(current->cred->uid), ruid)) && |
152 | !(retval = put_user(high2lowuid(current->euid), euid))) | 152 | !(retval = put_user(high2lowuid(current->cred->euid), euid))) |
153 | retval = put_user(high2lowuid(current->suid), suid); | 153 | retval = put_user(high2lowuid(current->cred->suid), suid); |
154 | 154 | ||
155 | return retval; | 155 | return retval; |
156 | } | 156 | } |
@@ -165,9 +165,9 @@ asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user | |||
165 | { | 165 | { |
166 | int retval; | 166 | int retval; |
167 | 167 | ||
168 | if (!(retval = put_user(high2lowgid(current->gid), rgid)) && | 168 | if (!(retval = put_user(high2lowgid(current->cred->gid), rgid)) && |
169 | !(retval = put_user(high2lowgid(current->egid), egid))) | 169 | !(retval = put_user(high2lowgid(current->cred->egid), egid))) |
170 | retval = put_user(high2lowgid(current->sgid), sgid); | 170 | retval = put_user(high2lowgid(current->cred->sgid), sgid); |
171 | 171 | ||
172 | return retval; | 172 | return retval; |
173 | } | 173 | } |
@@ -217,20 +217,20 @@ asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist) | |||
217 | if (gidsetsize < 0) | 217 | if (gidsetsize < 0) |
218 | return -EINVAL; | 218 | return -EINVAL; |
219 | 219 | ||
220 | get_group_info(current->group_info); | 220 | get_group_info(current->cred->group_info); |
221 | i = current->group_info->ngroups; | 221 | i = current->cred->group_info->ngroups; |
222 | if (gidsetsize) { | 222 | if (gidsetsize) { |
223 | if (i > gidsetsize) { | 223 | if (i > gidsetsize) { |
224 | i = -EINVAL; | 224 | i = -EINVAL; |
225 | goto out; | 225 | goto out; |
226 | } | 226 | } |
227 | if (groups16_to_user(grouplist, current->group_info)) { | 227 | if (groups16_to_user(grouplist, current->cred->group_info)) { |
228 | i = -EFAULT; | 228 | i = -EFAULT; |
229 | goto out; | 229 | goto out; |
230 | } | 230 | } |
231 | } | 231 | } |
232 | out: | 232 | out: |
233 | put_group_info(current->group_info); | 233 | put_group_info(current->cred->group_info); |
234 | return i; | 234 | return i; |
235 | } | 235 | } |
236 | 236 | ||
@@ -261,22 +261,22 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist) | |||
261 | 261 | ||
262 | asmlinkage long sys32_getuid16(void) | 262 | asmlinkage long sys32_getuid16(void) |
263 | { | 263 | { |
264 | return high2lowuid(current->uid); | 264 | return high2lowuid(current->cred->uid); |
265 | } | 265 | } |
266 | 266 | ||
267 | asmlinkage long sys32_geteuid16(void) | 267 | asmlinkage long sys32_geteuid16(void) |
268 | { | 268 | { |
269 | return high2lowuid(current->euid); | 269 | return high2lowuid(current->cred->euid); |
270 | } | 270 | } |
271 | 271 | ||
272 | asmlinkage long sys32_getgid16(void) | 272 | asmlinkage long sys32_getgid16(void) |
273 | { | 273 | { |
274 | return high2lowgid(current->gid); | 274 | return high2lowgid(current->cred->gid); |
275 | } | 275 | } |
276 | 276 | ||
277 | asmlinkage long sys32_getegid16(void) | 277 | asmlinkage long sys32_getegid16(void) |
278 | { | 278 | { |
279 | return high2lowgid(current->egid); | 279 | return high2lowgid(current->cred->egid); |
280 | } | 280 | } |
281 | 281 | ||
282 | /* | 282 | /* |
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index d8c1131e0815..3e8b8816f309 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Christian Borntraeger (cborntra@de.ibm.com), | 7 | * Christian Borntraeger (cborntra@de.ibm.com), |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "cpcmd" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
@@ -104,8 +107,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
104 | (((unsigned long)response + rlen) >> 31)) { | 107 | (((unsigned long)response + rlen) >> 31)) { |
105 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); | 108 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); |
106 | if (!lowbuf) { | 109 | if (!lowbuf) { |
107 | printk(KERN_WARNING | 110 | pr_warning("The cpcmd kernel function failed to " |
108 | "cpcmd: could not allocate response buffer\n"); | 111 | "allocate a response buffer\n"); |
109 | return -ENOMEM; | 112 | return -ENOMEM; |
110 | } | 113 | } |
111 | spin_lock_irqsave(&cpcmd_lock, flags); | 114 | spin_lock_irqsave(&cpcmd_lock, flags); |
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index d80fcd4a7fe1..ba03fc0a3a56 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -10,6 +10,9 @@ | |||
10 | * Bugreports to: <Linux390@de.ibm.com> | 10 | * Bugreports to: <Linux390@de.ibm.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define KMSG_COMPONENT "s390dbf" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/stddef.h> | 16 | #include <linux/stddef.h> |
14 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
15 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
@@ -388,7 +391,7 @@ debug_info_copy(debug_info_t* in, int mode) | |||
388 | debug_info_free(rc); | 391 | debug_info_free(rc); |
389 | } while (1); | 392 | } while (1); |
390 | 393 | ||
391 | if(!rc || (mode == NO_AREAS)) | 394 | if (mode == NO_AREAS) |
392 | goto out; | 395 | goto out; |
393 | 396 | ||
394 | for(i = 0; i < in->nr_areas; i++){ | 397 | for(i = 0; i < in->nr_areas; i++){ |
@@ -693,8 +696,8 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, | |||
693 | /* Since debugfs currently does not support uid/gid other than root, */ | 696 | /* Since debugfs currently does not support uid/gid other than root, */ |
694 | /* we do not allow gid/uid != 0 until we get support for that. */ | 697 | /* we do not allow gid/uid != 0 until we get support for that. */ |
695 | if ((uid != 0) || (gid != 0)) | 698 | if ((uid != 0) || (gid != 0)) |
696 | printk(KERN_WARNING "debug: Warning - Currently only uid/gid " | 699 | pr_warning("Root becomes the owner of all s390dbf files " |
697 | "= 0 are supported. Using root as owner now!"); | 700 | "in sysfs\n"); |
698 | if (!initialized) | 701 | if (!initialized) |
699 | BUG(); | 702 | BUG(); |
700 | mutex_lock(&debug_mutex); | 703 | mutex_lock(&debug_mutex); |
@@ -709,7 +712,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area, | |||
709 | debug_register_view(rc, &debug_pages_view); | 712 | debug_register_view(rc, &debug_pages_view); |
710 | out: | 713 | out: |
711 | if (!rc){ | 714 | if (!rc){ |
712 | printk(KERN_ERR "debug: debug_register failed for %s\n",name); | 715 | pr_err("Registering debug feature %s failed\n", name); |
713 | } | 716 | } |
714 | mutex_unlock(&debug_mutex); | 717 | mutex_unlock(&debug_mutex); |
715 | return rc; | 718 | return rc; |
@@ -763,8 +766,8 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) | |||
763 | if(pages_per_area > 0){ | 766 | if(pages_per_area > 0){ |
764 | new_areas = debug_areas_alloc(pages_per_area, nr_areas); | 767 | new_areas = debug_areas_alloc(pages_per_area, nr_areas); |
765 | if(!new_areas) { | 768 | if(!new_areas) { |
766 | printk(KERN_WARNING "debug: could not allocate memory "\ | 769 | pr_info("Allocating memory for %i pages failed\n", |
767 | "for pagenumber: %i\n",pages_per_area); | 770 | pages_per_area); |
768 | rc = -ENOMEM; | 771 | rc = -ENOMEM; |
769 | goto out; | 772 | goto out; |
770 | } | 773 | } |
@@ -780,8 +783,7 @@ debug_set_size(debug_info_t* id, int nr_areas, int pages_per_area) | |||
780 | memset(id->active_entries,0,sizeof(int)*id->nr_areas); | 783 | memset(id->active_entries,0,sizeof(int)*id->nr_areas); |
781 | memset(id->active_pages, 0, sizeof(int)*id->nr_areas); | 784 | memset(id->active_pages, 0, sizeof(int)*id->nr_areas); |
782 | spin_unlock_irqrestore(&id->lock,flags); | 785 | spin_unlock_irqrestore(&id->lock,flags); |
783 | printk(KERN_INFO "debug: %s: set new size (%i pages)\n"\ | 786 | pr_info("%s: set new size (%i pages)\n" ,id->name, pages_per_area); |
784 | ,id->name, pages_per_area); | ||
785 | out: | 787 | out: |
786 | return rc; | 788 | return rc; |
787 | } | 789 | } |
@@ -800,10 +802,9 @@ debug_set_level(debug_info_t* id, int new_level) | |||
800 | spin_lock_irqsave(&id->lock,flags); | 802 | spin_lock_irqsave(&id->lock,flags); |
801 | if(new_level == DEBUG_OFF_LEVEL){ | 803 | if(new_level == DEBUG_OFF_LEVEL){ |
802 | id->level = DEBUG_OFF_LEVEL; | 804 | id->level = DEBUG_OFF_LEVEL; |
803 | printk(KERN_INFO "debug: %s: switched off\n",id->name); | 805 | pr_info("%s: switched off\n",id->name); |
804 | } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { | 806 | } else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) { |
805 | printk(KERN_INFO | 807 | pr_info("%s: level %i is out of range (%i - %i)\n", |
806 | "debug: %s: level %i is out of range (%i - %i)\n", | ||
807 | id->name, new_level, 0, DEBUG_MAX_LEVEL); | 808 | id->name, new_level, 0, DEBUG_MAX_LEVEL); |
808 | } else { | 809 | } else { |
809 | id->level = new_level; | 810 | id->level = new_level; |
@@ -1108,8 +1109,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) | |||
1108 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, | 1109 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, |
1109 | id , &debug_file_ops); | 1110 | id , &debug_file_ops); |
1110 | if (!pde){ | 1111 | if (!pde){ |
1111 | printk(KERN_WARNING "debug: debugfs_create_file() failed!"\ | 1112 | pr_err("Registering view %s/%s failed due to out of " |
1112 | " Cannot register view %s/%s\n", id->name,view->name); | 1113 | "memory\n", id->name,view->name); |
1113 | rc = -1; | 1114 | rc = -1; |
1114 | goto out; | 1115 | goto out; |
1115 | } | 1116 | } |
@@ -1119,10 +1120,8 @@ debug_register_view(debug_info_t * id, struct debug_view *view) | |||
1119 | break; | 1120 | break; |
1120 | } | 1121 | } |
1121 | if (i == DEBUG_MAX_VIEWS) { | 1122 | if (i == DEBUG_MAX_VIEWS) { |
1122 | printk(KERN_WARNING "debug: cannot register view %s/%s\n", | 1123 | pr_err("Registering view %s/%s would exceed the maximum " |
1123 | id->name,view->name); | 1124 | "number of views %i\n", id->name, view->name, i); |
1124 | printk(KERN_WARNING | ||
1125 | "debug: maximum number of views reached (%i)!\n", i); | ||
1126 | debugfs_remove(pde); | 1125 | debugfs_remove(pde); |
1127 | rc = -1; | 1126 | rc = -1; |
1128 | } else { | 1127 | } else { |
@@ -1303,7 +1302,8 @@ debug_input_level_fn(debug_info_t * id, struct debug_view *view, | |||
1303 | new_level = debug_get_uint(str); | 1302 | new_level = debug_get_uint(str); |
1304 | } | 1303 | } |
1305 | if(new_level < 0) { | 1304 | if(new_level < 0) { |
1306 | printk(KERN_INFO "debug: level `%s` is not valid\n", str); | 1305 | pr_warning("%s is not a valid level for a debug " |
1306 | "feature\n", str); | ||
1307 | rc = -EINVAL; | 1307 | rc = -EINVAL; |
1308 | } else { | 1308 | } else { |
1309 | debug_set_level(id, new_level); | 1309 | debug_set_level(id, new_level); |
@@ -1380,7 +1380,8 @@ debug_input_flush_fn(debug_info_t * id, struct debug_view *view, | |||
1380 | goto out; | 1380 | goto out; |
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | printk(KERN_INFO "debug: area `%c` is not valid\n", input_buf[0]); | 1383 | pr_info("Flushing debug data failed because %c is not a valid " |
1384 | "area\n", input_buf[0]); | ||
1384 | 1385 | ||
1385 | out: | 1386 | out: |
1386 | *offset += user_len; | 1387 | *offset += user_len; |
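Note on the printk conversions in the debug.c hunks above: the pr_warning/pr_err/pr_info helpers pick up a per-file prefix when the file defines KMSG_COMPONENT and pr_fmt, the same pattern the new processor.c and the setup.c hunk later in this diff use. A minimal sketch of that pattern follows; the component name "dbf" is an assumption chosen only for illustration, the real name is whatever debug.c defines.

/* define the prefix before any include so the pr_* macros use it */
#define KMSG_COMPONENT "dbf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>

static void debug_msg_sketch(void)
{
	/* expands to printk(KERN_INFO "dbf: %s: switched off\n", ...) */
	pr_info("%s: switched off\n", "sample");
}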
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 198ea18a534d..55de521aef77 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -109,13 +109,6 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
109 | * R15 - kernel stack pointer | 109 | * R15 - kernel stack pointer |
110 | */ | 110 | */ |
111 | 111 | ||
112 | .macro STORE_TIMER lc_offset | ||
113 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
114 | stpt \lc_offset | ||
115 | #endif | ||
116 | .endm | ||
117 | |||
118 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
119 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | 112 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum |
120 | lm %r10,%r11,\lc_from | 113 | lm %r10,%r11,\lc_from |
121 | sl %r10,\lc_to | 114 | sl %r10,\lc_to |
@@ -128,7 +121,6 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
128 | al %r10,BASED(.Lc_1) | 121 | al %r10,BASED(.Lc_1) |
129 | 1: stm %r10,%r11,\lc_sum | 122 | 1: stm %r10,%r11,\lc_sum |
130 | .endm | 123 | .endm |
131 | #endif | ||
132 | 124 | ||
133 | .macro SAVE_ALL_BASE savearea | 125 | .macro SAVE_ALL_BASE savearea |
134 | stm %r12,%r15,\savearea | 126 | stm %r12,%r15,\savearea |
@@ -198,7 +190,7 @@ STACK_SIZE = 1 << STACK_SHIFT | |||
198 | ni \psworg+1,0xfd # clear wait state bit | 190 | ni \psworg+1,0xfd # clear wait state bit |
199 | .endif | 191 | .endif |
200 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | 192 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user |
201 | STORE_TIMER __LC_EXIT_TIMER | 193 | stpt __LC_EXIT_TIMER |
202 | lpsw \psworg # back to caller | 194 | lpsw \psworg # back to caller |
203 | .endm | 195 | .endm |
204 | 196 | ||
@@ -247,20 +239,18 @@ __critical_start: | |||
247 | 239 | ||
248 | .globl system_call | 240 | .globl system_call |
249 | system_call: | 241 | system_call: |
250 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 242 | stpt __LC_SYNC_ENTER_TIMER |
251 | sysc_saveall: | 243 | sysc_saveall: |
252 | SAVE_ALL_BASE __LC_SAVE_AREA | 244 | SAVE_ALL_BASE __LC_SAVE_AREA |
253 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 245 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
254 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 246 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
255 | lh %r7,0x8a # get svc number from lowcore | 247 | lh %r7,0x8a # get svc number from lowcore |
256 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
257 | sysc_vtime: | 248 | sysc_vtime: |
258 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 249 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
259 | sysc_stime: | 250 | sysc_stime: |
260 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 251 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
261 | sysc_update: | 252 | sysc_update: |
262 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 253 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
263 | #endif | ||
264 | sysc_do_svc: | 254 | sysc_do_svc: |
265 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 255 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
266 | ltr %r7,%r7 # test for svc 0 | 256 | ltr %r7,%r7 # test for svc 0 |
@@ -436,7 +426,7 @@ ret_from_fork: | |||
436 | basr %r14,%r1 | 426 | basr %r14,%r1 |
437 | TRACE_IRQS_ON | 427 | TRACE_IRQS_ON |
438 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts | 428 | stosm __SF_EMPTY(%r15),0x03 # reenable interrupts |
439 | b BASED(sysc_return) | 429 | b BASED(sysc_tracenogo) |
440 | 430 | ||
441 | # | 431 | # |
442 | # kernel_execve function needs to deal with pt_regs that is not | 432 | # kernel_execve function needs to deal with pt_regs that is not |
@@ -490,20 +480,18 @@ pgm_check_handler: | |||
490 | * we just ignore the PER event (FIXME: is there anything we have to do | 480 | * we just ignore the PER event (FIXME: is there anything we have to do |
491 | * for LPSW?). | 481 | * for LPSW?). |
492 | */ | 482 | */ |
493 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 483 | stpt __LC_SYNC_ENTER_TIMER |
494 | SAVE_ALL_BASE __LC_SAVE_AREA | 484 | SAVE_ALL_BASE __LC_SAVE_AREA |
495 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 485 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception |
496 | bnz BASED(pgm_per) # got per exception -> special case | 486 | bnz BASED(pgm_per) # got per exception -> special case |
497 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 487 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
498 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 488 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
499 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
500 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 489 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
501 | bz BASED(pgm_no_vtime) | 490 | bz BASED(pgm_no_vtime) |
502 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 491 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
503 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 492 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
504 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 493 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
505 | pgm_no_vtime: | 494 | pgm_no_vtime: |
506 | #endif | ||
507 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 495 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
508 | TRACE_IRQS_OFF | 496 | TRACE_IRQS_OFF |
509 | l %r3,__LC_PGM_ILC # load program interruption code | 497 | l %r3,__LC_PGM_ILC # load program interruption code |
@@ -536,14 +524,12 @@ pgm_per: | |||
536 | pgm_per_std: | 524 | pgm_per_std: |
537 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 525 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
538 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 526 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
539 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
540 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 527 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
541 | bz BASED(pgm_no_vtime2) | 528 | bz BASED(pgm_no_vtime2) |
542 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 529 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
543 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 530 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
544 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 531 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
545 | pgm_no_vtime2: | 532 | pgm_no_vtime2: |
546 | #endif | ||
547 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 533 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
548 | TRACE_IRQS_OFF | 534 | TRACE_IRQS_OFF |
549 | l %r1,__TI_task(%r9) | 535 | l %r1,__TI_task(%r9) |
@@ -565,11 +551,9 @@ pgm_no_vtime2: | |||
565 | pgm_svcper: | 551 | pgm_svcper: |
566 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 552 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
567 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 553 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
568 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
569 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 554 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
570 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 555 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
571 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 556 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
572 | #endif | ||
573 | lh %r7,0x8a # get svc number from lowcore | 557 | lh %r7,0x8a # get svc number from lowcore |
574 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 558 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
575 | TRACE_IRQS_OFF | 559 | TRACE_IRQS_OFF |
@@ -599,19 +583,17 @@ kernel_per: | |||
599 | 583 | ||
600 | .globl io_int_handler | 584 | .globl io_int_handler |
601 | io_int_handler: | 585 | io_int_handler: |
602 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 586 | stpt __LC_ASYNC_ENTER_TIMER |
603 | stck __LC_INT_CLOCK | 587 | stck __LC_INT_CLOCK |
604 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | 588 | SAVE_ALL_BASE __LC_SAVE_AREA+16 |
605 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 | 589 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 |
606 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 | 590 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 |
607 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
608 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 591 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
609 | bz BASED(io_no_vtime) | 592 | bz BASED(io_no_vtime) |
610 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 593 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
611 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 594 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
612 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 595 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
613 | io_no_vtime: | 596 | io_no_vtime: |
614 | #endif | ||
615 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 597 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
616 | TRACE_IRQS_OFF | 598 | TRACE_IRQS_OFF |
617 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ | 599 | l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ |
@@ -741,19 +723,17 @@ io_notify_resume: | |||
741 | 723 | ||
742 | .globl ext_int_handler | 724 | .globl ext_int_handler |
743 | ext_int_handler: | 725 | ext_int_handler: |
744 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 726 | stpt __LC_ASYNC_ENTER_TIMER |
745 | stck __LC_INT_CLOCK | 727 | stck __LC_INT_CLOCK |
746 | SAVE_ALL_BASE __LC_SAVE_AREA+16 | 728 | SAVE_ALL_BASE __LC_SAVE_AREA+16 |
747 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 | 729 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 |
748 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 | 730 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 |
749 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
750 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 731 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
751 | bz BASED(ext_no_vtime) | 732 | bz BASED(ext_no_vtime) |
752 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 733 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
753 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 734 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
754 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 735 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
755 | ext_no_vtime: | 736 | ext_no_vtime: |
756 | #endif | ||
757 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 737 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
758 | TRACE_IRQS_OFF | 738 | TRACE_IRQS_OFF |
759 | la %r2,SP_PTREGS(%r15) # address of register-save area | 739 | la %r2,SP_PTREGS(%r15) # address of register-save area |
@@ -776,7 +756,6 @@ mcck_int_handler: | |||
776 | la %r12,__LC_MCK_OLD_PSW | 756 | la %r12,__LC_MCK_OLD_PSW |
777 | tm __LC_MCCK_CODE,0x80 # system damage? | 757 | tm __LC_MCCK_CODE,0x80 # system damage? |
778 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid | 758 | bo BASED(mcck_int_main) # yes -> rest of mcck code invalid |
779 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
780 | mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER | 759 | mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER |
781 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA | 760 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA |
782 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? | 761 | tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? |
@@ -793,9 +772,7 @@ mcck_int_handler: | |||
793 | la %r14,__LC_LAST_UPDATE_TIMER | 772 | la %r14,__LC_LAST_UPDATE_TIMER |
794 | 0: spt 0(%r14) | 773 | 0: spt 0(%r14) |
795 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | 774 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) |
796 | 1: | 775 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
797 | #endif | ||
798 | tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | ||
799 | bno BASED(mcck_int_main) # no -> skip cleanup critical | 776 | bno BASED(mcck_int_main) # no -> skip cleanup critical |
800 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 777 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit |
801 | bnz BASED(mcck_int_main) # from user -> load async stack | 778 | bnz BASED(mcck_int_main) # from user -> load async stack |
@@ -812,7 +789,6 @@ mcck_int_main: | |||
812 | be BASED(0f) | 789 | be BASED(0f) |
813 | l %r15,__LC_PANIC_STACK # load panic stack | 790 | l %r15,__LC_PANIC_STACK # load panic stack |
814 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 | 791 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32 |
815 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
816 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | 792 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? |
817 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical | 793 | bno BASED(mcck_no_vtime) # no -> skip cleanup critical |
818 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 794 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -821,7 +797,6 @@ mcck_int_main: | |||
821 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 797 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
822 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 798 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
823 | mcck_no_vtime: | 799 | mcck_no_vtime: |
824 | #endif | ||
825 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 800 | l %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
826 | la %r2,SP_PTREGS(%r15) # load pt_regs | 801 | la %r2,SP_PTREGS(%r15) # load pt_regs |
827 | l %r1,BASED(.Ls390_mcck) | 802 | l %r1,BASED(.Ls390_mcck) |
@@ -843,16 +818,13 @@ mcck_no_vtime: | |||
843 | mcck_return: | 818 | mcck_return: |
844 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW | 819 | mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW |
845 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 820 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
846 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
847 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 | 821 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52 |
848 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 822 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
849 | bno BASED(0f) | 823 | bno BASED(0f) |
850 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 824 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
851 | stpt __LC_EXIT_TIMER | 825 | stpt __LC_EXIT_TIMER |
852 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 826 | lpsw __LC_RETURN_MCCK_PSW # back to caller |
853 | 0: | 827 | 0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
854 | #endif | ||
855 | lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 | ||
856 | lpsw __LC_RETURN_MCCK_PSW # back to caller | 828 | lpsw __LC_RETURN_MCCK_PSW # back to caller |
857 | 829 | ||
858 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 | 830 | RESTORE_ALL __LC_RETURN_MCCK_PSW,0 |
@@ -976,13 +948,11 @@ cleanup_system_call: | |||
976 | b BASED(1f) | 948 | b BASED(1f) |
977 | 0: la %r12,__LC_SAVE_AREA+32 | 949 | 0: la %r12,__LC_SAVE_AREA+32 |
978 | 1: | 950 | 1: |
979 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
980 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) | 951 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) |
981 | bh BASED(0f) | 952 | bh BASED(0f) |
982 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 953 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
983 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) | 954 | 0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) |
984 | bhe BASED(cleanup_vtime) | 955 | bhe BASED(cleanup_vtime) |
985 | #endif | ||
986 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) | 956 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) |
987 | bh BASED(0f) | 957 | bh BASED(0f) |
988 | mvc __LC_SAVE_AREA(16),0(%r12) | 958 | mvc __LC_SAVE_AREA(16),0(%r12) |
@@ -993,7 +963,6 @@ cleanup_system_call: | |||
993 | l %r12,__LC_SAVE_AREA+48 # argh | 963 | l %r12,__LC_SAVE_AREA+48 # argh |
994 | st %r15,12(%r12) | 964 | st %r15,12(%r12) |
995 | lh %r7,0x8a | 965 | lh %r7,0x8a |
996 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
997 | cleanup_vtime: | 966 | cleanup_vtime: |
998 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) | 967 | clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) |
999 | bhe BASED(cleanup_stime) | 968 | bhe BASED(cleanup_stime) |
@@ -1004,18 +973,15 @@ cleanup_stime: | |||
1004 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 973 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
1005 | cleanup_update: | 974 | cleanup_update: |
1006 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 975 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
1007 | #endif | ||
1008 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) | 976 | mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) |
1009 | la %r12,__LC_RETURN_PSW | 977 | la %r12,__LC_RETURN_PSW |
1010 | br %r14 | 978 | br %r14 |
1011 | cleanup_system_call_insn: | 979 | cleanup_system_call_insn: |
1012 | .long sysc_saveall + 0x80000000 | 980 | .long sysc_saveall + 0x80000000 |
1013 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1014 | .long system_call + 0x80000000 | 981 | .long system_call + 0x80000000 |
1015 | .long sysc_vtime + 0x80000000 | 982 | .long sysc_vtime + 0x80000000 |
1016 | .long sysc_stime + 0x80000000 | 983 | .long sysc_stime + 0x80000000 |
1017 | .long sysc_update + 0x80000000 | 984 | .long sysc_update + 0x80000000 |
1018 | #endif | ||
1019 | 985 | ||
1020 | cleanup_sysc_return: | 986 | cleanup_sysc_return: |
1021 | mvc __LC_RETURN_PSW(4),0(%r12) | 987 | mvc __LC_RETURN_PSW(4),0(%r12) |
@@ -1026,11 +992,9 @@ cleanup_sysc_return: | |||
1026 | cleanup_sysc_leave: | 992 | cleanup_sysc_leave: |
1027 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) | 993 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) |
1028 | be BASED(2f) | 994 | be BASED(2f) |
1029 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1030 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 995 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1031 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) | 996 | clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) |
1032 | be BASED(2f) | 997 | be BASED(2f) |
1033 | #endif | ||
1034 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 998 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) |
1035 | c %r12,BASED(.Lmck_old_psw) | 999 | c %r12,BASED(.Lmck_old_psw) |
1036 | bne BASED(0f) | 1000 | bne BASED(0f) |
@@ -1043,9 +1007,7 @@ cleanup_sysc_leave: | |||
1043 | br %r14 | 1007 | br %r14 |
1044 | cleanup_sysc_leave_insn: | 1008 | cleanup_sysc_leave_insn: |
1045 | .long sysc_done - 4 + 0x80000000 | 1009 | .long sysc_done - 4 + 0x80000000 |
1046 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1047 | .long sysc_done - 8 + 0x80000000 | 1010 | .long sysc_done - 8 + 0x80000000 |
1048 | #endif | ||
1049 | 1011 | ||
1050 | cleanup_io_return: | 1012 | cleanup_io_return: |
1051 | mvc __LC_RETURN_PSW(4),0(%r12) | 1013 | mvc __LC_RETURN_PSW(4),0(%r12) |
@@ -1056,11 +1018,9 @@ cleanup_io_return: | |||
1056 | cleanup_io_leave: | 1018 | cleanup_io_leave: |
1057 | clc 4(4,%r12),BASED(cleanup_io_leave_insn) | 1019 | clc 4(4,%r12),BASED(cleanup_io_leave_insn) |
1058 | be BASED(2f) | 1020 | be BASED(2f) |
1059 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1060 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 1021 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1061 | clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) | 1022 | clc 4(4,%r12),BASED(cleanup_io_leave_insn+4) |
1062 | be BASED(2f) | 1023 | be BASED(2f) |
1063 | #endif | ||
1064 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) | 1024 | mvc __LC_RETURN_PSW(8),SP_PSW(%r15) |
1065 | c %r12,BASED(.Lmck_old_psw) | 1025 | c %r12,BASED(.Lmck_old_psw) |
1066 | bne BASED(0f) | 1026 | bne BASED(0f) |
@@ -1073,9 +1033,7 @@ cleanup_io_leave: | |||
1073 | br %r14 | 1033 | br %r14 |
1074 | cleanup_io_leave_insn: | 1034 | cleanup_io_leave_insn: |
1075 | .long io_done - 4 + 0x80000000 | 1035 | .long io_done - 4 + 0x80000000 |
1076 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1077 | .long io_done - 8 + 0x80000000 | 1036 | .long io_done - 8 + 0x80000000 |
1078 | #endif | ||
1079 | 1037 | ||
1080 | /* | 1038 | /* |
1081 | * Integer constants | 1039 | * Integer constants |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 89c121ae6339..16bb4fd1a403 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -96,20 +96,12 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
96 | #define LOCKDEP_SYS_EXIT | 96 | #define LOCKDEP_SYS_EXIT |
97 | #endif | 97 | #endif |
98 | 98 | ||
99 | .macro STORE_TIMER lc_offset | ||
100 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
101 | stpt \lc_offset | ||
102 | #endif | ||
103 | .endm | ||
104 | |||
105 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
106 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum | 99 | .macro UPDATE_VTIME lc_from,lc_to,lc_sum |
107 | lg %r10,\lc_from | 100 | lg %r10,\lc_from |
108 | slg %r10,\lc_to | 101 | slg %r10,\lc_to |
109 | alg %r10,\lc_sum | 102 | alg %r10,\lc_sum |
110 | stg %r10,\lc_sum | 103 | stg %r10,\lc_sum |
111 | .endm | 104 | .endm |
112 | #endif | ||
113 | 105 | ||
114 | /* | 106 | /* |
115 | * Register usage in interrupt handlers: | 107 | * Register usage in interrupt handlers: |
@@ -186,7 +178,7 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ | |||
186 | ni \psworg+1,0xfd # clear wait state bit | 178 | ni \psworg+1,0xfd # clear wait state bit |
187 | .endif | 179 | .endif |
188 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user | 180 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user |
189 | STORE_TIMER __LC_EXIT_TIMER | 181 | stpt __LC_EXIT_TIMER |
190 | lpswe \psworg # back to caller | 182 | lpswe \psworg # back to caller |
191 | .endm | 183 | .endm |
192 | 184 | ||
@@ -233,20 +225,18 @@ __critical_start: | |||
233 | 225 | ||
234 | .globl system_call | 226 | .globl system_call |
235 | system_call: | 227 | system_call: |
236 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 228 | stpt __LC_SYNC_ENTER_TIMER |
237 | sysc_saveall: | 229 | sysc_saveall: |
238 | SAVE_ALL_BASE __LC_SAVE_AREA | 230 | SAVE_ALL_BASE __LC_SAVE_AREA |
239 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 231 | SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
240 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 232 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
241 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | 233 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore |
242 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
243 | sysc_vtime: | 234 | sysc_vtime: |
244 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 235 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
245 | sysc_stime: | 236 | sysc_stime: |
246 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 237 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
247 | sysc_update: | 238 | sysc_update: |
248 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 239 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
249 | #endif | ||
250 | sysc_do_svc: | 240 | sysc_do_svc: |
251 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 241 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
252 | ltgr %r7,%r7 # test for svc 0 | 242 | ltgr %r7,%r7 # test for svc 0 |
@@ -417,7 +407,7 @@ ret_from_fork: | |||
417 | 0: brasl %r14,schedule_tail | 407 | 0: brasl %r14,schedule_tail |
418 | TRACE_IRQS_ON | 408 | TRACE_IRQS_ON |
419 | stosm 24(%r15),0x03 # reenable interrupts | 409 | stosm 24(%r15),0x03 # reenable interrupts |
420 | j sysc_return | 410 | j sysc_tracenogo |
421 | 411 | ||
422 | # | 412 | # |
423 | # kernel_execve function needs to deal with pt_regs that is not | 413 | # kernel_execve function needs to deal with pt_regs that is not |
@@ -469,20 +459,18 @@ pgm_check_handler: | |||
469 | * we just ignore the PER event (FIXME: is there anything we have to do | 459 | * we just ignore the PER event (FIXME: is there anything we have to do |
470 | * for LPSW?). | 460 | * for LPSW?). |
471 | */ | 461 | */ |
472 | STORE_TIMER __LC_SYNC_ENTER_TIMER | 462 | stpt __LC_SYNC_ENTER_TIMER |
473 | SAVE_ALL_BASE __LC_SAVE_AREA | 463 | SAVE_ALL_BASE __LC_SAVE_AREA |
474 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception | 464 | tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception |
475 | jnz pgm_per # got per exception -> special case | 465 | jnz pgm_per # got per exception -> special case |
476 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 466 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
477 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 467 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
478 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
479 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 468 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
480 | jz pgm_no_vtime | 469 | jz pgm_no_vtime |
481 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 470 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
482 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 471 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
483 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 472 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
484 | pgm_no_vtime: | 473 | pgm_no_vtime: |
485 | #endif | ||
486 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 474 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
487 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | 475 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK |
488 | TRACE_IRQS_OFF | 476 | TRACE_IRQS_OFF |
@@ -516,14 +504,12 @@ pgm_per: | |||
516 | pgm_per_std: | 504 | pgm_per_std: |
517 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 505 | SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
518 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA | 506 | CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA |
519 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
520 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 507 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
521 | jz pgm_no_vtime2 | 508 | jz pgm_no_vtime2 |
522 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 509 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
523 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 510 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
524 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 511 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
525 | pgm_no_vtime2: | 512 | pgm_no_vtime2: |
526 | #endif | ||
527 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 513 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
528 | TRACE_IRQS_OFF | 514 | TRACE_IRQS_OFF |
529 | lg %r1,__TI_task(%r9) | 515 | lg %r1,__TI_task(%r9) |
@@ -545,11 +531,9 @@ pgm_no_vtime2: | |||
545 | pgm_svcper: | 531 | pgm_svcper: |
546 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 532 | SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
547 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA | 533 | CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA |
548 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
549 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER | 534 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER |
550 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 535 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
551 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 536 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
552 | #endif | ||
553 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore | 537 | llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore |
554 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 538 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
555 | lg %r1,__TI_task(%r9) | 539 | lg %r1,__TI_task(%r9) |
@@ -575,19 +559,17 @@ kernel_per: | |||
575 | */ | 559 | */ |
576 | .globl io_int_handler | 560 | .globl io_int_handler |
577 | io_int_handler: | 561 | io_int_handler: |
578 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 562 | stpt __LC_ASYNC_ENTER_TIMER |
579 | stck __LC_INT_CLOCK | 563 | stck __LC_INT_CLOCK |
580 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 564 | SAVE_ALL_BASE __LC_SAVE_AREA+32 |
581 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 | 565 | SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 |
582 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 | 566 | CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 |
583 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
584 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 567 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
585 | jz io_no_vtime | 568 | jz io_no_vtime |
586 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 569 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
587 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 570 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
588 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 571 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
589 | io_no_vtime: | 572 | io_no_vtime: |
590 | #endif | ||
591 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 573 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
592 | TRACE_IRQS_OFF | 574 | TRACE_IRQS_OFF |
593 | la %r2,SP_PTREGS(%r15) # address of register-save area | 575 | la %r2,SP_PTREGS(%r15) # address of register-save area |
@@ -739,19 +721,17 @@ io_notify_resume: | |||
739 | */ | 721 | */ |
740 | .globl ext_int_handler | 722 | .globl ext_int_handler |
741 | ext_int_handler: | 723 | ext_int_handler: |
742 | STORE_TIMER __LC_ASYNC_ENTER_TIMER | 724 | stpt __LC_ASYNC_ENTER_TIMER |
743 | stck __LC_INT_CLOCK | 725 | stck __LC_INT_CLOCK |
744 | SAVE_ALL_BASE __LC_SAVE_AREA+32 | 726 | SAVE_ALL_BASE __LC_SAVE_AREA+32 |
745 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 | 727 | SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 |
746 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 | 728 | CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 |
747 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
748 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 729 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
749 | jz ext_no_vtime | 730 | jz ext_no_vtime |
750 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER | 731 | UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER |
751 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 732 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
752 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 733 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
753 | ext_no_vtime: | 734 | ext_no_vtime: |
754 | #endif | ||
755 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 735 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
756 | TRACE_IRQS_OFF | 736 | TRACE_IRQS_OFF |
757 | la %r2,SP_PTREGS(%r15) # address of register-save area | 737 | la %r2,SP_PTREGS(%r15) # address of register-save area |
@@ -773,7 +753,6 @@ mcck_int_handler: | |||
773 | la %r12,__LC_MCK_OLD_PSW | 753 | la %r12,__LC_MCK_OLD_PSW |
774 | tm __LC_MCCK_CODE,0x80 # system damage? | 754 | tm __LC_MCCK_CODE,0x80 # system damage? |
775 | jo mcck_int_main # yes -> rest of mcck code invalid | 755 | jo mcck_int_main # yes -> rest of mcck code invalid |
776 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
777 | la %r14,4095 | 756 | la %r14,4095 |
778 | mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER | 757 | mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER |
779 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) | 758 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) |
@@ -791,9 +770,7 @@ mcck_int_handler: | |||
791 | la %r14,__LC_LAST_UPDATE_TIMER | 770 | la %r14,__LC_LAST_UPDATE_TIMER |
792 | 0: spt 0(%r14) | 771 | 0: spt 0(%r14) |
793 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) | 772 | mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) |
794 | 1: | 773 | 1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? |
795 | #endif | ||
796 | tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? | ||
797 | jno mcck_int_main # no -> skip cleanup critical | 774 | jno mcck_int_main # no -> skip cleanup critical |
798 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit | 775 | tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit |
799 | jnz mcck_int_main # from user -> load kernel stack | 776 | jnz mcck_int_main # from user -> load kernel stack |
@@ -809,7 +786,6 @@ mcck_int_main: | |||
809 | jz 0f | 786 | jz 0f |
810 | lg %r15,__LC_PANIC_STACK # load panic stack | 787 | lg %r15,__LC_PANIC_STACK # load panic stack |
811 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 | 788 | 0: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 |
812 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
813 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? | 789 | tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? |
814 | jno mcck_no_vtime # no -> no timer update | 790 | jno mcck_no_vtime # no -> no timer update |
815 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? | 791 | tm SP_PSW+1(%r15),0x01 # interrupting from user ? |
@@ -818,7 +794,6 @@ mcck_int_main: | |||
818 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 794 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
819 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER | 795 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER |
820 | mcck_no_vtime: | 796 | mcck_no_vtime: |
821 | #endif | ||
822 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 797 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
823 | la %r2,SP_PTREGS(%r15) # load pt_regs | 798 | la %r2,SP_PTREGS(%r15) # load pt_regs |
824 | brasl %r14,s390_do_machine_check | 799 | brasl %r14,s390_do_machine_check |
@@ -839,14 +814,11 @@ mcck_return: | |||
839 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW | 814 | mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW |
840 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit | 815 | ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit |
841 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 | 816 | lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 |
842 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
843 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 | 817 | mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104 |
844 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? | 818 | tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? |
845 | jno 0f | 819 | jno 0f |
846 | stpt __LC_EXIT_TIMER | 820 | stpt __LC_EXIT_TIMER |
847 | 0: | 821 | 0: lpswe __LC_RETURN_MCCK_PSW # back to caller |
848 | #endif | ||
849 | lpswe __LC_RETURN_MCCK_PSW # back to caller | ||
850 | 822 | ||
851 | /* | 823 | /* |
852 | * Restart interruption handler, kick starter for additional CPUs | 824 | * Restart interruption handler, kick starter for additional CPUs |
@@ -964,13 +936,11 @@ cleanup_system_call: | |||
964 | j 1f | 936 | j 1f |
965 | 0: la %r12,__LC_SAVE_AREA+64 | 937 | 0: la %r12,__LC_SAVE_AREA+64 |
966 | 1: | 938 | 1: |
967 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
968 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) | 939 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) |
969 | jh 0f | 940 | jh 0f |
970 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER | 941 | mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER |
971 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) | 942 | 0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) |
972 | jhe cleanup_vtime | 943 | jhe cleanup_vtime |
973 | #endif | ||
974 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) | 944 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) |
975 | jh 0f | 945 | jh 0f |
976 | mvc __LC_SAVE_AREA(32),0(%r12) | 946 | mvc __LC_SAVE_AREA(32),0(%r12) |
@@ -981,7 +951,6 @@ cleanup_system_call: | |||
981 | lg %r12,__LC_SAVE_AREA+96 # argh | 951 | lg %r12,__LC_SAVE_AREA+96 # argh |
982 | stg %r15,24(%r12) | 952 | stg %r15,24(%r12) |
983 | llgh %r7,__LC_SVC_INT_CODE | 953 | llgh %r7,__LC_SVC_INT_CODE |
984 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
985 | cleanup_vtime: | 954 | cleanup_vtime: |
986 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) | 955 | clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) |
987 | jhe cleanup_stime | 956 | jhe cleanup_stime |
@@ -992,18 +961,15 @@ cleanup_stime: | |||
992 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER | 961 | UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER |
993 | cleanup_update: | 962 | cleanup_update: |
994 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER | 963 | mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER |
995 | #endif | ||
996 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) | 964 | mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) |
997 | la %r12,__LC_RETURN_PSW | 965 | la %r12,__LC_RETURN_PSW |
998 | br %r14 | 966 | br %r14 |
999 | cleanup_system_call_insn: | 967 | cleanup_system_call_insn: |
1000 | .quad sysc_saveall | 968 | .quad sysc_saveall |
1001 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1002 | .quad system_call | 969 | .quad system_call |
1003 | .quad sysc_vtime | 970 | .quad sysc_vtime |
1004 | .quad sysc_stime | 971 | .quad sysc_stime |
1005 | .quad sysc_update | 972 | .quad sysc_update |
1006 | #endif | ||
1007 | 973 | ||
1008 | cleanup_sysc_return: | 974 | cleanup_sysc_return: |
1009 | mvc __LC_RETURN_PSW(8),0(%r12) | 975 | mvc __LC_RETURN_PSW(8),0(%r12) |
@@ -1014,11 +980,9 @@ cleanup_sysc_return: | |||
1014 | cleanup_sysc_leave: | 980 | cleanup_sysc_leave: |
1015 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) | 981 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) |
1016 | je 2f | 982 | je 2f |
1017 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1018 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 983 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1019 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) | 984 | clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) |
1020 | je 2f | 985 | je 2f |
1021 | #endif | ||
1022 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 986 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) |
1023 | cghi %r12,__LC_MCK_OLD_PSW | 987 | cghi %r12,__LC_MCK_OLD_PSW |
1024 | jne 0f | 988 | jne 0f |
@@ -1031,9 +995,7 @@ cleanup_sysc_leave: | |||
1031 | br %r14 | 995 | br %r14 |
1032 | cleanup_sysc_leave_insn: | 996 | cleanup_sysc_leave_insn: |
1033 | .quad sysc_done - 4 | 997 | .quad sysc_done - 4 |
1034 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1035 | .quad sysc_done - 8 | 998 | .quad sysc_done - 8 |
1036 | #endif | ||
1037 | 999 | ||
1038 | cleanup_io_return: | 1000 | cleanup_io_return: |
1039 | mvc __LC_RETURN_PSW(8),0(%r12) | 1001 | mvc __LC_RETURN_PSW(8),0(%r12) |
@@ -1044,11 +1006,9 @@ cleanup_io_return: | |||
1044 | cleanup_io_leave: | 1006 | cleanup_io_leave: |
1045 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) | 1007 | clc 8(8,%r12),BASED(cleanup_io_leave_insn) |
1046 | je 2f | 1008 | je 2f |
1047 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1048 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER | 1009 | mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1049 | clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) | 1010 | clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) |
1050 | je 2f | 1011 | je 2f |
1051 | #endif | ||
1052 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) | 1012 | mvc __LC_RETURN_PSW(16),SP_PSW(%r15) |
1053 | cghi %r12,__LC_MCK_OLD_PSW | 1013 | cghi %r12,__LC_MCK_OLD_PSW |
1054 | jne 0f | 1014 | jne 0f |
@@ -1061,9 +1021,7 @@ cleanup_io_leave: | |||
1061 | br %r14 | 1021 | br %r14 |
1062 | cleanup_io_leave_insn: | 1022 | cleanup_io_leave_insn: |
1063 | .quad io_done - 4 | 1023 | .quad io_done - 4 |
1064 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
1065 | .quad io_done - 8 | 1024 | .quad io_done - 8 |
1066 | #endif | ||
1067 | 1025 | ||
1068 | /* | 1026 | /* |
1069 | * Integer constants | 1027 | * Integer constants |
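The UPDATE_VTIME macro that entry.S and entry64.S above now use unconditionally adds the delta between two lowcore CPU-timer values to an accumulator (user or system time). A hedged C rendering of the 64-bit variant (lg/slg/alg/stg) is below; the function and parameter names are illustrative only. The 31-bit variant does the same arithmetic but propagates the borrow/carry across the two 32-bit halves by hand.

#include <linux/types.h>

/* sum += from - to, as performed on the lowcore timer fields */
static inline void update_vtime(u64 from, u64 to, u64 *sum)
{
	*sum += from - to;
}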
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 83477c7dc743..ec7e35f6055b 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -461,6 +461,55 @@ start: | |||
461 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 | 461 | .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 |
462 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff | 462 | .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff |
463 | 463 | ||
464 | # | ||
465 | # startup-code at 0x10000, running in absolute addressing mode | ||
466 | # this is called either by the ipl loader or directly by PSW restart | ||
467 | # or linload or SALIPL | ||
468 | # | ||
469 | .org 0x10000 | ||
470 | startup:basr %r13,0 # get base | ||
471 | .LPG0: | ||
472 | |||
473 | #ifndef CONFIG_MARCH_G5 | ||
474 | # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} | ||
475 | stidp __LC_CPUID # store cpuid | ||
476 | lhi %r0,(3f-2f) / 2 | ||
477 | la %r1,2f-.LPG0(%r13) | ||
478 | 0: clc __LC_CPUID+4(2),0(%r1) | ||
479 | jne 3f | ||
480 | lpsw 1f-.LPG0(13) # machine type not good enough, crash | ||
481 | .align 16 | ||
482 | 1: .long 0x000a0000,0x00000000 | ||
483 | 2: | ||
484 | #if defined(CONFIG_MARCH_Z10) | ||
485 | .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086, 0x2094, 0x2096 | ||
486 | #elif defined(CONFIG_MARCH_Z9_109) | ||
487 | .short 0x9672, 0x2064, 0x2066, 0x2084, 0x2086 | ||
488 | #elif defined(CONFIG_MARCH_Z990) | ||
489 | .short 0x9672, 0x2064, 0x2066 | ||
490 | #elif defined(CONFIG_MARCH_Z900) | ||
491 | .short 0x9672 | ||
492 | #endif | ||
493 | 3: la %r1,2(%r1) | ||
494 | brct %r0,0b | ||
495 | #endif | ||
496 | |||
497 | l %r13,0f-.LPG0(%r13) | ||
498 | b 0(%r13) | ||
499 | 0: .long startup_continue | ||
500 | |||
501 | # | ||
502 | # params at 10400 (setup.h) | ||
503 | # | ||
504 | .org PARMAREA | ||
505 | .long 0,0 # IPL_DEVICE | ||
506 | .long 0,0 # INITRD_START | ||
507 | .long 0,0 # INITRD_SIZE | ||
508 | |||
509 | .org COMMAND_LINE | ||
510 | .byte "root=/dev/ram0 ro" | ||
511 | .byte 0 | ||
512 | |||
464 | #ifdef CONFIG_64BIT | 513 | #ifdef CONFIG_64BIT |
465 | #include "head64.S" | 514 | #include "head64.S" |
466 | #else | 515 | #else |
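The startup code moved into head.S above adds a processor-type check: bytes 4-5 of the STIDP result (the machine type) are compared against the table of models that the selected CONFIG_MARCH_* level no longer supports, and on a match a disabled-wait PSW is loaded so the CPU stops instead of hitting unsupported instructions later. A hedged C rendering of the comparison loop, with illustrative names only:

static int machine_too_old(unsigned short machine,
			   const unsigned short *too_old, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (machine == too_old[i])
			return 1;	/* head.S reacts with lpsw of a disabled-wait PSW */
	return 0;
}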
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index a816e2de32b9..db476d114caa 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -10,34 +10,13 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | # | ||
14 | # startup-code at 0x10000, running in absolute addressing mode | ||
15 | # this is called either by the ipl loader or directly by PSW restart | ||
16 | # or linload or SALIPL | ||
17 | # | ||
18 | .org 0x10000 | ||
19 | startup:basr %r13,0 # get base | ||
20 | .LPG0: l %r13,0f-.LPG0(%r13) | ||
21 | b 0(%r13) | ||
22 | 0: .long startup_continue | ||
23 | |||
24 | # | ||
25 | # params at 10400 (setup.h) | ||
26 | # | ||
27 | .org PARMAREA | ||
28 | .long 0,0 # IPL_DEVICE | ||
29 | .long 0,0 # INITRD_START | ||
30 | .long 0,0 # INITRD_SIZE | ||
31 | |||
32 | .org COMMAND_LINE | ||
33 | .byte "root=/dev/ram0 ro" | ||
34 | .byte 0 | ||
35 | |||
36 | .org 0x11000 | 13 | .org 0x11000 |
37 | 14 | ||
38 | startup_continue: | 15 | startup_continue: |
39 | basr %r13,0 # get base | 16 | basr %r13,0 # get base |
40 | .LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | 17 | .LPG1: |
18 | |||
19 | mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0) | ||
41 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers | 20 | lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers |
42 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area | 21 | l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area |
43 | # move IPL device to lowcore | 22 | # move IPL device to lowcore |
@@ -50,7 +29,6 @@ startup_continue: | |||
50 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE | 29 | ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE |
51 | st %r15,__LC_KERNEL_STACK # set end of kernel stack | 30 | st %r15,__LC_KERNEL_STACK # set end of kernel stack |
52 | ahi %r15,-96 | 31 | ahi %r15,-96 |
53 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | ||
54 | # | 32 | # |
55 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, | 33 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
56 | # and create a kernel NSS if the SAVESYS= parm is defined | 34 | # and create a kernel NSS if the SAVESYS= parm is defined |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 1d06961e87b3..3ccd36b24b8f 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -10,29 +10,6 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | # | ||
14 | # startup-code at 0x10000, running in absolute addressing mode | ||
15 | # this is called either by the ipl loader or directly by PSW restart | ||
16 | # or linload or SALIPL | ||
17 | # | ||
18 | .org 0x10000 | ||
19 | startup:basr %r13,0 # get base | ||
20 | .LPG0: l %r13,0f-.LPG0(%r13) | ||
21 | b 0(%r13) | ||
22 | 0: .long startup_continue | ||
23 | |||
24 | # | ||
25 | # params at 10400 (setup.h) | ||
26 | # | ||
27 | .org PARMAREA | ||
28 | .quad 0 # IPL_DEVICE | ||
29 | .quad 0 # INITRD_START | ||
30 | .quad 0 # INITRD_SIZE | ||
31 | |||
32 | .org COMMAND_LINE | ||
33 | .byte "root=/dev/ram0 ro" | ||
34 | .byte 0 | ||
35 | |||
36 | .org 0x11000 | 13 | .org 0x11000 |
37 | 14 | ||
38 | startup_continue: | 15 | startup_continue: |
@@ -119,7 +96,6 @@ startup_continue: | |||
119 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE | 96 | aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE |
120 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack | 97 | stg %r15,__LC_KERNEL_STACK # set end of kernel stack |
121 | aghi %r15,-160 | 98 | aghi %r15,-160 |
122 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain | ||
123 | # | 99 | # |
124 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, | 100 | # Save ipl parameters, clear bss memory, initialize storage key for kernel pages, |
125 | # and create a kernel NSS if the SAVESYS= parm is defined | 101 | # and create a kernel NSS if the SAVESYS= parm is defined |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S new file mode 100644 index 000000000000..397d131a345f --- /dev/null +++ b/arch/s390/kernel/mcount.S | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2008 | ||
3 | * | ||
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #ifndef CONFIG_64BIT | ||
9 | .globl _mcount | ||
10 | _mcount: | ||
11 | stm %r0,%r5,8(%r15) | ||
12 | st %r14,56(%r15) | ||
13 | lr %r1,%r15 | ||
14 | ahi %r15,-96 | ||
15 | l %r3,100(%r15) | ||
16 | la %r2,0(%r14) | ||
17 | st %r1,0(%r15) | ||
18 | la %r3,0(%r3) | ||
19 | bras %r14,0f | ||
20 | .long ftrace_trace_function | ||
21 | 0: l %r14,0(%r14) | ||
22 | l %r14,0(%r14) | ||
23 | basr %r14,%r14 | ||
24 | ahi %r15,96 | ||
25 | lm %r0,%r5,8(%r15) | ||
26 | l %r14,56(%r15) | ||
27 | br %r14 | ||
28 | |||
29 | .globl ftrace_stub | ||
30 | ftrace_stub: | ||
31 | br %r14 | ||
32 | |||
33 | #else /* CONFIG_64BIT */ | ||
34 | |||
35 | .globl _mcount | ||
36 | _mcount: | ||
37 | stmg %r0,%r5,16(%r15) | ||
38 | stg %r14,112(%r15) | ||
39 | lgr %r1,%r15 | ||
40 | aghi %r15,-160 | ||
41 | stg %r1,0(%r15) | ||
42 | lgr %r2,%r14 | ||
43 | lg %r3,168(%r15) | ||
44 | larl %r14,ftrace_trace_function | ||
45 | lg %r14,0(%r14) | ||
46 | basr %r14,%r14 | ||
47 | aghi %r15,160 | ||
48 | lmg %r0,%r5,16(%r15) | ||
49 | lg %r14,112(%r15) | ||
50 | br %r14 | ||
51 | |||
52 | .globl ftrace_stub | ||
53 | ftrace_stub: | ||
54 | br %r14 | ||
55 | |||
56 | #endif /* CONFIG_64BIT */ | ||
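The new _mcount stubs above implement the hook that gcc's -pg instrumentation calls on every function entry: they save the clobbered registers, load the currently installed tracer from ftrace_trace_function and branch to it with the call site and its caller as arguments. Roughly, in C terms (a sketch only; the exact derivation of the two addresses lives in the register and stack handling of the assembly, and the names below are illustrative):

#include <linux/ftrace.h>

static void mcount_sketch(unsigned long ip, unsigned long parent_ip)
{
	/* the assembly dereferences the ftrace_trace_function pointer
	 * and calls it with these two addresses */
	ftrace_trace_function(ip, parent_ip);
}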
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c new file mode 100644 index 000000000000..82c1872cfe80 --- /dev/null +++ b/arch/s390/kernel/processor.c | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/processor.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
6 | */ | ||
7 | |||
8 | #define KMSG_COMPONENT "cpu" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/seq_file.h> | ||
15 | #include <linux/delay.h> | ||
16 | |||
17 | #include <asm/elf.h> | ||
18 | #include <asm/lowcore.h> | ||
19 | #include <asm/param.h> | ||
20 | |||
21 | void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) | ||
22 | { | ||
23 | pr_info("Processor %d started, address %d, identification %06X\n", | ||
24 | cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident); | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * show_cpuinfo - Get information on one CPU for use by procfs. | ||
29 | */ | ||
30 | |||
31 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
32 | { | ||
33 | static const char *hwcap_str[8] = { | ||
34 | "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", | ||
35 | "edat" | ||
36 | }; | ||
37 | struct cpuinfo_S390 *cpuinfo; | ||
38 | unsigned long n = (unsigned long) v - 1; | ||
39 | int i; | ||
40 | |||
41 | s390_adjust_jiffies(); | ||
42 | preempt_disable(); | ||
43 | if (!n) { | ||
44 | seq_printf(m, "vendor_id : IBM/S390\n" | ||
45 | "# processors : %i\n" | ||
46 | "bogomips per cpu: %lu.%02lu\n", | ||
47 | num_online_cpus(), loops_per_jiffy/(500000/HZ), | ||
48 | (loops_per_jiffy/(5000/HZ))%100); | ||
49 | seq_puts(m, "features\t: "); | ||
50 | for (i = 0; i < 8; i++) | ||
51 | if (hwcap_str[i] && (elf_hwcap & (1UL << i))) | ||
52 | seq_printf(m, "%s ", hwcap_str[i]); | ||
53 | seq_puts(m, "\n"); | ||
54 | } | ||
55 | |||
56 | if (cpu_online(n)) { | ||
57 | #ifdef CONFIG_SMP | ||
58 | if (smp_processor_id() == n) | ||
59 | cpuinfo = &S390_lowcore.cpu_data; | ||
60 | else | ||
61 | cpuinfo = &lowcore_ptr[n]->cpu_data; | ||
62 | #else | ||
63 | cpuinfo = &S390_lowcore.cpu_data; | ||
64 | #endif | ||
65 | seq_printf(m, "processor %li: " | ||
66 | "version = %02X, " | ||
67 | "identification = %06X, " | ||
68 | "machine = %04X\n", | ||
69 | n, cpuinfo->cpu_id.version, | ||
70 | cpuinfo->cpu_id.ident, | ||
71 | cpuinfo->cpu_id.machine); | ||
72 | } | ||
73 | preempt_enable(); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
78 | { | ||
79 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | ||
80 | } | ||
81 | |||
82 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
83 | { | ||
84 | ++*pos; | ||
85 | return c_start(m, pos); | ||
86 | } | ||
87 | |||
88 | static void c_stop(struct seq_file *m, void *v) | ||
89 | { | ||
90 | } | ||
91 | |||
92 | const struct seq_operations cpuinfo_op = { | ||
93 | .start = c_start, | ||
94 | .next = c_next, | ||
95 | .stop = c_stop, | ||
96 | .show = show_cpuinfo, | ||
97 | }; | ||
98 | |||
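The cpuinfo_op sequence operations defined at the end of the new processor.c are the arch side of /proc/cpuinfo; the generic proc code opens them as a seq_file. A minimal sketch of that consumer side, mirroring what the generic /proc code does (shown for context only, not part of this patch):

#include <linux/fs.h>
#include <linux/seq_file.h>

extern const struct seq_operations cpuinfo_op;

static int cpuinfo_open(struct inode *inode, struct file *file)
{
	/* seq_file walks c_start/c_next/c_stop/show_cpuinfo per CPU */
	return seq_open(file, &cpuinfo_op);
}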
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 38ff2bce1203..75c496f4f16d 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -204,7 +204,6 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) | |||
204 | static int | 204 | static int |
205 | peek_user(struct task_struct *child, addr_t addr, addr_t data) | 205 | peek_user(struct task_struct *child, addr_t addr, addr_t data) |
206 | { | 206 | { |
207 | struct user *dummy = NULL; | ||
208 | addr_t tmp, mask; | 207 | addr_t tmp, mask; |
209 | 208 | ||
210 | /* | 209 | /* |
@@ -213,8 +212,8 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data) | |||
213 | */ | 212 | */ |
214 | mask = __ADDR_MASK; | 213 | mask = __ADDR_MASK; |
215 | #ifdef CONFIG_64BIT | 214 | #ifdef CONFIG_64BIT |
216 | if (addr >= (addr_t) &dummy->regs.acrs && | 215 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
217 | addr < (addr_t) &dummy->regs.orig_gpr2) | 216 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
218 | mask = 3; | 217 | mask = 3; |
219 | #endif | 218 | #endif |
220 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 219 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
@@ -312,7 +311,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
312 | static int | 311 | static int |
313 | poke_user(struct task_struct *child, addr_t addr, addr_t data) | 312 | poke_user(struct task_struct *child, addr_t addr, addr_t data) |
314 | { | 313 | { |
315 | struct user *dummy = NULL; | ||
316 | addr_t mask; | 314 | addr_t mask; |
317 | 315 | ||
318 | /* | 316 | /* |
@@ -321,8 +319,8 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
321 | */ | 319 | */ |
322 | mask = __ADDR_MASK; | 320 | mask = __ADDR_MASK; |
323 | #ifdef CONFIG_64BIT | 321 | #ifdef CONFIG_64BIT |
324 | if (addr >= (addr_t) &dummy->regs.acrs && | 322 | if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs && |
325 | addr < (addr_t) &dummy->regs.orig_gpr2) | 323 | addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2) |
326 | mask = 3; | 324 | mask = 3; |
327 | #endif | 325 | #endif |
328 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) | 326 | if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) |
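Both the removed dummy-pointer form and the new (struct user *) NULL cast in the ptrace.c hunks above compute the byte offset of a member inside struct user; the same bounds check could equally be written with offsetof(). A sketch of that equivalent form (not the patch itself, function name is illustrative):

#include <linux/stddef.h>
#include <asm/user.h>

static int addr_in_acrs_range(unsigned long addr)
{
	return addr >= offsetof(struct user, regs.acrs) &&
	       addr <  offsetof(struct user, regs.orig_gpr2);
}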
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 48238a114ce9..46b90cb03707 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/delay.h> | 14 | #include <asm/delay.h> |
15 | #include <asm/pgalloc.h> | 15 | #include <asm/pgalloc.h> |
16 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
17 | #include <asm/ftrace.h> | ||
17 | #ifdef CONFIG_IP_MULTICAST | 18 | #ifdef CONFIG_IP_MULTICAST |
18 | #include <net/arp.h> | 19 | #include <net/arp.h> |
19 | #endif | 20 | #endif |
@@ -43,3 +44,7 @@ EXPORT_SYMBOL(csum_fold); | |||
43 | EXPORT_SYMBOL(console_mode); | 44 | EXPORT_SYMBOL(console_mode); |
44 | EXPORT_SYMBOL(console_devno); | 45 | EXPORT_SYMBOL(console_devno); |
45 | EXPORT_SYMBOL(console_irq); | 46 | EXPORT_SYMBOL(console_irq); |
47 | |||
48 | #ifdef CONFIG_FUNCTION_TRACER | ||
49 | EXPORT_SYMBOL(_mcount); | ||
50 | #endif | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 400b040df7fa..b7a1efd5522c 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -14,6 +14,9 @@ | |||
14 | * This file handles the architecture-dependent parts of initialization | 14 | * This file handles the architecture-dependent parts of initialization |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define KMSG_COMPONENT "setup" | ||
18 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
19 | |||
17 | #include <linux/errno.h> | 20 | #include <linux/errno.h> |
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
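The two new defines make every pr_info()/pr_err() call in setup.c print with a "setup: " prefix, which is why the printk() calls later in this file are converted to pr_*(). A minimal sketch of the pattern with a hypothetical component name; the defines have to come before the first include that pulls in linux/kernel.h:

#define KMSG_COMPONENT "mydrv"			/* hypothetical component name */
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init mydrv_init(void)
{
	pr_info("probing done\n");	/* appears in the log as "mydrv: probing done" */
	return 0;
}
module_init(mydrv_init);

MODULE_LICENSE("GPL");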
@@ -32,7 +35,6 @@ | |||
32 | #include <linux/bootmem.h> | 35 | #include <linux/bootmem.h> |
33 | #include <linux/root_dev.h> | 36 | #include <linux/root_dev.h> |
34 | #include <linux/console.h> | 37 | #include <linux/console.h> |
35 | #include <linux/seq_file.h> | ||
36 | #include <linux/kernel_stat.h> | 38 | #include <linux/kernel_stat.h> |
37 | #include <linux/device.h> | 39 | #include <linux/device.h> |
38 | #include <linux/notifier.h> | 40 | #include <linux/notifier.h> |
@@ -291,8 +293,8 @@ unsigned int switch_amode = 0; | |||
291 | #endif | 293 | #endif |
292 | EXPORT_SYMBOL_GPL(switch_amode); | 294 | EXPORT_SYMBOL_GPL(switch_amode); |
293 | 295 | ||
294 | static void set_amode_and_uaccess(unsigned long user_amode, | 296 | static int set_amode_and_uaccess(unsigned long user_amode, |
295 | unsigned long user32_amode) | 297 | unsigned long user32_amode) |
296 | { | 298 | { |
297 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | | 299 | psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode | |
298 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | | 300 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | |
@@ -309,11 +311,11 @@ static void set_amode_and_uaccess(unsigned long user_amode, | |||
309 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; | 311 | PSW_MASK_MCHECK | PSW_DEFAULT_KEY; |
310 | 312 | ||
311 | if (MACHINE_HAS_MVCOS) { | 313 | if (MACHINE_HAS_MVCOS) { |
312 | printk("mvcos available.\n"); | ||
313 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); | 314 | memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess)); |
315 | return 1; | ||
314 | } else { | 316 | } else { |
315 | printk("mvcos not available.\n"); | ||
316 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); | 317 | memcpy(&uaccess, &uaccess_pt, sizeof(uaccess)); |
318 | return 0; | ||
317 | } | 319 | } |
318 | } | 320 | } |
319 | 321 | ||
@@ -328,9 +330,10 @@ static int __init early_parse_switch_amode(char *p) | |||
328 | early_param("switch_amode", early_parse_switch_amode); | 330 | early_param("switch_amode", early_parse_switch_amode); |
329 | 331 | ||
330 | #else /* CONFIG_S390_SWITCH_AMODE */ | 332 | #else /* CONFIG_S390_SWITCH_AMODE */ |
331 | static inline void set_amode_and_uaccess(unsigned long user_amode, | 333 | static inline int set_amode_and_uaccess(unsigned long user_amode, |
332 | unsigned long user32_amode) | 334 | unsigned long user32_amode) |
333 | { | 335 | { |
336 | return 0; | ||
334 | } | 337 | } |
335 | #endif /* CONFIG_S390_SWITCH_AMODE */ | 338 | #endif /* CONFIG_S390_SWITCH_AMODE */ |
336 | 339 | ||
@@ -355,11 +358,20 @@ early_param("noexec", early_parse_noexec); | |||
355 | static void setup_addressing_mode(void) | 358 | static void setup_addressing_mode(void) |
356 | { | 359 | { |
357 | if (s390_noexec) { | 360 | if (s390_noexec) { |
358 | printk("S390 execute protection active, "); | 361 | if (set_amode_and_uaccess(PSW_ASC_SECONDARY, |
359 | set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY); | 362 | PSW32_ASC_SECONDARY)) |
363 | pr_info("Execute protection active, " | ||
364 | "mvcos available\n"); | ||
365 | else | ||
366 | pr_info("Execute protection active, " | ||
367 | "mvcos not available\n"); | ||
360 | } else if (switch_amode) { | 368 | } else if (switch_amode) { |
361 | printk("S390 address spaces switched, "); | 369 | if (set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY)) |
362 | set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY); | 370 | pr_info("Address spaces switched, " |
371 | "mvcos available\n"); | ||
372 | else | ||
373 | pr_info("Address spaces switched, " | ||
374 | "mvcos not available\n"); | ||
363 | } | 375 | } |
364 | #ifdef CONFIG_TRACE_IRQFLAGS | 376 | #ifdef CONFIG_TRACE_IRQFLAGS |
365 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; | 377 | sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK; |
@@ -572,15 +584,15 @@ setup_memory(void) | |||
572 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; | 584 | start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE; |
573 | 585 | ||
574 | if (start + INITRD_SIZE > memory_end) { | 586 | if (start + INITRD_SIZE > memory_end) { |
575 | printk("initrd extends beyond end of memory " | 587 | pr_err("initrd extends beyond end of " |
576 | "(0x%08lx > 0x%08lx)\n" | 588 | "memory (0x%08lx > 0x%08lx) " |
577 | "disabling initrd\n", | 589 | "disabling initrd\n", |
578 | start + INITRD_SIZE, memory_end); | 590 | start + INITRD_SIZE, memory_end); |
579 | INITRD_START = INITRD_SIZE = 0; | 591 | INITRD_START = INITRD_SIZE = 0; |
580 | } else { | 592 | } else { |
581 | printk("Moving initrd (0x%08lx -> 0x%08lx, " | 593 | pr_info("Moving initrd (0x%08lx -> " |
582 | "size: %ld)\n", | 594 | "0x%08lx, size: %ld)\n", |
583 | INITRD_START, start, INITRD_SIZE); | 595 | INITRD_START, start, INITRD_SIZE); |
584 | memmove((void *) start, (void *) INITRD_START, | 596 | memmove((void *) start, (void *) INITRD_START, |
585 | INITRD_SIZE); | 597 | INITRD_SIZE); |
586 | INITRD_START = start; | 598 | INITRD_START = start; |
@@ -642,8 +654,9 @@ setup_memory(void) | |||
642 | initrd_start = INITRD_START; | 654 | initrd_start = INITRD_START; |
643 | initrd_end = initrd_start + INITRD_SIZE; | 655 | initrd_end = initrd_start + INITRD_SIZE; |
644 | } else { | 656 | } else { |
645 | printk("initrd extends beyond end of memory " | 657 | pr_err("initrd extends beyond end of " |
646 | "(0x%08lx > 0x%08lx)\ndisabling initrd\n", | 658 | "memory (0x%08lx > 0x%08lx) " |
659 | "disabling initrd\n", | ||
647 | initrd_start + INITRD_SIZE, memory_end); | 660 | initrd_start + INITRD_SIZE, memory_end); |
648 | initrd_start = initrd_end = 0; | 661 | initrd_start = initrd_end = 0; |
649 | } | 662 | } |
@@ -651,23 +664,6 @@ setup_memory(void) | |||
651 | #endif | 664 | #endif |
652 | } | 665 | } |
653 | 666 | ||
654 | static int __init __stfle(unsigned long long *list, int doublewords) | ||
655 | { | ||
656 | typedef struct { unsigned long long _[doublewords]; } addrtype; | ||
657 | register unsigned long __nr asm("0") = doublewords - 1; | ||
658 | |||
659 | asm volatile(".insn s,0xb2b00000,%0" /* stfle */ | ||
660 | : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc"); | ||
661 | return __nr + 1; | ||
662 | } | ||
663 | |||
664 | int __init stfle(unsigned long long *list, int doublewords) | ||
665 | { | ||
666 | if (!(stfl() & (1UL << 24))) | ||
667 | return -EOPNOTSUPP; | ||
668 | return __stfle(list, doublewords); | ||
669 | } | ||
670 | |||
671 | /* | 667 | /* |
672 | * Setup hardware capabilities. | 668 | * Setup hardware capabilities. |
673 | */ | 669 | */ |
@@ -739,8 +735,13 @@ static void __init setup_hwcaps(void) | |||
739 | strcpy(elf_platform, "z990"); | 735 | strcpy(elf_platform, "z990"); |
740 | break; | 736 | break; |
741 | case 0x2094: | 737 | case 0x2094: |
738 | case 0x2096: | ||
742 | strcpy(elf_platform, "z9-109"); | 739 | strcpy(elf_platform, "z9-109"); |
743 | break; | 740 | break; |
741 | case 0x2097: | ||
742 | case 0x2098: | ||
743 | strcpy(elf_platform, "z10"); | ||
744 | break; | ||
744 | } | 745 | } |
745 | } | 746 | } |
746 | 747 | ||
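setup_hwcaps() maps the CPU type codes reported by the hardware to an elf_platform string, and this hunk adds the z10 models 0x2097 and 0x2098. Userspace sees that string as the AT_PLATFORM auxiliary-vector entry. A hedged illustration of where the value ends up; getauxval() is a much later glibc addition and is used here only for demonstration:

#include <stdio.h>
#include <sys/auxv.h>		/* glibc 2.16+, illustration only */

int main(void)
{
	const char *platform = (const char *) getauxval(AT_PLATFORM);

	/* On the machine types added by this hunk this would print "z10". */
	printf("AT_PLATFORM = %s\n", platform ? platform : "(unknown)");
	return 0;
}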
@@ -752,25 +753,34 @@ static void __init setup_hwcaps(void) | |||
752 | void __init | 753 | void __init |
753 | setup_arch(char **cmdline_p) | 754 | setup_arch(char **cmdline_p) |
754 | { | 755 | { |
756 | /* set up preferred console */ | ||
757 | add_preferred_console("ttyS", 0, NULL); | ||
758 | |||
755 | /* | 759 | /* |
756 | * print what head.S has found out about the machine | 760 | * print what head.S has found out about the machine |
757 | */ | 761 | */ |
758 | #ifndef CONFIG_64BIT | 762 | #ifndef CONFIG_64BIT |
759 | printk((MACHINE_IS_VM) ? | 763 | if (MACHINE_IS_VM) |
760 | "We are running under VM (31 bit mode)\n" : | 764 | pr_info("Linux is running as a z/VM " |
761 | "We are running native (31 bit mode)\n"); | 765 | "guest operating system in 31-bit mode\n"); |
762 | printk((MACHINE_HAS_IEEE) ? | 766 | else |
763 | "This machine has an IEEE fpu\n" : | 767 | pr_info("Linux is running natively in 31-bit mode\n"); |
764 | "This machine has no IEEE fpu\n"); | 768 | if (MACHINE_HAS_IEEE) |
769 | pr_info("The hardware system has IEEE compatible " | ||
770 | "floating point units\n"); | ||
771 | else | ||
772 | pr_info("The hardware system has no IEEE compatible " | ||
773 | "floating point units\n"); | ||
765 | #else /* CONFIG_64BIT */ | 774 | #else /* CONFIG_64BIT */ |
766 | if (MACHINE_IS_VM) | 775 | if (MACHINE_IS_VM) |
767 | printk("We are running under VM (64 bit mode)\n"); | 776 | pr_info("Linux is running as a z/VM " |
777 | "guest operating system in 64-bit mode\n"); | ||
768 | else if (MACHINE_IS_KVM) { | 778 | else if (MACHINE_IS_KVM) { |
769 | printk("We are running under KVM (64 bit mode)\n"); | 779 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
770 | add_preferred_console("hvc", 0, NULL); | 780 | add_preferred_console("hvc", 0, NULL); |
771 | s390_virtio_console_init(); | 781 | s390_virtio_console_init(); |
772 | } else | 782 | } else |
773 | printk("We are running native (64 bit mode)\n"); | 783 | pr_info("Linux is running natively in 64-bit mode\n"); |
774 | #endif /* CONFIG_64BIT */ | 784 | #endif /* CONFIG_64BIT */ |
775 | 785 | ||
776 | /* Have one command line that is parsed and saved in /proc/cmdline */ | 786 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
@@ -818,90 +828,3 @@ setup_arch(char **cmdline_p) | |||
818 | /* Setup zfcpdump support */ | 828 | /* Setup zfcpdump support */ |
819 | setup_zfcpdump(console_devno); | 829 | setup_zfcpdump(console_devno); |
820 | } | 830 | } |
821 | |||
822 | void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) | ||
823 | { | ||
824 | printk(KERN_INFO "cpu %d " | ||
825 | #ifdef CONFIG_SMP | ||
826 | "phys_idx=%d " | ||
827 | #endif | ||
828 | "vers=%02X ident=%06X machine=%04X unused=%04X\n", | ||
829 | cpuinfo->cpu_nr, | ||
830 | #ifdef CONFIG_SMP | ||
831 | cpuinfo->cpu_addr, | ||
832 | #endif | ||
833 | cpuinfo->cpu_id.version, | ||
834 | cpuinfo->cpu_id.ident, | ||
835 | cpuinfo->cpu_id.machine, | ||
836 | cpuinfo->cpu_id.unused); | ||
837 | } | ||
838 | |||
839 | /* | ||
840 | * show_cpuinfo - Get information on one CPU for use by procfs. | ||
841 | */ | ||
842 | |||
843 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
844 | { | ||
845 | static const char *hwcap_str[8] = { | ||
846 | "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", | ||
847 | "edat" | ||
848 | }; | ||
849 | struct cpuinfo_S390 *cpuinfo; | ||
850 | unsigned long n = (unsigned long) v - 1; | ||
851 | int i; | ||
852 | |||
853 | s390_adjust_jiffies(); | ||
854 | preempt_disable(); | ||
855 | if (!n) { | ||
856 | seq_printf(m, "vendor_id : IBM/S390\n" | ||
857 | "# processors : %i\n" | ||
858 | "bogomips per cpu: %lu.%02lu\n", | ||
859 | num_online_cpus(), loops_per_jiffy/(500000/HZ), | ||
860 | (loops_per_jiffy/(5000/HZ))%100); | ||
861 | seq_puts(m, "features\t: "); | ||
862 | for (i = 0; i < 8; i++) | ||
863 | if (hwcap_str[i] && (elf_hwcap & (1UL << i))) | ||
864 | seq_printf(m, "%s ", hwcap_str[i]); | ||
865 | seq_puts(m, "\n"); | ||
866 | } | ||
867 | |||
868 | if (cpu_online(n)) { | ||
869 | #ifdef CONFIG_SMP | ||
870 | if (smp_processor_id() == n) | ||
871 | cpuinfo = &S390_lowcore.cpu_data; | ||
872 | else | ||
873 | cpuinfo = &lowcore_ptr[n]->cpu_data; | ||
874 | #else | ||
875 | cpuinfo = &S390_lowcore.cpu_data; | ||
876 | #endif | ||
877 | seq_printf(m, "processor %li: " | ||
878 | "version = %02X, " | ||
879 | "identification = %06X, " | ||
880 | "machine = %04X\n", | ||
881 | n, cpuinfo->cpu_id.version, | ||
882 | cpuinfo->cpu_id.ident, | ||
883 | cpuinfo->cpu_id.machine); | ||
884 | } | ||
885 | preempt_enable(); | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
890 | { | ||
891 | return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; | ||
892 | } | ||
893 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
894 | { | ||
895 | ++*pos; | ||
896 | return c_start(m, pos); | ||
897 | } | ||
898 | static void c_stop(struct seq_file *m, void *v) | ||
899 | { | ||
900 | } | ||
901 | const struct seq_operations cpuinfo_op = { | ||
902 | .start = c_start, | ||
903 | .next = c_next, | ||
904 | .stop = c_stop, | ||
905 | .show = show_cpuinfo, | ||
906 | }; | ||
907 | |||
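The /proc/cpuinfo code removed from setup.c here (the same functions reappear in a new file earlier in this diff) is a textbook seq_file iterator: start() turns a position into an opaque cursor, next() advances it, show() prints one record, stop() cleans up. A self-contained sketch of the same pattern for a hypothetical procfs file, assuming nothing beyond the standard seq_file API:

/* Hypothetical example of the seq_file iterator pattern used by cpuinfo_op. */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#define NR_ITEMS 4

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	/* Non-NULL cursor for valid positions, NULL to stop iteration. */
	return *pos < NR_ITEMS ? (void *)(unsigned long)(*pos + 1) : NULL;
}

static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return ex_start(m, pos);
}

static void ex_stop(struct seq_file *m, void *v)
{
}

static int ex_show(struct seq_file *m, void *v)
{
	seq_printf(m, "item %lu\n", (unsigned long) v - 1);
	return 0;
}

static const struct seq_operations ex_seq_ops = {
	.start	= ex_start,
	.next	= ex_next,
	.stop	= ex_stop,
	.show	= ex_show,
};

static int ex_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ex_seq_ops);
}

static const struct file_operations ex_fops = {
	.owner		= THIS_MODULE,
	.open		= ex_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ex_init(void)
{
	proc_create("seqfile_example", 0, NULL, &ex_fops);
	return 0;
}
module_init(ex_init);

static void __exit ex_exit(void)
{
	remove_proc_entry("seqfile_example", NULL);
}
module_exit(ex_exit);

MODULE_LICENSE("GPL");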
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index f03914b8ed2f..3ed5c7a83c6c 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -20,6 +20,9 @@ | |||
20 | * cpu_number_map in other architectures. | 20 | * cpu_number_map in other architectures. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #define KMSG_COMPONENT "cpu" | ||
24 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
25 | |||
23 | #include <linux/module.h> | 26 | #include <linux/module.h> |
24 | #include <linux/init.h> | 27 | #include <linux/init.h> |
25 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
@@ -71,159 +74,6 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); | |||
71 | 74 | ||
72 | static void smp_ext_bitcall(int, ec_bit_sig); | 75 | static void smp_ext_bitcall(int, ec_bit_sig); |
73 | 76 | ||
74 | /* | ||
75 | * Structure and data for __smp_call_function_map(). This is designed to | ||
76 | * minimise static memory requirements. It also looks cleaner. | ||
77 | */ | ||
78 | static DEFINE_SPINLOCK(call_lock); | ||
79 | |||
80 | struct call_data_struct { | ||
81 | void (*func) (void *info); | ||
82 | void *info; | ||
83 | cpumask_t started; | ||
84 | cpumask_t finished; | ||
85 | int wait; | ||
86 | }; | ||
87 | |||
88 | static struct call_data_struct *call_data; | ||
89 | |||
90 | /* | ||
91 | * 'Call function' interrupt callback | ||
92 | */ | ||
93 | static void do_call_function(void) | ||
94 | { | ||
95 | void (*func) (void *info) = call_data->func; | ||
96 | void *info = call_data->info; | ||
97 | int wait = call_data->wait; | ||
98 | |||
99 | cpu_set(smp_processor_id(), call_data->started); | ||
100 | (*func)(info); | ||
101 | if (wait) | ||
102 | cpu_set(smp_processor_id(), call_data->finished);; | ||
103 | } | ||
104 | |||
105 | static void __smp_call_function_map(void (*func) (void *info), void *info, | ||
106 | int wait, cpumask_t map) | ||
107 | { | ||
108 | struct call_data_struct data; | ||
109 | int cpu, local = 0; | ||
110 | |||
111 | /* | ||
112 | * Can deadlock when interrupts are disabled or if in wrong context. | ||
113 | */ | ||
114 | WARN_ON(irqs_disabled() || in_irq()); | ||
115 | |||
116 | /* | ||
117 | * Check for local function call. We have to have the same call order | ||
118 | * as in on_each_cpu() because of machine_restart_smp(). | ||
119 | */ | ||
120 | if (cpu_isset(smp_processor_id(), map)) { | ||
121 | local = 1; | ||
122 | cpu_clear(smp_processor_id(), map); | ||
123 | } | ||
124 | |||
125 | cpus_and(map, map, cpu_online_map); | ||
126 | if (cpus_empty(map)) | ||
127 | goto out; | ||
128 | |||
129 | data.func = func; | ||
130 | data.info = info; | ||
131 | data.started = CPU_MASK_NONE; | ||
132 | data.wait = wait; | ||
133 | if (wait) | ||
134 | data.finished = CPU_MASK_NONE; | ||
135 | |||
136 | call_data = &data; | ||
137 | |||
138 | for_each_cpu_mask(cpu, map) | ||
139 | smp_ext_bitcall(cpu, ec_call_function); | ||
140 | |||
141 | /* Wait for response */ | ||
142 | while (!cpus_equal(map, data.started)) | ||
143 | cpu_relax(); | ||
144 | if (wait) | ||
145 | while (!cpus_equal(map, data.finished)) | ||
146 | cpu_relax(); | ||
147 | out: | ||
148 | if (local) { | ||
149 | local_irq_disable(); | ||
150 | func(info); | ||
151 | local_irq_enable(); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | /* | ||
156 | * smp_call_function: | ||
157 | * @func: the function to run; this must be fast and non-blocking | ||
158 | * @info: an arbitrary pointer to pass to the function | ||
159 | * @wait: if true, wait (atomically) until function has completed on other CPUs | ||
160 | * | ||
161 | * Run a function on all other CPUs. | ||
162 | * | ||
163 | * You must not call this function with disabled interrupts, from a | ||
164 | * hardware interrupt handler or from a bottom half. | ||
165 | */ | ||
166 | int smp_call_function(void (*func) (void *info), void *info, int wait) | ||
167 | { | ||
168 | cpumask_t map; | ||
169 | |||
170 | spin_lock(&call_lock); | ||
171 | map = cpu_online_map; | ||
172 | cpu_clear(smp_processor_id(), map); | ||
173 | __smp_call_function_map(func, info, wait, map); | ||
174 | spin_unlock(&call_lock); | ||
175 | return 0; | ||
176 | } | ||
177 | EXPORT_SYMBOL(smp_call_function); | ||
178 | |||
179 | /* | ||
180 | * smp_call_function_single: | ||
181 | * @cpu: the CPU where func should run | ||
182 | * @func: the function to run; this must be fast and non-blocking | ||
183 | * @info: an arbitrary pointer to pass to the function | ||
184 | * @wait: if true, wait (atomically) until function has completed on other CPUs | ||
185 | * | ||
186 | * Run a function on one processor. | ||
187 | * | ||
188 | * You must not call this function with disabled interrupts, from a | ||
189 | * hardware interrupt handler or from a bottom half. | ||
190 | */ | ||
191 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
192 | int wait) | ||
193 | { | ||
194 | spin_lock(&call_lock); | ||
195 | __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); | ||
196 | spin_unlock(&call_lock); | ||
197 | return 0; | ||
198 | } | ||
199 | EXPORT_SYMBOL(smp_call_function_single); | ||
200 | |||
201 | /** | ||
202 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
203 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
204 | * @func: The function to run. This must be fast and non-blocking. | ||
205 | * @info: An arbitrary pointer to pass to the function. | ||
206 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
207 | * | ||
208 | * Returns 0 on success, else a negative status code. | ||
209 | * | ||
210 | * If @wait is true, then returns once @func has returned; otherwise | ||
211 | * it returns just before the target cpu calls @func. | ||
212 | * | ||
213 | * You must not call this function with disabled interrupts or from a | ||
214 | * hardware interrupt handler or from a bottom half handler. | ||
215 | */ | ||
216 | int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | ||
217 | int wait) | ||
218 | { | ||
219 | spin_lock(&call_lock); | ||
220 | cpu_clear(smp_processor_id(), mask); | ||
221 | __smp_call_function_map(func, info, wait, mask); | ||
222 | spin_unlock(&call_lock); | ||
223 | return 0; | ||
224 | } | ||
225 | EXPORT_SYMBOL(smp_call_function_mask); | ||
226 | |||
227 | void smp_send_stop(void) | 77 | void smp_send_stop(void) |
228 | { | 78 | { |
229 | int cpu, rc; | 79 | int cpu, rc; |
@@ -265,7 +115,10 @@ static void do_ext_call_interrupt(__u16 code) | |||
265 | bits = xchg(&S390_lowcore.ext_call_fast, 0); | 115 | bits = xchg(&S390_lowcore.ext_call_fast, 0); |
266 | 116 | ||
267 | if (test_bit(ec_call_function, &bits)) | 117 | if (test_bit(ec_call_function, &bits)) |
268 | do_call_function(); | 118 | generic_smp_call_function_interrupt(); |
119 | |||
120 | if (test_bit(ec_call_function_single, &bits)) | ||
121 | generic_smp_call_function_single_interrupt(); | ||
269 | } | 122 | } |
270 | 123 | ||
271 | /* | 124 | /* |
@@ -282,6 +135,19 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | |||
282 | udelay(10); | 135 | udelay(10); |
283 | } | 136 | } |
284 | 137 | ||
138 | void arch_send_call_function_ipi(cpumask_t mask) | ||
139 | { | ||
140 | int cpu; | ||
141 | |||
142 | for_each_cpu_mask(cpu, mask) | ||
143 | smp_ext_bitcall(cpu, ec_call_function); | ||
144 | } | ||
145 | |||
146 | void arch_send_call_function_single_ipi(int cpu) | ||
147 | { | ||
148 | smp_ext_bitcall(cpu, ec_call_function_single); | ||
149 | } | ||
150 | |||
285 | #ifndef CONFIG_64BIT | 151 | #ifndef CONFIG_64BIT |
286 | /* | 152 | /* |
287 | * this function sends a 'purge tlb' signal to another CPU. | 153 | * this function sends a 'purge tlb' signal to another CPU. |
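All of the hand-rolled call-function machinery deleted above is replaced by the generic implementation in kernel/smp.c (selected through USE_GENERIC_SMP_HELPERS in the Kconfig hunk); the architecture now only supplies the two IPI hooks arch_send_call_function_ipi() and arch_send_call_function_single_ipi(). Callers are unaffected and keep using the same API. A hedged sketch of a caller, with hypothetical function names:

#include <linux/smp.h>

/* Runs on the target CPU in interrupt context, so it must not sleep. */
static void drain_local_state(void *info)
{
	/* hypothetical per-cpu work */
}

static void drain_everywhere(void)
{
	/* wait=1: return only after drain_local_state() finished on all other CPUs */
	smp_call_function(drain_local_state, NULL, 1);

	/* or target a single CPU */
	smp_call_function_single(0, drain_local_state, NULL, 1);
}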
@@ -382,8 +248,8 @@ static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) | |||
382 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) | 248 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
383 | return; | 249 | return; |
384 | if (cpu >= NR_CPUS) { | 250 | if (cpu >= NR_CPUS) { |
385 | printk(KERN_WARNING "Registers for cpu %i not saved since dump " | 251 | pr_warning("CPU %i exceeds the maximum %i and is excluded from " |
386 | "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); | 252 | "the dump\n", cpu, NR_CPUS - 1); |
387 | return; | 253 | return; |
388 | } | 254 | } |
389 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); | 255 | zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL); |
@@ -556,7 +422,7 @@ static void __init smp_detect_cpus(void) | |||
556 | } | 422 | } |
557 | out: | 423 | out: |
558 | kfree(info); | 424 | kfree(info); |
559 | printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus); | 425 | pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus); |
560 | get_online_cpus(); | 426 | get_online_cpus(); |
561 | __smp_rescan_cpus(); | 427 | __smp_rescan_cpus(); |
562 | put_online_cpus(); | 428 | put_online_cpus(); |
@@ -572,19 +438,17 @@ int __cpuinit start_secondary(void *cpuvoid) | |||
572 | preempt_disable(); | 438 | preempt_disable(); |
573 | /* Enable TOD clock interrupts on the secondary cpu. */ | 439 | /* Enable TOD clock interrupts on the secondary cpu. */ |
574 | init_cpu_timer(); | 440 | init_cpu_timer(); |
575 | #ifdef CONFIG_VIRT_TIMER | ||
576 | /* Enable cpu timer interrupts on the secondary cpu. */ | 441 | /* Enable cpu timer interrupts on the secondary cpu. */ |
577 | init_cpu_vtimer(); | 442 | init_cpu_vtimer(); |
578 | #endif | ||
579 | /* Enable pfault pseudo page faults on this cpu. */ | 443 | /* Enable pfault pseudo page faults on this cpu. */ |
580 | pfault_init(); | 444 | pfault_init(); |
581 | 445 | ||
582 | /* call cpu notifiers */ | 446 | /* call cpu notifiers */ |
583 | notify_cpu_starting(smp_processor_id()); | 447 | notify_cpu_starting(smp_processor_id()); |
584 | /* Mark this cpu as online */ | 448 | /* Mark this cpu as online */ |
585 | spin_lock(&call_lock); | 449 | ipi_call_lock(); |
586 | cpu_set(smp_processor_id(), cpu_online_map); | 450 | cpu_set(smp_processor_id(), cpu_online_map); |
587 | spin_unlock(&call_lock); | 451 | ipi_call_unlock(); |
588 | /* Switch on interrupts */ | 452 | /* Switch on interrupts */ |
589 | local_irq_enable(); | 453 | local_irq_enable(); |
590 | /* Print info about this processor */ | 454 | /* Print info about this processor */ |
@@ -633,18 +497,15 @@ static int __cpuinit smp_alloc_lowcore(int cpu) | |||
633 | 497 | ||
634 | save_area = get_zeroed_page(GFP_KERNEL); | 498 | save_area = get_zeroed_page(GFP_KERNEL); |
635 | if (!save_area) | 499 | if (!save_area) |
636 | goto out_save_area; | 500 | goto out; |
637 | lowcore->extended_save_area_addr = (u32) save_area; | 501 | lowcore->extended_save_area_addr = (u32) save_area; |
638 | } | 502 | } |
639 | #endif | 503 | #endif |
640 | lowcore_ptr[cpu] = lowcore; | 504 | lowcore_ptr[cpu] = lowcore; |
641 | return 0; | 505 | return 0; |
642 | 506 | ||
643 | #ifndef CONFIG_64BIT | ||
644 | out_save_area: | ||
645 | free_page(panic_stack); | ||
646 | #endif | ||
647 | out: | 507 | out: |
508 | free_page(panic_stack); | ||
648 | free_pages(async_stack, ASYNC_ORDER); | 509 | free_pages(async_stack, ASYNC_ORDER); |
649 | free_pages((unsigned long) lowcore, lc_order); | 510 | free_pages((unsigned long) lowcore, lc_order); |
650 | return -ENOMEM; | 511 | return -ENOMEM; |
@@ -684,12 +545,8 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
684 | 545 | ||
685 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), | 546 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), |
686 | cpu, sigp_set_prefix); | 547 | cpu, sigp_set_prefix); |
687 | if (ccode) { | 548 | if (ccode) |
688 | printk("sigp_set_prefix failed for cpu %d " | ||
689 | "with condition code %d\n", | ||
690 | (int) cpu, (int) ccode); | ||
691 | return -EIO; | 549 | return -EIO; |
692 | } | ||
693 | 550 | ||
694 | idle = current_set[cpu]; | 551 | idle = current_set[cpu]; |
695 | cpu_lowcore = lowcore_ptr[cpu]; | 552 | cpu_lowcore = lowcore_ptr[cpu]; |
@@ -772,7 +629,7 @@ void __cpu_die(unsigned int cpu) | |||
772 | while (!smp_cpu_not_running(cpu)) | 629 | while (!smp_cpu_not_running(cpu)) |
773 | cpu_relax(); | 630 | cpu_relax(); |
774 | smp_free_lowcore(cpu); | 631 | smp_free_lowcore(cpu); |
775 | printk(KERN_INFO "Processor %d spun down\n", cpu); | 632 | pr_info("Processor %d stopped\n", cpu); |
776 | } | 633 | } |
777 | 634 | ||
778 | void cpu_die(void) | 635 | void cpu_die(void) |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index f5bd141c8443..d649600df5b9 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -12,6 +12,9 @@ | |||
12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds | 12 | * Copyright (C) 1991, 1992, 1995 Linus Torvalds |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define KMSG_COMPONENT "time" | ||
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
17 | |||
15 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
@@ -20,6 +23,8 @@ | |||
20 | #include <linux/string.h> | 23 | #include <linux/string.h> |
21 | #include <linux/mm.h> | 24 | #include <linux/mm.h> |
22 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/cpu.h> | ||
27 | #include <linux/stop_machine.h> | ||
23 | #include <linux/time.h> | 28 | #include <linux/time.h> |
24 | #include <linux/sysdev.h> | 29 | #include <linux/sysdev.h> |
25 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
@@ -36,6 +41,7 @@ | |||
36 | #include <asm/delay.h> | 41 | #include <asm/delay.h> |
37 | #include <asm/s390_ext.h> | 42 | #include <asm/s390_ext.h> |
38 | #include <asm/div64.h> | 43 | #include <asm/div64.h> |
44 | #include <asm/vdso.h> | ||
39 | #include <asm/irq.h> | 45 | #include <asm/irq.h> |
40 | #include <asm/irq_regs.h> | 46 | #include <asm/irq_regs.h> |
41 | #include <asm/timer.h> | 47 | #include <asm/timer.h> |
@@ -223,6 +229,36 @@ static struct clocksource clocksource_tod = { | |||
223 | }; | 229 | }; |
224 | 230 | ||
225 | 231 | ||
232 | void update_vsyscall(struct timespec *wall_time, struct clocksource *clock) | ||
233 | { | ||
234 | if (clock != &clocksource_tod) | ||
235 | return; | ||
236 | |||
237 | /* Make userspace gettimeofday spin until we're done. */ | ||
238 | ++vdso_data->tb_update_count; | ||
239 | smp_wmb(); | ||
240 | vdso_data->xtime_tod_stamp = clock->cycle_last; | ||
241 | vdso_data->xtime_clock_sec = xtime.tv_sec; | ||
242 | vdso_data->xtime_clock_nsec = xtime.tv_nsec; | ||
243 | vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec; | ||
244 | vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec; | ||
245 | smp_wmb(); | ||
246 | ++vdso_data->tb_update_count; | ||
247 | } | ||
248 | |||
249 | extern struct timezone sys_tz; | ||
250 | |||
251 | void update_vsyscall_tz(void) | ||
252 | { | ||
253 | /* Make userspace gettimeofday spin until we're done. */ | ||
254 | ++vdso_data->tb_update_count; | ||
255 | smp_wmb(); | ||
256 | vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; | ||
257 | vdso_data->tz_dsttime = sys_tz.tz_dsttime; | ||
258 | smp_wmb(); | ||
259 | ++vdso_data->tb_update_count; | ||
260 | } | ||
261 | |||
226 | /* | 262 | /* |
227 | * Initialize the TOD clock and the CPU timer of | 263 | * Initialize the TOD clock and the CPU timer of |
228 | * the boot cpu. | 264 | * the boot cpu. |
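update_vsyscall() hands the new time to the vdso with a seqcount-style protocol: tb_update_count is incremented (becoming odd) before the fields are written and incremented again (becoming even) afterwards, with smp_wmb() between the steps. A reader retries whenever it sees an odd count or a count that changed underneath it. A hedged sketch of the reader side with a simplified field set; the real vdso reader is assembly and the structure layout here is illustrative only:

/* Simplified stand-in for the vdso data page; field names follow the patch. */
struct vdso_data_example {
	unsigned int tb_update_count;
	unsigned long xtime_clock_sec;
	unsigned long xtime_clock_nsec;
};

static void read_time(volatile struct vdso_data_example *vd,
		      unsigned long *sec, unsigned long *nsec)
{
	unsigned int seq;

	do {
		seq = vd->tb_update_count;
		__asm__ __volatile__("" ::: "memory");	/* pairs with the writer's smp_wmb() */
		*sec = vd->xtime_clock_sec;
		*nsec = vd->xtime_clock_nsec;
		__asm__ __volatile__("" ::: "memory");
		/* odd count: update in progress; changed count: an update raced with us */
	} while ((seq & 1) || seq != vd->tb_update_count);
}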
@@ -253,10 +289,8 @@ void __init time_init(void) | |||
253 | 289 | ||
254 | /* Enable TOD clock interrupts on the boot cpu. */ | 290 | /* Enable TOD clock interrupts on the boot cpu. */ |
255 | init_cpu_timer(); | 291 | init_cpu_timer(); |
256 | 292 | /* Enable cpu timer interrupts on the boot cpu. */ | |
257 | #ifdef CONFIG_VIRT_TIMER | ||
258 | vtime_init(); | 293 | vtime_init(); |
259 | #endif | ||
260 | } | 294 | } |
261 | 295 | ||
262 | /* | 296 | /* |
@@ -288,8 +322,8 @@ static unsigned long long adjust_time(unsigned long long old, | |||
288 | } | 322 | } |
289 | sched_clock_base_cc += delta; | 323 | sched_clock_base_cc += delta; |
290 | if (adjust.offset != 0) { | 324 | if (adjust.offset != 0) { |
291 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | 325 | pr_notice("The ETR interface has adjusted the clock " |
292 | adjust.offset); | 326 | "by %li microseconds\n", adjust.offset); |
293 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | 327 | adjust.modes = ADJ_OFFSET_SINGLESHOT; |
294 | do_adjtimex(&adjust); | 328 | do_adjtimex(&adjust); |
295 | } | 329 | } |
@@ -360,6 +394,15 @@ static void enable_sync_clock(void) | |||
360 | atomic_set_mask(0x80000000, sw_ptr); | 394 | atomic_set_mask(0x80000000, sw_ptr); |
361 | } | 395 | } |
362 | 396 | ||
397 | /* Single threaded workqueue used for etr and stp sync events */ | ||
398 | static struct workqueue_struct *time_sync_wq; | ||
399 | |||
400 | static void __init time_init_wq(void) | ||
401 | { | ||
402 | if (!time_sync_wq) | ||
403 | time_sync_wq = create_singlethread_workqueue("timesync"); | ||
404 | } | ||
405 | |||
363 | /* | 406 | /* |
364 | * External Time Reference (ETR) code. | 407 | * External Time Reference (ETR) code. |
365 | */ | 408 | */ |
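Clock-sync events are no longer pushed onto the shared keventd queue with schedule_work(); they go to a dedicated single-threaded workqueue, so the ETR and STP work items are serialized with each other and cannot tie up keventd while they sleep. A minimal sketch of the pattern with hypothetical names:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *sync_wq;	/* hypothetical */

static void sync_work_fn(struct work_struct *work)
{
	/* long-running, sleeping work executes here, one item at a time */
}
static DECLARE_WORK(sync_work, sync_work_fn);

static int __init sync_setup(void)
{
	sync_wq = create_singlethread_workqueue("syncd");
	if (!sync_wq)
		return -ENOMEM;
	queue_work(sync_wq, &sync_work);
	return 0;
}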
@@ -425,6 +468,7 @@ static struct timer_list etr_timer; | |||
425 | 468 | ||
426 | static void etr_timeout(unsigned long dummy); | 469 | static void etr_timeout(unsigned long dummy); |
427 | static void etr_work_fn(struct work_struct *work); | 470 | static void etr_work_fn(struct work_struct *work); |
471 | static DEFINE_MUTEX(etr_work_mutex); | ||
428 | static DECLARE_WORK(etr_work, etr_work_fn); | 472 | static DECLARE_WORK(etr_work, etr_work_fn); |
429 | 473 | ||
430 | /* | 474 | /* |
@@ -440,8 +484,8 @@ static void etr_reset(void) | |||
440 | etr_tolec = get_clock(); | 484 | etr_tolec = get_clock(); |
441 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); | 485 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); |
442 | } else if (etr_port0_online || etr_port1_online) { | 486 | } else if (etr_port0_online || etr_port1_online) { |
443 | printk(KERN_WARNING "Running on non ETR capable " | 487 | pr_warning("The real or virtual hardware system does " |
444 | "machine, only local mode available.\n"); | 488 | "not provide an ETR interface\n"); |
445 | etr_port0_online = etr_port1_online = 0; | 489 | etr_port0_online = etr_port1_online = 0; |
446 | } | 490 | } |
447 | } | 491 | } |
@@ -452,17 +496,18 @@ static int __init etr_init(void) | |||
452 | 496 | ||
453 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) | 497 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
454 | return 0; | 498 | return 0; |
499 | time_init_wq(); | ||
455 | /* Check if this machine has the steai instruction. */ | 500 | /* Check if this machine has the steai instruction. */ |
456 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | 501 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) |
457 | etr_steai_available = 1; | 502 | etr_steai_available = 1; |
458 | setup_timer(&etr_timer, etr_timeout, 0UL); | 503 | setup_timer(&etr_timer, etr_timeout, 0UL); |
459 | if (etr_port0_online) { | 504 | if (etr_port0_online) { |
460 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 505 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
461 | schedule_work(&etr_work); | 506 | queue_work(time_sync_wq, &etr_work); |
462 | } | 507 | } |
463 | if (etr_port1_online) { | 508 | if (etr_port1_online) { |
464 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 509 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
465 | schedule_work(&etr_work); | 510 | queue_work(time_sync_wq, &etr_work); |
466 | } | 511 | } |
467 | return 0; | 512 | return 0; |
468 | } | 513 | } |
@@ -489,7 +534,7 @@ void etr_switch_to_local(void) | |||
489 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | 534 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
490 | disable_sync_clock(NULL); | 535 | disable_sync_clock(NULL); |
491 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); | 536 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); |
492 | schedule_work(&etr_work); | 537 | queue_work(time_sync_wq, &etr_work); |
493 | } | 538 | } |
494 | 539 | ||
495 | /* | 540 | /* |
@@ -505,7 +550,7 @@ void etr_sync_check(void) | |||
505 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | 550 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
506 | disable_sync_clock(NULL); | 551 | disable_sync_clock(NULL); |
507 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); | 552 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); |
508 | schedule_work(&etr_work); | 553 | queue_work(time_sync_wq, &etr_work); |
509 | } | 554 | } |
510 | 555 | ||
511 | /* | 556 | /* |
@@ -529,13 +574,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm) | |||
529 | * Both ports are not up-to-date now. | 574 | * Both ports are not up-to-date now. |
530 | */ | 575 | */ |
531 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); | 576 | set_bit(ETR_EVENT_PORT_ALERT, &etr_events); |
532 | schedule_work(&etr_work); | 577 | queue_work(time_sync_wq, &etr_work); |
533 | } | 578 | } |
534 | 579 | ||
535 | static void etr_timeout(unsigned long dummy) | 580 | static void etr_timeout(unsigned long dummy) |
536 | { | 581 | { |
537 | set_bit(ETR_EVENT_UPDATE, &etr_events); | 582 | set_bit(ETR_EVENT_UPDATE, &etr_events); |
538 | schedule_work(&etr_work); | 583 | queue_work(time_sync_wq, &etr_work); |
539 | } | 584 | } |
540 | 585 | ||
541 | /* | 586 | /* |
@@ -642,14 +687,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | |||
642 | } | 687 | } |
643 | 688 | ||
644 | struct clock_sync_data { | 689 | struct clock_sync_data { |
690 | atomic_t cpus; | ||
645 | int in_sync; | 691 | int in_sync; |
646 | unsigned long long fixup_cc; | 692 | unsigned long long fixup_cc; |
693 | int etr_port; | ||
694 | struct etr_aib *etr_aib; | ||
647 | }; | 695 | }; |
648 | 696 | ||
649 | static void clock_sync_cpu_start(void *dummy) | 697 | static void clock_sync_cpu(struct clock_sync_data *sync) |
650 | { | 698 | { |
651 | struct clock_sync_data *sync = dummy; | 699 | atomic_dec(&sync->cpus); |
652 | |||
653 | enable_sync_clock(); | 700 | enable_sync_clock(); |
654 | /* | 701 | /* |
655 | * This looks like a busy wait loop but it isn't. etr_sync_cpus | 702 | * This looks like a busy wait loop but it isn't. etr_sync_cpus |
@@ -675,39 +722,35 @@ static void clock_sync_cpu_start(void *dummy) | |||
675 | fixup_clock_comparator(sync->fixup_cc); | 722 | fixup_clock_comparator(sync->fixup_cc); |
676 | } | 723 | } |
677 | 724 | ||
678 | static void clock_sync_cpu_end(void *dummy) | ||
679 | { | ||
680 | } | ||
681 | |||
682 | /* | 725 | /* |
683 | * Sync the TOD clock using the port refered to by aibp. This port | 726 | * Sync the TOD clock using the port refered to by aibp. This port |
684 | * has to be enabled and the other port has to be disabled. The | 727 | * has to be enabled and the other port has to be disabled. The |
685 | * last eacr update has to be more than 1.6 seconds in the past. | 728 | * last eacr update has to be more than 1.6 seconds in the past. |
686 | */ | 729 | */ |
687 | static int etr_sync_clock(struct etr_aib *aib, int port) | 730 | static int etr_sync_clock(void *data) |
688 | { | 731 | { |
689 | struct etr_aib *sync_port; | 732 | static int first; |
690 | struct clock_sync_data etr_sync; | ||
691 | unsigned long long clock, old_clock, delay, delta; | 733 | unsigned long long clock, old_clock, delay, delta; |
692 | int follows; | 734 | struct clock_sync_data *etr_sync; |
735 | struct etr_aib *sync_port, *aib; | ||
736 | int port; | ||
693 | int rc; | 737 | int rc; |
694 | 738 | ||
695 | /* Check if the current aib is adjacent to the sync port aib. */ | 739 | etr_sync = data; |
696 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
697 | follows = etr_aib_follows(sync_port, aib, port); | ||
698 | memcpy(sync_port, aib, sizeof(*aib)); | ||
699 | if (!follows) | ||
700 | return -EAGAIN; | ||
701 | 740 | ||
702 | /* | 741 | if (xchg(&first, 1) == 1) { |
703 | * Catch all other cpus and make them wait until we have | 742 | /* Slave */ |
704 | * successfully synced the clock. smp_call_function will | 743 | clock_sync_cpu(etr_sync); |
705 | * return after all other cpus are in etr_sync_cpu_start. | 744 | return 0; |
706 | */ | 745 | } |
707 | memset(&etr_sync, 0, sizeof(etr_sync)); | 746 | |
708 | preempt_disable(); | 747 | /* Wait until all other cpus entered the sync function. */ |
709 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0); | 748 | while (atomic_read(&etr_sync->cpus) != 0) |
710 | local_irq_disable(); | 749 | cpu_relax(); |
750 | |||
751 | port = etr_sync->etr_port; | ||
752 | aib = etr_sync->etr_aib; | ||
753 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
711 | enable_sync_clock(); | 754 | enable_sync_clock(); |
712 | 755 | ||
713 | /* Set clock to next OTE. */ | 756 | /* Set clock to next OTE. */ |
@@ -724,16 +767,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
724 | delay = (unsigned long long) | 767 | delay = (unsigned long long) |
725 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 768 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
726 | delta = adjust_time(old_clock, clock, delay); | 769 | delta = adjust_time(old_clock, clock, delay); |
727 | etr_sync.fixup_cc = delta; | 770 | etr_sync->fixup_cc = delta; |
728 | fixup_clock_comparator(delta); | 771 | fixup_clock_comparator(delta); |
729 | /* Verify that the clock is properly set. */ | 772 | /* Verify that the clock is properly set. */ |
730 | if (!etr_aib_follows(sync_port, aib, port)) { | 773 | if (!etr_aib_follows(sync_port, aib, port)) { |
731 | /* Didn't work. */ | 774 | /* Didn't work. */ |
732 | disable_sync_clock(NULL); | 775 | disable_sync_clock(NULL); |
733 | etr_sync.in_sync = -EAGAIN; | 776 | etr_sync->in_sync = -EAGAIN; |
734 | rc = -EAGAIN; | 777 | rc = -EAGAIN; |
735 | } else { | 778 | } else { |
736 | etr_sync.in_sync = 1; | 779 | etr_sync->in_sync = 1; |
737 | rc = 0; | 780 | rc = 0; |
738 | } | 781 | } |
739 | } else { | 782 | } else { |
@@ -741,12 +784,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
741 | __ctl_clear_bit(0, 29); | 784 | __ctl_clear_bit(0, 29); |
742 | __ctl_clear_bit(14, 21); | 785 | __ctl_clear_bit(14, 21); |
743 | disable_sync_clock(NULL); | 786 | disable_sync_clock(NULL); |
744 | etr_sync.in_sync = -EAGAIN; | 787 | etr_sync->in_sync = -EAGAIN; |
745 | rc = -EAGAIN; | 788 | rc = -EAGAIN; |
746 | } | 789 | } |
747 | local_irq_enable(); | 790 | xchg(&first, 0); |
748 | smp_call_function(clock_sync_cpu_end, NULL, 0); | 791 | return rc; |
749 | preempt_enable(); | 792 | } |
793 | |||
794 | static int etr_sync_clock_stop(struct etr_aib *aib, int port) | ||
795 | { | ||
796 | struct clock_sync_data etr_sync; | ||
797 | struct etr_aib *sync_port; | ||
798 | int follows; | ||
799 | int rc; | ||
800 | |||
801 | /* Check if the current aib is adjacent to the sync port aib. */ | ||
802 | sync_port = (port == 0) ? &etr_port0 : &etr_port1; | ||
803 | follows = etr_aib_follows(sync_port, aib, port); | ||
804 | memcpy(sync_port, aib, sizeof(*aib)); | ||
805 | if (!follows) | ||
806 | return -EAGAIN; | ||
807 | memset(&etr_sync, 0, sizeof(etr_sync)); | ||
808 | etr_sync.etr_aib = aib; | ||
809 | etr_sync.etr_port = port; | ||
810 | get_online_cpus(); | ||
811 | atomic_set(&etr_sync.cpus, num_online_cpus() - 1); | ||
812 | rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map); | ||
813 | put_online_cpus(); | ||
750 | return rc; | 814 | return rc; |
751 | } | 815 | } |
752 | 816 | ||
@@ -903,7 +967,7 @@ static void etr_update_eacr(struct etr_eacr eacr) | |||
903 | } | 967 | } |
904 | 968 | ||
905 | /* | 969 | /* |
906 | * ETR tasklet. In this function you'll find the main logic. In | 970 | * ETR work. In this function you'll find the main logic. In |
907 | * particular this is the only function that calls etr_update_eacr(), | 971 | * particular this is the only function that calls etr_update_eacr(), |
908 | * it "controls" the etr control register. | 972 | * it "controls" the etr control register. |
909 | */ | 973 | */ |
@@ -914,6 +978,9 @@ static void etr_work_fn(struct work_struct *work) | |||
914 | struct etr_aib aib; | 978 | struct etr_aib aib; |
915 | int sync_port; | 979 | int sync_port; |
916 | 980 | ||
981 | /* prevent multiple execution. */ | ||
982 | mutex_lock(&etr_work_mutex); | ||
983 | |||
917 | /* Create working copy of etr_eacr. */ | 984 | /* Create working copy of etr_eacr. */ |
918 | eacr = etr_eacr; | 985 | eacr = etr_eacr; |
919 | 986 | ||
@@ -929,7 +996,7 @@ static void etr_work_fn(struct work_struct *work) | |||
929 | del_timer_sync(&etr_timer); | 996 | del_timer_sync(&etr_timer); |
930 | etr_update_eacr(eacr); | 997 | etr_update_eacr(eacr); |
931 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 998 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
932 | return; | 999 | goto out_unlock; |
933 | } | 1000 | } |
934 | 1001 | ||
935 | /* Store aib to get the current ETR status word. */ | 1002 | /* Store aib to get the current ETR status word. */ |
@@ -1016,7 +1083,7 @@ static void etr_work_fn(struct work_struct *work) | |||
1016 | eacr.es || sync_port < 0) { | 1083 | eacr.es || sync_port < 0) { |
1017 | etr_update_eacr(eacr); | 1084 | etr_update_eacr(eacr); |
1018 | etr_set_tolec_timeout(now); | 1085 | etr_set_tolec_timeout(now); |
1019 | return; | 1086 | goto out_unlock; |
1020 | } | 1087 | } |
1021 | 1088 | ||
1022 | /* | 1089 | /* |
@@ -1036,7 +1103,7 @@ static void etr_work_fn(struct work_struct *work) | |||
1036 | etr_update_eacr(eacr); | 1103 | etr_update_eacr(eacr); |
1037 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 1104 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
1038 | if (now < etr_tolec + (1600000 << 12) || | 1105 | if (now < etr_tolec + (1600000 << 12) || |
1039 | etr_sync_clock(&aib, sync_port) != 0) { | 1106 | etr_sync_clock_stop(&aib, sync_port) != 0) { |
1040 | /* Sync failed. Try again in 1/2 second. */ | 1107 | /* Sync failed. Try again in 1/2 second. */ |
1041 | eacr.es = 0; | 1108 | eacr.es = 0; |
1042 | etr_update_eacr(eacr); | 1109 | etr_update_eacr(eacr); |
@@ -1044,6 +1111,8 @@ static void etr_work_fn(struct work_struct *work) | |||
1044 | etr_set_sync_timeout(); | 1111 | etr_set_sync_timeout(); |
1045 | } else | 1112 | } else |
1046 | etr_set_tolec_timeout(now); | 1113 | etr_set_tolec_timeout(now); |
1114 | out_unlock: | ||
1115 | mutex_unlock(&etr_work_mutex); | ||
1047 | } | 1116 | } |
1048 | 1117 | ||
1049 | /* | 1118 | /* |
@@ -1125,13 +1194,13 @@ static ssize_t etr_online_store(struct sys_device *dev, | |||
1125 | return count; /* Nothing to do. */ | 1194 | return count; /* Nothing to do. */ |
1126 | etr_port0_online = value; | 1195 | etr_port0_online = value; |
1127 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 1196 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
1128 | schedule_work(&etr_work); | 1197 | queue_work(time_sync_wq, &etr_work); |
1129 | } else { | 1198 | } else { |
1130 | if (etr_port1_online == value) | 1199 | if (etr_port1_online == value) |
1131 | return count; /* Nothing to do. */ | 1200 | return count; /* Nothing to do. */ |
1132 | etr_port1_online = value; | 1201 | etr_port1_online = value; |
1133 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); | 1202 | set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); |
1134 | schedule_work(&etr_work); | 1203 | queue_work(time_sync_wq, &etr_work); |
1135 | } | 1204 | } |
1136 | return count; | 1205 | return count; |
1137 | } | 1206 | } |
@@ -1332,6 +1401,7 @@ static struct stp_sstpi stp_info; | |||
1332 | static void *stp_page; | 1401 | static void *stp_page; |
1333 | 1402 | ||
1334 | static void stp_work_fn(struct work_struct *work); | 1403 | static void stp_work_fn(struct work_struct *work); |
1404 | static DEFINE_MUTEX(stp_work_mutex); | ||
1335 | static DECLARE_WORK(stp_work, stp_work_fn); | 1405 | static DECLARE_WORK(stp_work, stp_work_fn); |
1336 | 1406 | ||
1337 | static int __init early_parse_stp(char *p) | 1407 | static int __init early_parse_stp(char *p) |
@@ -1356,7 +1426,8 @@ static void __init stp_reset(void) | |||
1356 | if (rc == 0) | 1426 | if (rc == 0) |
1357 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); | 1427 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); |
1358 | else if (stp_online) { | 1428 | else if (stp_online) { |
1359 | printk(KERN_WARNING "Running on non STP capable machine.\n"); | 1429 | pr_warning("The real or virtual hardware system does " |
1430 | "not provide an STP interface\n"); | ||
1360 | free_bootmem((unsigned long) stp_page, PAGE_SIZE); | 1431 | free_bootmem((unsigned long) stp_page, PAGE_SIZE); |
1361 | stp_page = NULL; | 1432 | stp_page = NULL; |
1362 | stp_online = 0; | 1433 | stp_online = 0; |
@@ -1365,8 +1436,12 @@ static void __init stp_reset(void) | |||
1365 | 1436 | ||
1366 | static int __init stp_init(void) | 1437 | static int __init stp_init(void) |
1367 | { | 1438 | { |
1368 | if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online) | 1439 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
1369 | schedule_work(&stp_work); | 1440 | return 0; |
1441 | time_init_wq(); | ||
1442 | if (!stp_online) | ||
1443 | return 0; | ||
1444 | queue_work(time_sync_wq, &stp_work); | ||
1370 | return 0; | 1445 | return 0; |
1371 | } | 1446 | } |
1372 | 1447 | ||
@@ -1383,7 +1458,7 @@ arch_initcall(stp_init); | |||
1383 | static void stp_timing_alert(struct stp_irq_parm *intparm) | 1458 | static void stp_timing_alert(struct stp_irq_parm *intparm) |
1384 | { | 1459 | { |
1385 | if (intparm->tsc || intparm->lac || intparm->tcpc) | 1460 | if (intparm->tsc || intparm->lac || intparm->tcpc) |
1386 | schedule_work(&stp_work); | 1461 | queue_work(time_sync_wq, &stp_work); |
1387 | } | 1462 | } |
1388 | 1463 | ||
1389 | /* | 1464 | /* |
@@ -1397,7 +1472,7 @@ void stp_sync_check(void) | |||
1397 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | 1472 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) |
1398 | return; | 1473 | return; |
1399 | disable_sync_clock(NULL); | 1474 | disable_sync_clock(NULL); |
1400 | schedule_work(&stp_work); | 1475 | queue_work(time_sync_wq, &stp_work); |
1401 | } | 1476 | } |
1402 | 1477 | ||
1403 | /* | 1478 | /* |
@@ -1411,46 +1486,34 @@ void stp_island_check(void) | |||
1411 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | 1486 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) |
1412 | return; | 1487 | return; |
1413 | disable_sync_clock(NULL); | 1488 | disable_sync_clock(NULL); |
1414 | schedule_work(&stp_work); | 1489 | queue_work(time_sync_wq, &stp_work); |
1415 | } | 1490 | } |
1416 | 1491 | ||
1417 | /* | 1492 | |
1418 | * STP tasklet. Check for the STP state and take over the clock | 1493 | static int stp_sync_clock(void *data) |
1419 | * synchronization if the STP clock source is usable. | ||
1420 | */ | ||
1421 | static void stp_work_fn(struct work_struct *work) | ||
1422 | { | 1494 | { |
1423 | struct clock_sync_data stp_sync; | 1495 | static int first; |
1424 | unsigned long long old_clock, delta; | 1496 | unsigned long long old_clock, delta; |
1497 | struct clock_sync_data *stp_sync; | ||
1425 | int rc; | 1498 | int rc; |
1426 | 1499 | ||
1427 | if (!stp_online) { | 1500 | stp_sync = data; |
1428 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
1429 | return; | ||
1430 | } | ||
1431 | 1501 | ||
1432 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | 1502 | if (xchg(&first, 1) == 1) { |
1433 | if (rc) | 1503 | /* Slave */ |
1434 | return; | 1504 | clock_sync_cpu(stp_sync); |
1505 | return 0; | ||
1506 | } | ||
1435 | 1507 | ||
1436 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | 1508 | /* Wait until all other cpus entered the sync function. */ |
1437 | if (rc || stp_info.c == 0) | 1509 | while (atomic_read(&stp_sync->cpus) != 0) |
1438 | return; | 1510 | cpu_relax(); |
1439 | 1511 | ||
1440 | /* | ||
1441 | * Catch all other cpus and make them wait until we have | ||
1442 | * successfully synced the clock. smp_call_function will | ||
1443 | * return after all other cpus are in clock_sync_cpu_start. | ||
1444 | */ | ||
1445 | memset(&stp_sync, 0, sizeof(stp_sync)); | ||
1446 | preempt_disable(); | ||
1447 | smp_call_function(clock_sync_cpu_start, &stp_sync, 0); | ||
1448 | local_irq_disable(); | ||
1449 | enable_sync_clock(); | 1512 | enable_sync_clock(); |
1450 | 1513 | ||
1451 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); | 1514 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); |
1452 | if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | 1515 | if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
1453 | schedule_work(&etr_work); | 1516 | queue_work(time_sync_wq, &etr_work); |
1454 | 1517 | ||
1455 | rc = 0; | 1518 | rc = 0; |
1456 | if (stp_info.todoff[0] || stp_info.todoff[1] || | 1519 | if (stp_info.todoff[0] || stp_info.todoff[1] || |
@@ -1469,16 +1532,49 @@ static void stp_work_fn(struct work_struct *work) | |||
1469 | } | 1532 | } |
1470 | if (rc) { | 1533 | if (rc) { |
1471 | disable_sync_clock(NULL); | 1534 | disable_sync_clock(NULL); |
1472 | stp_sync.in_sync = -EAGAIN; | 1535 | stp_sync->in_sync = -EAGAIN; |
1473 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); | 1536 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); |
1474 | if (etr_port0_online || etr_port1_online) | 1537 | if (etr_port0_online || etr_port1_online) |
1475 | schedule_work(&etr_work); | 1538 | queue_work(time_sync_wq, &etr_work); |
1476 | } else | 1539 | } else |
1477 | stp_sync.in_sync = 1; | 1540 | stp_sync->in_sync = 1; |
1541 | xchg(&first, 0); | ||
1542 | return 0; | ||
1543 | } | ||
1544 | |||
1545 | /* | ||
1546 | * STP work. Check for the STP state and take over the clock | ||
1547 | * synchronization if the STP clock source is usable. | ||
1548 | */ | ||
1549 | static void stp_work_fn(struct work_struct *work) | ||
1550 | { | ||
1551 | struct clock_sync_data stp_sync; | ||
1552 | int rc; | ||
1553 | |||
1554 | /* prevent multiple execution. */ | ||
1555 | mutex_lock(&stp_work_mutex); | ||
1556 | |||
1557 | if (!stp_online) { | ||
1558 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
1559 | goto out_unlock; | ||
1560 | } | ||
1561 | |||
1562 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | ||
1563 | if (rc) | ||
1564 | goto out_unlock; | ||
1565 | |||
1566 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | ||
1567 | if (rc || stp_info.c == 0) | ||
1568 | goto out_unlock; | ||
1569 | |||
1570 | memset(&stp_sync, 0, sizeof(stp_sync)); | ||
1571 | get_online_cpus(); | ||
1572 | atomic_set(&stp_sync.cpus, num_online_cpus() - 1); | ||
1573 | stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map); | ||
1574 | put_online_cpus(); | ||
1478 | 1575 | ||
1479 | local_irq_enable(); | 1576 | out_unlock: |
1480 | smp_call_function(clock_sync_cpu_end, NULL, 0); | 1577 | mutex_unlock(&stp_work_mutex); |
1481 | preempt_enable(); | ||
1482 | } | 1578 | } |
1483 | 1579 | ||
1484 | /* | 1580 | /* |
@@ -1587,7 +1683,7 @@ static ssize_t stp_online_store(struct sysdev_class *class, | |||
1587 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | 1683 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) |
1588 | return -EOPNOTSUPP; | 1684 | return -EOPNOTSUPP; |
1589 | stp_online = value; | 1685 | stp_online = value; |
1590 | schedule_work(&stp_work); | 1686 | queue_work(time_sync_wq, &stp_work); |
1591 | return count; | 1687 | return count; |
1592 | } | 1688 | } |
1593 | 1689 | ||
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 0601cd3231e4..cc362c9ea8f1 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -3,6 +3,9 @@ | |||
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #define KMSG_COMPONENT "cpu" | ||
7 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
8 | |||
6 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
7 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
8 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -12,6 +15,7 @@ | |||
12 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
13 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
14 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/cpuset.h> | ||
15 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
16 | #include <asm/s390_ext.h> | 20 | #include <asm/s390_ext.h> |
17 | #include <asm/sysinfo.h> | 21 | #include <asm/sysinfo.h> |
@@ -57,11 +61,11 @@ struct core_info { | |||
57 | cpumask_t mask; | 61 | cpumask_t mask; |
58 | }; | 62 | }; |
59 | 63 | ||
64 | static int topology_enabled; | ||
60 | static void topology_work_fn(struct work_struct *work); | 65 | static void topology_work_fn(struct work_struct *work); |
61 | static struct tl_info *tl_info; | 66 | static struct tl_info *tl_info; |
62 | static struct core_info core_info; | 67 | static struct core_info core_info; |
63 | static int machine_has_topology; | 68 | static int machine_has_topology; |
64 | static int machine_has_topology_irq; | ||
65 | static struct timer_list topology_timer; | 69 | static struct timer_list topology_timer; |
66 | static void set_topology_timer(void); | 70 | static void set_topology_timer(void); |
67 | static DECLARE_WORK(topology_work, topology_work_fn); | 71 | static DECLARE_WORK(topology_work, topology_work_fn); |
@@ -77,8 +81,8 @@ cpumask_t cpu_coregroup_map(unsigned int cpu) | |||
77 | cpumask_t mask; | 81 | cpumask_t mask; |
78 | 82 | ||
79 | cpus_clear(mask); | 83 | cpus_clear(mask); |
80 | if (!machine_has_topology) | 84 | if (!topology_enabled || !machine_has_topology) |
81 | return cpu_present_map; | 85 | return cpu_possible_map; |
82 | spin_lock_irqsave(&topology_lock, flags); | 86 | spin_lock_irqsave(&topology_lock, flags); |
83 | while (core) { | 87 | while (core) { |
84 | if (cpu_isset(cpu, core->mask)) { | 88 | if (cpu_isset(cpu, core->mask)) { |
@@ -173,7 +177,7 @@ static void topology_update_polarization_simple(void) | |||
173 | int cpu; | 177 | int cpu; |
174 | 178 | ||
175 | mutex_lock(&smp_cpu_state_mutex); | 179 | mutex_lock(&smp_cpu_state_mutex); |
176 | for_each_present_cpu(cpu) | 180 | for_each_possible_cpu(cpu) |
177 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | 181 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; |
178 | mutex_unlock(&smp_cpu_state_mutex); | 182 | mutex_unlock(&smp_cpu_state_mutex); |
179 | } | 183 | } |
@@ -204,7 +208,7 @@ int topology_set_cpu_management(int fc) | |||
204 | rc = ptf(PTF_HORIZONTAL); | 208 | rc = ptf(PTF_HORIZONTAL); |
205 | if (rc) | 209 | if (rc) |
206 | return -EBUSY; | 210 | return -EBUSY; |
207 | for_each_present_cpu(cpu) | 211 | for_each_possible_cpu(cpu) |
208 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | 212 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; |
209 | return rc; | 213 | return rc; |
210 | } | 214 | } |
@@ -213,11 +217,11 @@ static void update_cpu_core_map(void) | |||
213 | { | 217 | { |
214 | int cpu; | 218 | int cpu; |
215 | 219 | ||
216 | for_each_present_cpu(cpu) | 220 | for_each_possible_cpu(cpu) |
217 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | 221 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); |
218 | } | 222 | } |
219 | 223 | ||
220 | void arch_update_cpu_topology(void) | 224 | int arch_update_cpu_topology(void) |
221 | { | 225 | { |
222 | struct tl_info *info = tl_info; | 226 | struct tl_info *info = tl_info; |
223 | struct sys_device *sysdev; | 227 | struct sys_device *sysdev; |
@@ -226,7 +230,7 @@ void arch_update_cpu_topology(void) | |||
226 | if (!machine_has_topology) { | 230 | if (!machine_has_topology) { |
227 | update_cpu_core_map(); | 231 | update_cpu_core_map(); |
228 | topology_update_polarization_simple(); | 232 | topology_update_polarization_simple(); |
229 | return; | 233 | return 0; |
230 | } | 234 | } |
231 | stsi(info, 15, 1, 2); | 235 | stsi(info, 15, 1, 2); |
232 | tl_to_cores(info); | 236 | tl_to_cores(info); |
@@ -235,11 +239,12 @@ void arch_update_cpu_topology(void) | |||
235 | sysdev = get_cpu_sysdev(cpu); | 239 | sysdev = get_cpu_sysdev(cpu); |
236 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | 240 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); |
237 | } | 241 | } |
242 | return 1; | ||
238 | } | 243 | } |
239 | 244 | ||
240 | static void topology_work_fn(struct work_struct *work) | 245 | static void topology_work_fn(struct work_struct *work) |
241 | { | 246 | { |
242 | arch_reinit_sched_domains(); | 247 | rebuild_sched_domains(); |
243 | } | 248 | } |
244 | 249 | ||
245 | void topology_schedule_update(void) | 250 | void topology_schedule_update(void) |
@@ -262,10 +267,14 @@ static void set_topology_timer(void) | |||
262 | add_timer(&topology_timer); | 267 | add_timer(&topology_timer); |
263 | } | 268 | } |
264 | 269 | ||
265 | static void topology_interrupt(__u16 code) | 270 | static int __init early_parse_topology(char *p) |
266 | { | 271 | { |
267 | schedule_work(&topology_work); | 272 | if (strncmp(p, "on", 2)) |
273 | return 0; | ||
274 | topology_enabled = 1; | ||
275 | return 0; | ||
268 | } | 276 | } |
277 | early_param("topology", early_parse_topology); | ||
269 | 278 | ||
270 | static int __init init_topology_update(void) | 279 | static int __init init_topology_update(void) |
271 | { | 280 | { |
@@ -277,14 +286,7 @@ static int __init init_topology_update(void) | |||
277 | goto out; | 286 | goto out; |
278 | } | 287 | } |
279 | init_timer_deferrable(&topology_timer); | 288 | init_timer_deferrable(&topology_timer); |
280 | if (machine_has_topology_irq) { | 289 | set_topology_timer(); |
281 | rc = register_external_interrupt(0x2005, topology_interrupt); | ||
282 | if (rc) | ||
283 | goto out; | ||
284 | ctl_set_bit(0, 8); | ||
285 | } | ||
286 | else | ||
287 | set_topology_timer(); | ||
288 | out: | 290 | out: |
289 | update_cpu_core_map(); | 291 | update_cpu_core_map(); |
290 | return rc; | 292 | return rc; |
@@ -305,9 +307,6 @@ void __init s390_init_cpu_topology(void) | |||
305 | return; | 307 | return; |
306 | machine_has_topology = 1; | 308 | machine_has_topology = 1; |
307 | 309 | ||
308 | if (facility_bits & (1ULL << 51)) | ||
309 | machine_has_topology_irq = 1; | ||
310 | |||
311 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | 310 | tl_info = alloc_bootmem_pages(PAGE_SIZE); |
312 | info = tl_info; | 311 | info = tl_info; |
313 | stsi(info, 15, 1, 2); | 312 | stsi(info, 15, 1, 2); |
@@ -316,7 +315,7 @@ void __init s390_init_cpu_topology(void) | |||
316 | for (i = 0; i < info->mnest - 2; i++) | 315 | for (i = 0; i < info->mnest - 2; i++) |
317 | nr_cores *= info->mag[NR_MAG - 3 - i]; | 316 | nr_cores *= info->mag[NR_MAG - 3 - i]; |
318 | 317 | ||
319 | printk(KERN_INFO "CPU topology:"); | 318 | pr_info("The CPU configuration topology of the machine is:"); |
320 | for (i = 0; i < NR_MAG; i++) | 319 | for (i = 0; i < NR_MAG; i++) |
321 | printk(" %d", info->mag[i]); | 320 | printk(" %d", info->mag[i]); |
322 | printk(" / %d\n", info->mnest); | 321 | printk(" / %d\n", info->mnest); |
@@ -331,5 +330,4 @@ void __init s390_init_cpu_topology(void) | |||
331 | return; | 330 | return; |
332 | error: | 331 | error: |
333 | machine_has_topology = 0; | 332 | machine_has_topology = 0; |
334 | machine_has_topology_irq = 0; | ||
335 | } | 333 | } |
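Note on the topology.c changes above: with the topology-change external interrupt (code 0x2005) removed, rescanning now relies solely on the deferrable timer, and the whole feature is gated by the new "topology" early parameter; cpu_coregroup_map() returns cpu_possible_map unless the machine reports topology data and topology_enabled was set. Per early_parse_topology(), only a value beginning with "on" enables it, so an administrator who wants the scheduler to see the machine-reported core layout would append something like the line below to the kernel parameters (the zipl.conf-style syntax and the other parameters are only an illustration, not part of this patch):

	parameters = "dasd=0150 root=/dev/dasda1 topology=on"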
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c new file mode 100644 index 000000000000..10a6ccef4412 --- /dev/null +++ b/arch/s390/kernel/vdso.c | |||
@@ -0,0 +1,234 @@ | |||
1 | /* | ||
2 | * vdso setup for s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2008 | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License (version 2 only) | ||
9 | * as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/unistd.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/user.h> | ||
22 | #include <linux/elf.h> | ||
23 | #include <linux/security.h> | ||
24 | #include <linux/bootmem.h> | ||
25 | |||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/mmu.h> | ||
30 | #include <asm/mmu_context.h> | ||
31 | #include <asm/sections.h> | ||
32 | #include <asm/vdso.h> | ||
33 | |||
34 | /* Max supported size for symbol names */ | ||
35 | #define MAX_SYMNAME 64 | ||
36 | |||
37 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | ||
38 | extern char vdso32_start, vdso32_end; | ||
39 | static void *vdso32_kbase = &vdso32_start; | ||
40 | static unsigned int vdso32_pages; | ||
41 | static struct page **vdso32_pagelist; | ||
42 | #endif | ||
43 | |||
44 | #ifdef CONFIG_64BIT | ||
45 | extern char vdso64_start, vdso64_end; | ||
46 | static void *vdso64_kbase = &vdso64_start; | ||
47 | static unsigned int vdso64_pages; | ||
48 | static struct page **vdso64_pagelist; | ||
49 | #endif /* CONFIG_64BIT */ | ||
50 | |||
51 | /* | ||
52 | * Should the kernel map a VDSO page into processes and pass its | ||
53 | * address down to glibc upon exec()? | ||
54 | */ | ||
55 | unsigned int __read_mostly vdso_enabled = 1; | ||
56 | |||
57 | static int __init vdso_setup(char *s) | ||
58 | { | ||
59 | vdso_enabled = simple_strtoul(s, NULL, 0); | ||
60 | return 1; | ||
61 | } | ||
62 | __setup("vdso=", vdso_setup); | ||
63 | |||
64 | /* | ||
65 | * The vdso data page | ||
66 | */ | ||
67 | static union { | ||
68 | struct vdso_data data; | ||
69 | u8 page[PAGE_SIZE]; | ||
70 | } vdso_data_store __attribute__((__section__(".data.page_aligned"))); | ||
71 | struct vdso_data *vdso_data = &vdso_data_store.data; | ||
72 | |||
73 | /* | ||
74 | * This is called from binfmt_elf, we create the special vma for the | ||
75 | * vDSO and insert it into the mm struct tree | ||
76 | */ | ||
77 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
78 | { | ||
79 | struct mm_struct *mm = current->mm; | ||
80 | struct page **vdso_pagelist; | ||
81 | unsigned long vdso_pages; | ||
82 | unsigned long vdso_base; | ||
83 | int rc; | ||
84 | |||
85 | if (!vdso_enabled) | ||
86 | return 0; | ||
87 | /* | ||
88 | * Only map the vdso for dynamically linked elf binaries. | ||
89 | */ | ||
90 | if (!uses_interp) | ||
91 | return 0; | ||
92 | |||
93 | vdso_base = mm->mmap_base; | ||
94 | #ifdef CONFIG_64BIT | ||
95 | vdso_pagelist = vdso64_pagelist; | ||
96 | vdso_pages = vdso64_pages; | ||
97 | #ifdef CONFIG_COMPAT | ||
98 | if (test_thread_flag(TIF_31BIT)) { | ||
99 | vdso_pagelist = vdso32_pagelist; | ||
100 | vdso_pages = vdso32_pages; | ||
101 | } | ||
102 | #endif | ||
103 | #else | ||
104 | vdso_pagelist = vdso32_pagelist; | ||
105 | vdso_pages = vdso32_pages; | ||
106 | #endif | ||
107 | |||
108 | /* | ||
109 | * If the vDSO has a problem and was disabled, just don't "enable" it for | ||
110 | * the process | ||
111 | */ | ||
112 | if (vdso_pages == 0) | ||
113 | return 0; | ||
114 | |||
115 | current->mm->context.vdso_base = 0; | ||
116 | |||
117 | /* | ||
118 | * pick a base address for the vDSO in process space. We try to put | ||
119 | * it at vdso_base which is the "natural" base for it, but we might | ||
120 | * fail and end up putting it elsewhere. | ||
121 | */ | ||
122 | down_write(&mm->mmap_sem); | ||
123 | vdso_base = get_unmapped_area(NULL, vdso_base, | ||
124 | vdso_pages << PAGE_SHIFT, 0, 0); | ||
125 | if (IS_ERR_VALUE(vdso_base)) { | ||
126 | rc = vdso_base; | ||
127 | goto out_up; | ||
128 | } | ||
129 | |||
130 | /* | ||
131 | * Our vma flags don't have VM_WRITE, so by default the process | ||
132 | * isn't allowed to write those pages. | ||
133 | * gdb can break that with the ptrace interface and thus trigger COW | ||
134 | * on those pages, but it's then your responsibility to never do that | ||
135 | * on the "data" page of the vDSO or you'll stop getting kernel | ||
136 | * updates and your nice userland gettimeofday will be totally dead. | ||
137 | * It's fine to use that for setting breakpoints in the vDSO code | ||
138 | * pages, though. | ||
139 | * | ||
140 | * Make sure the vDSO gets into every core dump. | ||
141 | * Dumping its contents makes post-mortem fully interpretable later | ||
142 | * without matching up the same kernel and hardware config to see | ||
143 | * what PC values meant. | ||
144 | */ | ||
145 | rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, | ||
146 | VM_READ|VM_EXEC| | ||
147 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | ||
148 | VM_ALWAYSDUMP, | ||
149 | vdso_pagelist); | ||
150 | if (rc) | ||
151 | goto out_up; | ||
152 | |||
153 | /* Put vDSO base into mm struct */ | ||
154 | current->mm->context.vdso_base = vdso_base; | ||
155 | |||
156 | up_write(&mm->mmap_sem); | ||
157 | return 0; | ||
158 | |||
159 | out_up: | ||
160 | up_write(&mm->mmap_sem); | ||
161 | return rc; | ||
162 | } | ||
163 | |||
164 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
165 | { | ||
166 | if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base) | ||
167 | return "[vdso]"; | ||
168 | return NULL; | ||
169 | } | ||
170 | |||
171 | static int __init vdso_init(void) | ||
172 | { | ||
173 | int i; | ||
174 | |||
175 | #if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT) | ||
176 | /* Calculate the size of the 32 bit vDSO */ | ||
177 | vdso32_pages = ((&vdso32_end - &vdso32_start | ||
178 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | ||
179 | |||
180 | /* Make sure pages are in the correct state */ | ||
181 | vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1), | ||
182 | GFP_KERNEL); | ||
183 | BUG_ON(vdso32_pagelist == NULL); | ||
184 | for (i = 0; i < vdso32_pages - 1; i++) { | ||
185 | struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); | ||
186 | ClearPageReserved(pg); | ||
187 | get_page(pg); | ||
188 | vdso32_pagelist[i] = pg; | ||
189 | } | ||
190 | vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data); | ||
191 | vdso32_pagelist[vdso32_pages] = NULL; | ||
192 | #endif | ||
193 | |||
194 | #ifdef CONFIG_64BIT | ||
195 | /* Calculate the size of the 64 bit vDSO */ | ||
196 | vdso64_pages = ((&vdso64_end - &vdso64_start | ||
197 | + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; | ||
198 | |||
199 | /* Make sure pages are in the correct state */ | ||
200 | vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1), | ||
201 | GFP_KERNEL); | ||
202 | BUG_ON(vdso64_pagelist == NULL); | ||
203 | for (i = 0; i < vdso64_pages - 1; i++) { | ||
204 | struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE); | ||
205 | ClearPageReserved(pg); | ||
206 | get_page(pg); | ||
207 | vdso64_pagelist[i] = pg; | ||
208 | } | ||
209 | vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data); | ||
210 | vdso64_pagelist[vdso64_pages] = NULL; | ||
211 | #endif /* CONFIG_64BIT */ | ||
212 | |||
213 | get_page(virt_to_page(vdso_data)); | ||
214 | |||
215 | smp_wmb(); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | arch_initcall(vdso_init); | ||
220 | |||
221 | int in_gate_area_no_task(unsigned long addr) | ||
222 | { | ||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | int in_gate_area(struct task_struct *task, unsigned long addr) | ||
227 | { | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | ||
232 | { | ||
233 | return NULL; | ||
234 | } | ||
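vdso.c above wires the vDSO into exec(): unless "vdso=0" was given on the command line (vdso_setup()), arch_setup_additional_pages() installs the page list as a special mapping at an mmap-allocated base, and arch_vma_name() labels that vma "[vdso]". A quick way to see the result from userspace is to scan /proc/self/maps for that label; the program below is an illustrative sketch, not part of the patch:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "[vdso]"))
				fputs(line, stdout);	/* address range of the vDSO mapping */
		fclose(f);
		return 0;
	}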
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile new file mode 100644 index 000000000000..ca78ad60ba24 --- /dev/null +++ b/arch/s390/kernel/vdso32/Makefile | |||
@@ -0,0 +1,55 @@ | |||
1 | # List of files in the vdso; it has to be asm only for now | ||
2 | |||
3 | obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o | ||
4 | |||
5 | # Build rules | ||
6 | |||
7 | targets := $(obj-vdso32) vdso32.so vdso32.so.dbg | ||
8 | obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) | ||
9 | |||
10 | KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS)) | ||
11 | KBUILD_AFLAGS_31 += -m31 -s | ||
12 | |||
13 | KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) | ||
14 | KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin | ||
15 | KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ | ||
16 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
17 | |||
18 | $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) | ||
19 | $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) | ||
20 | |||
21 | obj-y += vdso32_wrapper.o | ||
22 | extra-y += vdso32.lds | ||
23 | CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) | ||
24 | |||
25 | # Force dependency (incbin is bad) | ||
26 | $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so | ||
27 | |||
28 | # link rule for the .so file, .lds has to be first | ||
29 | $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) | ||
30 | $(call if_changed,vdso32ld) | ||
31 | |||
32 | # strip rule for the .so file | ||
33 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
34 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
35 | $(call if_changed,objcopy) | ||
36 | |||
37 | # assembly rules for the .S files | ||
38 | $(obj-vdso32): %.o: %.S | ||
39 | $(call if_changed_dep,vdso32as) | ||
40 | |||
41 | # actual build commands | ||
42 | quiet_cmd_vdso32ld = VDSO32L $@ | ||
43 | cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | ||
44 | quiet_cmd_vdso32as = VDSO32A $@ | ||
45 | cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< | ||
46 | |||
47 | # install commands for the unstripped file | ||
48 | quiet_cmd_vdso_install = INSTALL $@ | ||
49 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
50 | |||
51 | vdso32.so: $(obj)/vdso32.so.dbg | ||
52 | @mkdir -p $(MODLIB)/vdso | ||
53 | $(call cmd,vdso_install) | ||
54 | |||
55 | vdso_install: vdso32.so | ||
diff --git a/arch/s390/kernel/vdso32/clock_getres.S b/arch/s390/kernel/vdso32/clock_getres.S new file mode 100644 index 000000000000..9532c4e6a9d2 --- /dev/null +++ b/arch/s390/kernel/vdso32/clock_getres.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_getres() for 32-bit processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_getres | ||
19 | .type __kernel_clock_getres,@function | ||
20 | __kernel_clock_getres: | ||
21 | .cfi_startproc | ||
22 | chi %r2,CLOCK_REALTIME | ||
23 | je 0f | ||
24 | chi %r2,CLOCK_MONOTONIC | ||
25 | jne 3f | ||
26 | 0: ltr %r3,%r3 | ||
27 | jz 2f /* res == NULL */ | ||
28 | basr %r1,0 | ||
29 | 1: l %r0,4f-1b(%r1) | ||
30 | xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ | ||
31 | st %r0,4(%r3) /* store tp->tv_nsec */ | ||
32 | 2: lhi %r2,0 | ||
33 | br %r14 | ||
34 | 3: lhi %r1,__NR_clock_getres /* fallback to svc */ | ||
35 | svc 0 | ||
36 | br %r14 | ||
37 | 4: .long CLOCK_REALTIME_RES | ||
38 | .cfi_endproc | ||
39 | .size __kernel_clock_getres,.-__kernel_clock_getres | ||
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S new file mode 100644 index 000000000000..4a98909a8310 --- /dev/null +++ b/arch/s390/kernel/vdso32/clock_gettime.S | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_gettime() for 32-bit processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_gettime | ||
19 | .type __kernel_clock_gettime,@function | ||
20 | __kernel_clock_gettime: | ||
21 | .cfi_startproc | ||
22 | basr %r5,0 | ||
23 | 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ | ||
24 | chi %r2,CLOCK_REALTIME | ||
25 | je 10f | ||
26 | chi %r2,CLOCK_MONOTONIC | ||
27 | jne 19f | ||
28 | |||
29 | /* CLOCK_MONOTONIC */ | ||
30 | ltr %r3,%r3 | ||
31 | jz 9f /* tp == NULL */ | ||
32 | 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | ||
33 | tml %r4,0x0001 /* pending update ? loop */ | ||
34 | jnz 1b | ||
35 | stck 24(%r15) /* Store TOD clock */ | ||
36 | lm %r0,%r1,24(%r15) | ||
37 | s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
38 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | ||
39 | brc 3,2f | ||
40 | ahi %r0,-1 | ||
41 | 2: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | ||
42 | lr %r2,%r0 | ||
43 | lhi %r0,1000 | ||
44 | ltr %r1,%r1 | ||
45 | mr %r0,%r0 | ||
46 | jnm 3f | ||
47 | ahi %r0,1000 | ||
48 | 3: alr %r0,%r2 | ||
49 | srdl %r0,12 | ||
50 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
51 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
52 | brc 12,4f | ||
53 | ahi %r0,1 | ||
54 | 4: l %r2,__VDSO_XTIME_SEC+4(%r5) | ||
55 | al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ | ||
56 | al %r1,__VDSO_WTOM_NSEC+4(%r5) | ||
57 | brc 12,5f | ||
58 | ahi %r0,1 | ||
59 | 5: al %r2,__VDSO_WTOM_SEC+4(%r5) | ||
60 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | ||
61 | jne 1b | ||
62 | basr %r5,0 | ||
63 | 6: ltr %r0,%r0 | ||
64 | jnz 7f | ||
65 | cl %r1,20f-6b(%r5) | ||
66 | jl 8f | ||
67 | 7: ahi %r2,1 | ||
68 | sl %r1,20f-6b(%r5) | ||
69 | brc 3,6b | ||
70 | ahi %r0,-1 | ||
71 | j 6b | ||
72 | 8: st %r2,0(%r3) /* store tp->tv_sec */ | ||
73 | st %r1,4(%r3) /* store tp->tv_nsec */ | ||
74 | 9: lhi %r2,0 | ||
75 | br %r14 | ||
76 | |||
77 | /* CLOCK_REALTIME */ | ||
78 | 10: ltr %r3,%r3 /* tp == NULL */ | ||
79 | jz 18f | ||
80 | 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | ||
81 | tml %r4,0x0001 /* pending update ? loop */ | ||
82 | jnz 11b | ||
83 | stck 24(%r15) /* Store TOD clock */ | ||
84 | lm %r0,%r1,24(%r15) | ||
85 | s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
86 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | ||
87 | brc 3,12f | ||
88 | ahi %r0,-1 | ||
89 | 12: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | ||
90 | lr %r2,%r0 | ||
91 | lhi %r0,1000 | ||
92 | ltr %r1,%r1 | ||
93 | mr %r0,%r0 | ||
94 | jnm 13f | ||
95 | ahi %r0,1000 | ||
96 | 13: alr %r0,%r2 | ||
97 | srdl %r0,12 | ||
98 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
99 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
100 | brc 12,14f | ||
101 | ahi %r0,1 | ||
102 | 14: l %r2,__VDSO_XTIME_SEC+4(%r5) | ||
103 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | ||
104 | jne 11b | ||
105 | basr %r5,0 | ||
106 | 15: ltr %r0,%r0 | ||
107 | jnz 16f | ||
108 | cl %r1,20f-15b(%r5) | ||
109 | jl 17f | ||
110 | 16: ahi %r2,1 | ||
111 | sl %r1,20f-15b(%r5) | ||
112 | brc 3,15b | ||
113 | ahi %r0,-1 | ||
114 | j 15b | ||
115 | 17: st %r2,0(%r3) /* store tp->tv_sec */ | ||
116 | st %r1,4(%r3) /* store tp->tv_nsec */ | ||
117 | 18: lhi %r2,0 | ||
118 | br %r14 | ||
119 | |||
120 | /* Fallback to system call */ | ||
121 | 19: lhi %r1,__NR_clock_gettime | ||
122 | svc 0 | ||
123 | br %r14 | ||
124 | |||
125 | 20: .long 1000000000 | ||
126 | 21: .long _vdso_data - 0b | ||
127 | .cfi_endproc | ||
128 | .size __kernel_clock_gettime,.-__kernel_clock_gettime | ||
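Both code paths above read the shared vdso_data page with a lockless retry protocol: the update counter is sampled first, an odd value means the kernel is presumably in the middle of an update (so the reader spins), the time fields are read, and the counter is compared again at the end; a mismatch restarts the whole sequence. In C the same seqcount-style reader looks roughly like the sketch below; the field names are illustrative (not the real asm-offsets layout) and the memory barriers of the real code are omitted for brevity:

	#include <stdint.h>

	/* Illustrative stand-in for the vdso_data time fields. */
	struct vdso_time {
		volatile uint64_t update_count;	/* odd while the kernel updates */
		uint64_t xtime_sec;
		uint64_t xtime_nsec;
	};

	static void read_time(const struct vdso_time *v, uint64_t *sec, uint64_t *nsec)
	{
		uint64_t seq;

		do {
			do {
				seq = v->update_count;
			} while (seq & 1);		/* pending update? loop */
			*sec = v->xtime_sec;
			*nsec = v->xtime_nsec;
		} while (seq != v->update_count);	/* changed meanwhile? retry */
	}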
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S new file mode 100644 index 000000000000..c32f29c3d70c --- /dev/null +++ b/arch/s390/kernel/vdso32/gettimeofday.S | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * Userland implementation of gettimeofday() for 32-bit processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
20 | .text | ||
21 | .align 4 | ||
22 | .globl __kernel_gettimeofday | ||
23 | .type __kernel_gettimeofday,@function | ||
24 | __kernel_gettimeofday: | ||
25 | .cfi_startproc | ||
26 | basr %r5,0 | ||
27 | 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ | ||
28 | 1: ltr %r3,%r3 /* check if tz is NULL */ | ||
29 | je 2f | ||
30 | mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) | ||
31 | 2: ltr %r2,%r2 /* check if tv is NULL */ | ||
32 | je 10f | ||
33 | l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | ||
34 | tml %r4,0x0001 /* pending update ? loop */ | ||
35 | jnz 1b | ||
36 | stck 24(%r15) /* Store TOD clock */ | ||
37 | lm %r0,%r1,24(%r15) | ||
38 | s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
39 | sl %r1,__VDSO_XTIME_STAMP+4(%r5) | ||
40 | brc 3,3f | ||
41 | ahi %r0,-1 | ||
42 | 3: mhi %r0,1000 /* cyc2ns(clock,cycle_delta) */ | ||
43 | st %r0,24(%r15) | ||
44 | lhi %r0,1000 | ||
45 | ltr %r1,%r1 | ||
46 | mr %r0,%r0 | ||
47 | jnm 4f | ||
48 | ahi %r0,1000 | ||
49 | 4: al %r0,24(%r15) | ||
50 | srdl %r0,12 | ||
51 | al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
52 | al %r1,__VDSO_XTIME_NSEC+4(%r5) | ||
53 | brc 12,5f | ||
54 | ahi %r0,1 | ||
55 | 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) | ||
56 | cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ | ||
57 | jne 1b | ||
58 | l %r4,24(%r15) /* get tv_sec from stack */ | ||
59 | basr %r5,0 | ||
60 | 6: ltr %r0,%r0 | ||
61 | jnz 7f | ||
62 | cl %r1,11f-6b(%r5) | ||
63 | jl 8f | ||
64 | 7: ahi %r4,1 | ||
65 | sl %r1,11f-6b(%r5) | ||
66 | brc 3,6b | ||
67 | ahi %r0,-1 | ||
68 | j 6b | ||
69 | 8: st %r4,0(%r2) /* store tv->tv_sec */ | ||
70 | ltr %r1,%r1 | ||
71 | m %r0,12f-6b(%r5) | ||
72 | jnm 9f | ||
73 | al %r0,12f-6b(%r5) | ||
74 | 9: srl %r0,6 | ||
75 | st %r0,4(%r2) /* store tv->tv_usec */ | ||
76 | 10: slr %r2,%r2 | ||
77 | br %r14 | ||
78 | 11: .long 1000000000 | ||
79 | 12: .long 274877907 | ||
80 | 13: .long _vdso_data - 0b | ||
81 | .cfi_endproc | ||
82 | .size __kernel_gettimeofday,.-__kernel_gettimeofday | ||
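The tv_usec computation at the end of __kernel_gettimeofday avoids a divide: after the nanosecond value has been normalized below 10^9, it is multiplied by 274877907 (2^38/1000 rounded to the nearest integer) and the high word of the 64-bit product is shifted right by a further 6 bits, for a total shift of 38, which equals ns/1000 for every ns below 10^9. The small host-side check below only illustrates that identity and is not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 274877907 == round(2^38 / 1000); multiply-and-shift replaces ns / 1000 */
		for (uint64_t ns = 0; ns < 1000000000ULL; ns++)
			if ((ns * 274877907ULL) >> 38 != ns / 1000) {
				printf("mismatch at %llu\n", (unsigned long long)ns);
				return 1;
			}
		puts("ns * 274877907 >> 38 == ns / 1000 for all ns < 1e9");
		return 0;
	}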
diff --git a/arch/s390/kernel/vdso32/note.S b/arch/s390/kernel/vdso32/note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/s390/kernel/vdso32/note.S | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | #include <linux/elfnote.h> | ||
9 | |||
10 | ELFNOTE_START(Linux, 0, "a") | ||
11 | .long LINUX_VERSION_CODE | ||
12 | ELFNOTE_END | ||
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S new file mode 100644 index 000000000000..a8c379fa1247 --- /dev/null +++ b/arch/s390/kernel/vdso32/vdso32.lds.S | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * This is the infamous ld script for the 32-bit vdso | ||
3 | * library | ||
4 | */ | ||
5 | #include <asm/vdso.h> | ||
6 | |||
7 | OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") | ||
8 | OUTPUT_ARCH(s390:31-bit) | ||
9 | ENTRY(_start) | ||
10 | |||
11 | SECTIONS | ||
12 | { | ||
13 | . = VDSO32_LBASE + SIZEOF_HEADERS; | ||
14 | |||
15 | .hash : { *(.hash) } :text | ||
16 | .gnu.hash : { *(.gnu.hash) } | ||
17 | .dynsym : { *(.dynsym) } | ||
18 | .dynstr : { *(.dynstr) } | ||
19 | .gnu.version : { *(.gnu.version) } | ||
20 | .gnu.version_d : { *(.gnu.version_d) } | ||
21 | .gnu.version_r : { *(.gnu.version_r) } | ||
22 | |||
23 | .note : { *(.note.*) } :text :note | ||
24 | |||
25 | . = ALIGN(16); | ||
26 | .text : { | ||
27 | *(.text .stub .text.* .gnu.linkonce.t.*) | ||
28 | } :text | ||
29 | PROVIDE(__etext = .); | ||
30 | PROVIDE(_etext = .); | ||
31 | PROVIDE(etext = .); | ||
32 | |||
33 | /* | ||
34 | * Other stuff is appended to the text segment: | ||
35 | */ | ||
36 | .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } | ||
37 | .rodata1 : { *(.rodata1) } | ||
38 | |||
39 | .dynamic : { *(.dynamic) } :text :dynamic | ||
40 | |||
41 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
42 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
43 | .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } | ||
44 | |||
45 | .rela.dyn ALIGN(8) : { *(.rela.dyn) } | ||
46 | .got ALIGN(8) : { *(.got .toc) } | ||
47 | |||
48 | _end = .; | ||
49 | PROVIDE(end = .); | ||
50 | |||
51 | /* | ||
52 | * Stabs debugging sections are here too. | ||
53 | */ | ||
54 | .stab 0 : { *(.stab) } | ||
55 | .stabstr 0 : { *(.stabstr) } | ||
56 | .stab.excl 0 : { *(.stab.excl) } | ||
57 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
58 | .stab.index 0 : { *(.stab.index) } | ||
59 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
60 | .comment 0 : { *(.comment) } | ||
61 | |||
62 | /* | ||
63 | * DWARF debug sections. | ||
64 | * Symbols in the DWARF debugging sections are relative to the | ||
65 | * beginning of the section so we begin them at 0. | ||
66 | */ | ||
67 | /* DWARF 1 */ | ||
68 | .debug 0 : { *(.debug) } | ||
69 | .line 0 : { *(.line) } | ||
70 | /* GNU DWARF 1 extensions */ | ||
71 | .debug_srcinfo 0 : { *(.debug_srcinfo) } | ||
72 | .debug_sfnames 0 : { *(.debug_sfnames) } | ||
73 | /* DWARF 1.1 and DWARF 2 */ | ||
74 | .debug_aranges 0 : { *(.debug_aranges) } | ||
75 | .debug_pubnames 0 : { *(.debug_pubnames) } | ||
76 | /* DWARF 2 */ | ||
77 | .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } | ||
78 | .debug_abbrev 0 : { *(.debug_abbrev) } | ||
79 | .debug_line 0 : { *(.debug_line) } | ||
80 | .debug_frame 0 : { *(.debug_frame) } | ||
81 | .debug_str 0 : { *(.debug_str) } | ||
82 | .debug_loc 0 : { *(.debug_loc) } | ||
83 | .debug_macinfo 0 : { *(.debug_macinfo) } | ||
84 | /* SGI/MIPS DWARF 2 extensions */ | ||
85 | .debug_weaknames 0 : { *(.debug_weaknames) } | ||
86 | .debug_funcnames 0 : { *(.debug_funcnames) } | ||
87 | .debug_typenames 0 : { *(.debug_typenames) } | ||
88 | .debug_varnames 0 : { *(.debug_varnames) } | ||
89 | /* DWARF 3 */ | ||
90 | .debug_pubtypes 0 : { *(.debug_pubtypes) } | ||
91 | .debug_ranges 0 : { *(.debug_ranges) } | ||
92 | .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } | ||
93 | |||
94 | . = ALIGN(4096); | ||
95 | PROVIDE(_vdso_data = .); | ||
96 | |||
97 | /DISCARD/ : { | ||
98 | *(.note.GNU-stack) | ||
99 | *(.branch_lt) | ||
100 | *(.data .data.* .gnu.linkonce.d.* .sdata*) | ||
101 | *(.bss .sbss .dynbss .dynsbss) | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Very old versions of ld do not recognize this name token; use the constant. | ||
107 | */ | ||
108 | #define PT_GNU_EH_FRAME 0x6474e550 | ||
109 | |||
110 | /* | ||
111 | * We must supply the ELF program headers explicitly to get just one | ||
112 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
113 | */ | ||
114 | PHDRS | ||
115 | { | ||
116 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | ||
117 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
118 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
119 | eh_frame_hdr PT_GNU_EH_FRAME; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * This controls what symbols we export from the DSO. | ||
124 | */ | ||
125 | VERSION | ||
126 | { | ||
127 | VDSO_VERSION_STRING { | ||
128 | global: | ||
129 | /* | ||
130 | * Has to be there for the kernel to find | ||
131 | */ | ||
132 | __kernel_gettimeofday; | ||
133 | __kernel_clock_gettime; | ||
134 | __kernel_clock_getres; | ||
135 | |||
136 | local: *; | ||
137 | }; | ||
138 | } | ||
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S new file mode 100644 index 000000000000..61639a89e70b --- /dev/null +++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <asm/page.h> | ||
3 | |||
4 | .section ".data.page_aligned" | ||
5 | |||
6 | .globl vdso32_start, vdso32_end | ||
7 | .balign PAGE_SIZE | ||
8 | vdso32_start: | ||
9 | .incbin "arch/s390/kernel/vdso32/vdso32.so" | ||
10 | .balign PAGE_SIZE | ||
11 | vdso32_end: | ||
12 | |||
13 | .previous | ||
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile new file mode 100644 index 000000000000..6fc8e829258c --- /dev/null +++ b/arch/s390/kernel/vdso64/Makefile | |||
@@ -0,0 +1,55 @@ | |||
1 | # List of files in the vdso; it has to be asm only for now | ||
2 | |||
3 | obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o | ||
4 | |||
5 | # Build rules | ||
6 | |||
7 | targets := $(obj-vdso64) vdso64.so vdso64.so.dbg | ||
8 | obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) | ||
9 | |||
10 | KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS)) | ||
11 | KBUILD_AFLAGS_64 += -m64 -s | ||
12 | |||
13 | KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) | ||
14 | KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin | ||
15 | KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ | ||
16 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
17 | |||
18 | $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) | ||
19 | $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64) | ||
20 | |||
21 | obj-y += vdso64_wrapper.o | ||
22 | extra-y += vdso64.lds | ||
23 | CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) | ||
24 | |||
25 | # Force dependency (incbin is bad) | ||
26 | $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so | ||
27 | |||
28 | # link rule for the .so file, .lds has to be first | ||
29 | $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) | ||
30 | $(call if_changed,vdso64ld) | ||
31 | |||
32 | # strip rule for the .so file | ||
33 | $(obj)/%.so: OBJCOPYFLAGS := -S | ||
34 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | ||
35 | $(call if_changed,objcopy) | ||
36 | |||
37 | # assembly rules for the .S files | ||
38 | $(obj-vdso64): %.o: %.S | ||
39 | $(call if_changed_dep,vdso64as) | ||
40 | |||
41 | # actual build commands | ||
42 | quiet_cmd_vdso64ld = VDSO64L $@ | ||
43 | cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | ||
44 | quiet_cmd_vdso64as = VDSO64A $@ | ||
45 | cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< | ||
46 | |||
47 | # install commands for the unstripped file | ||
48 | quiet_cmd_vdso_install = INSTALL $@ | ||
49 | cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ | ||
50 | |||
51 | vdso64.so: $(obj)/vdso64.so.dbg | ||
52 | @mkdir -p $(MODLIB)/vdso | ||
53 | $(call cmd,vdso_install) | ||
54 | |||
55 | vdso_install: vdso64.so | ||
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S new file mode 100644 index 000000000000..488e31a3c0e7 --- /dev/null +++ b/arch/s390/kernel/vdso64/clock_getres.S | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_getres() for 64-bit processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_getres | ||
19 | .type __kernel_clock_getres,@function | ||
20 | __kernel_clock_getres: | ||
21 | .cfi_startproc | ||
22 | cghi %r2,CLOCK_REALTIME | ||
23 | je 0f | ||
24 | cghi %r2,CLOCK_MONOTONIC | ||
25 | jne 2f | ||
26 | 0: ltgr %r3,%r3 | ||
27 | jz 1f /* res == NULL */ | ||
28 | larl %r1,3f | ||
29 | lg %r0,0(%r1) | ||
30 | xc 0(8,%r3),0(%r3) /* set tp->tv_sec to zero */ | ||
31 | stg %r0,8(%r3) /* store tp->tv_nsec */ | ||
32 | 1: lghi %r2,0 | ||
33 | br %r14 | ||
34 | 2: lghi %r1,__NR_clock_getres /* fallback to svc */ | ||
35 | svc 0 | ||
36 | br %r14 | ||
37 | 3: .quad CLOCK_REALTIME_RES | ||
38 | .cfi_endproc | ||
39 | .size __kernel_clock_getres,.-__kernel_clock_getres | ||
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S new file mode 100644 index 000000000000..738a410b7eb2 --- /dev/null +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -0,0 +1,89 @@ | |||
1 | /* | ||
2 | * Userland implementation of clock_gettime() for 64-bit processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_clock_gettime | ||
19 | .type __kernel_clock_gettime,@function | ||
20 | __kernel_clock_gettime: | ||
21 | .cfi_startproc | ||
22 | larl %r5,_vdso_data | ||
23 | cghi %r2,CLOCK_REALTIME | ||
24 | je 4f | ||
25 | cghi %r2,CLOCK_MONOTONIC | ||
26 | jne 9f | ||
27 | |||
28 | /* CLOCK_MONOTONIC */ | ||
29 | ltgr %r3,%r3 | ||
30 | jz 3f /* tp == NULL */ | ||
31 | 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | ||
32 | tmll %r4,0x0001 /* pending update ? loop */ | ||
33 | jnz 0b | ||
34 | stck 48(%r15) /* Store TOD clock */ | ||
35 | lg %r1,48(%r15) | ||
36 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
37 | mghi %r1,1000 | ||
38 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | ||
39 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
40 | lg %r0,__VDSO_XTIME_SEC(%r5) | ||
41 | alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ | ||
42 | alg %r0,__VDSO_WTOM_SEC(%r5) | ||
43 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | ||
44 | jne 0b | ||
45 | larl %r5,10f | ||
46 | 1: clg %r1,0(%r5) | ||
47 | jl 2f | ||
48 | slg %r1,0(%r5) | ||
49 | aghi %r0,1 | ||
50 | j 1b | ||
51 | 2: stg %r0,0(%r3) /* store tp->tv_sec */ | ||
52 | stg %r1,8(%r3) /* store tp->tv_nsec */ | ||
53 | 3: lghi %r2,0 | ||
54 | br %r14 | ||
55 | |||
56 | /* CLOCK_REALTIME */ | ||
57 | 4: ltr %r3,%r3 /* tp == NULL */ | ||
58 | jz 8f | ||
59 | 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | ||
60 | tmll %r4,0x0001 /* pending update ? loop */ | ||
61 | jnz 5b | ||
62 | stck 48(%r15) /* Store TOD clock */ | ||
63 | lg %r1,48(%r15) | ||
64 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
65 | mghi %r1,1000 | ||
66 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | ||
67 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ | ||
68 | lg %r0,__VDSO_XTIME_SEC(%r5) | ||
69 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | ||
70 | jne 5b | ||
71 | larl %r5,10f | ||
72 | 6: clg %r1,0(%r5) | ||
73 | jl 7f | ||
74 | slg %r1,0(%r5) | ||
75 | aghi %r0,1 | ||
76 | j 6b | ||
77 | 7: stg %r0,0(%r3) /* store tp->tv_sec */ | ||
78 | stg %r1,8(%r3) /* store tp->tv_nsec */ | ||
79 | 8: lghi %r2,0 | ||
80 | br %r14 | ||
81 | |||
82 | /* Fallback to system call */ | ||
83 | 9: lghi %r1,__NR_clock_gettime | ||
84 | svc 0 | ||
85 | br %r14 | ||
86 | |||
87 | 10: .quad 1000000000 | ||
88 | .cfi_endproc | ||
89 | .size __kernel_clock_gettime,.-__kernel_clock_gettime | ||
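The cyc2ns step in the 64-bit path (mghi %r1,1000 followed by srlg %r1,%r1,12) follows from the z/Architecture TOD clock format: bit 51 of the TOD value ticks once per microsecond, so one unit of the 64-bit value is 1/4096 microsecond and a delta of d units equals d * 1000 / 4096 nanoseconds. The helper below restates that conversion in C purely for illustration; it is not part of the patch:

	#include <stdint.h>

	/* Convert a TOD-clock delta to nanoseconds: bit 51 ticks every microsecond,
	 * so one TOD unit is 1/4096 us, i.e. 1000/4096 ns. */
	static inline uint64_t tod_delta_to_ns(uint64_t delta)
	{
		return (delta * 1000) >> 12;
	}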
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S new file mode 100644 index 000000000000..f873e75634e1 --- /dev/null +++ b/arch/s390/kernel/vdso64/gettimeofday.S | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Userland implementation of gettimeofday() for 64-bit processes in a | ||
3 | * s390 kernel for use in the vDSO | ||
4 | * | ||
5 | * Copyright IBM Corp. 2008 | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License (version 2 only) | ||
10 | * as published by the Free Software Foundation. | ||
11 | */ | ||
12 | #include <asm/vdso.h> | ||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/unistd.h> | ||
15 | |||
16 | .text | ||
17 | .align 4 | ||
18 | .globl __kernel_gettimeofday | ||
19 | .type __kernel_gettimeofday,@function | ||
20 | __kernel_gettimeofday: | ||
21 | .cfi_startproc | ||
22 | larl %r5,_vdso_data | ||
23 | 0: ltgr %r3,%r3 /* check if tz is NULL */ | ||
24 | je 1f | ||
25 | mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) | ||
26 | 1: ltgr %r2,%r2 /* check if tv is NULL */ | ||
27 | je 4f | ||
28 | lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | ||
29 | tmll %r4,0x0001 /* pending update ? loop */ | ||
30 | jnz 0b | ||
31 | stck 48(%r15) /* Store TOD clock */ | ||
32 | lg %r1,48(%r15) | ||
33 | sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ | ||
34 | mghi %r1,1000 | ||
35 | srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ | ||
36 | alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ | ||
37 | lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ | ||
38 | clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ | ||
39 | jne 0b | ||
40 | larl %r5,5f | ||
41 | 2: clg %r1,0(%r5) | ||
42 | jl 3f | ||
43 | slg %r1,0(%r5) | ||
44 | aghi %r0,1 | ||
45 | j 2b | ||
46 | 3: stg %r0,0(%r2) /* store tv->tv_sec */ | ||
47 | slgr %r0,%r0 /* tv_nsec -> tv_usec */ | ||
48 | ml %r0,8(%r5) | ||
49 | srlg %r0,%r0,6 | ||
50 | stg %r0,8(%r2) /* store tv->tv_usec */ | ||
51 | 4: lghi %r2,0 | ||
52 | br %r14 | ||
53 | 5: .quad 1000000000 | ||
54 | .long 274877907 | ||
55 | .cfi_endproc | ||
56 | .size __kernel_gettimeofday,.-__kernel_gettimeofday | ||
diff --git a/arch/s390/kernel/vdso64/note.S b/arch/s390/kernel/vdso64/note.S new file mode 100644 index 000000000000..79a071e4357e --- /dev/null +++ b/arch/s390/kernel/vdso64/note.S | |||
@@ -0,0 +1,12 @@ | |||
1 | /* | ||
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | #include <linux/elfnote.h> | ||
9 | |||
10 | ELFNOTE_START(Linux, 0, "a") | ||
11 | .long LINUX_VERSION_CODE | ||
12 | ELFNOTE_END | ||
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S new file mode 100644 index 000000000000..9f5979d102a9 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64.lds.S | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * This is the infamous ld script for the 64-bit vdso | ||
3 | * library | ||
4 | */ | ||
5 | #include <asm/vdso.h> | ||
6 | |||
7 | OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390") | ||
8 | OUTPUT_ARCH(s390:64-bit) | ||
9 | ENTRY(_start) | ||
10 | |||
11 | SECTIONS | ||
12 | { | ||
13 | . = VDSO64_LBASE + SIZEOF_HEADERS; | ||
14 | |||
15 | .hash : { *(.hash) } :text | ||
16 | .gnu.hash : { *(.gnu.hash) } | ||
17 | .dynsym : { *(.dynsym) } | ||
18 | .dynstr : { *(.dynstr) } | ||
19 | .gnu.version : { *(.gnu.version) } | ||
20 | .gnu.version_d : { *(.gnu.version_d) } | ||
21 | .gnu.version_r : { *(.gnu.version_r) } | ||
22 | |||
23 | .note : { *(.note.*) } :text :note | ||
24 | |||
25 | . = ALIGN(16); | ||
26 | .text : { | ||
27 | *(.text .stub .text.* .gnu.linkonce.t.*) | ||
28 | } :text | ||
29 | PROVIDE(__etext = .); | ||
30 | PROVIDE(_etext = .); | ||
31 | PROVIDE(etext = .); | ||
32 | |||
33 | /* | ||
34 | * Other stuff is appended to the text segment: | ||
35 | */ | ||
36 | .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } | ||
37 | .rodata1 : { *(.rodata1) } | ||
38 | |||
39 | .dynamic : { *(.dynamic) } :text :dynamic | ||
40 | |||
41 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
42 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
43 | .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } | ||
44 | |||
45 | .rela.dyn ALIGN(8) : { *(.rela.dyn) } | ||
46 | .got ALIGN(8) : { *(.got .toc) } | ||
47 | |||
48 | _end = .; | ||
49 | PROVIDE(end = .); | ||
50 | |||
51 | /* | ||
52 | * Stabs debugging sections are here too. | ||
53 | */ | ||
54 | .stab 0 : { *(.stab) } | ||
55 | .stabstr 0 : { *(.stabstr) } | ||
56 | .stab.excl 0 : { *(.stab.excl) } | ||
57 | .stab.exclstr 0 : { *(.stab.exclstr) } | ||
58 | .stab.index 0 : { *(.stab.index) } | ||
59 | .stab.indexstr 0 : { *(.stab.indexstr) } | ||
60 | .comment 0 : { *(.comment) } | ||
61 | |||
62 | /* | ||
63 | * DWARF debug sections. | ||
64 | * Symbols in the DWARF debugging sections are relative to the | ||
65 | * beginning of the section so we begin them at 0. | ||
66 | */ | ||
67 | /* DWARF 1 */ | ||
68 | .debug 0 : { *(.debug) } | ||
69 | .line 0 : { *(.line) } | ||
70 | /* GNU DWARF 1 extensions */ | ||
71 | .debug_srcinfo 0 : { *(.debug_srcinfo) } | ||
72 | .debug_sfnames 0 : { *(.debug_sfnames) } | ||
73 | /* DWARF 1.1 and DWARF 2 */ | ||
74 | .debug_aranges 0 : { *(.debug_aranges) } | ||
75 | .debug_pubnames 0 : { *(.debug_pubnames) } | ||
76 | /* DWARF 2 */ | ||
77 | .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } | ||
78 | .debug_abbrev 0 : { *(.debug_abbrev) } | ||
79 | .debug_line 0 : { *(.debug_line) } | ||
80 | .debug_frame 0 : { *(.debug_frame) } | ||
81 | .debug_str 0 : { *(.debug_str) } | ||
82 | .debug_loc 0 : { *(.debug_loc) } | ||
83 | .debug_macinfo 0 : { *(.debug_macinfo) } | ||
84 | /* SGI/MIPS DWARF 2 extensions */ | ||
85 | .debug_weaknames 0 : { *(.debug_weaknames) } | ||
86 | .debug_funcnames 0 : { *(.debug_funcnames) } | ||
87 | .debug_typenames 0 : { *(.debug_typenames) } | ||
88 | .debug_varnames 0 : { *(.debug_varnames) } | ||
89 | /* DWARF 3 */ | ||
90 | .debug_pubtypes 0 : { *(.debug_pubtypes) } | ||
91 | .debug_ranges 0 : { *(.debug_ranges) } | ||
92 | .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } | ||
93 | |||
94 | . = ALIGN(4096); | ||
95 | PROVIDE(_vdso_data = .); | ||
96 | |||
97 | /DISCARD/ : { | ||
98 | *(.note.GNU-stack) | ||
99 | *(.branch_lt) | ||
100 | *(.data .data.* .gnu.linkonce.d.* .sdata*) | ||
101 | *(.bss .sbss .dynbss .dynsbss) | ||
102 | } | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Very old versions of ld do not recognize this name token; use the constant. | ||
107 | */ | ||
108 | #define PT_GNU_EH_FRAME 0x6474e550 | ||
109 | |||
110 | /* | ||
111 | * We must supply the ELF program headers explicitly to get just one | ||
112 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
113 | */ | ||
114 | PHDRS | ||
115 | { | ||
116 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | ||
117 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
118 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
119 | eh_frame_hdr PT_GNU_EH_FRAME; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * This controls what symbols we export from the DSO. | ||
124 | */ | ||
125 | VERSION | ||
126 | { | ||
127 | VDSO_VERSION_STRING { | ||
128 | global: | ||
129 | /* | ||
130 | * Has to be there for the kernel to find | ||
131 | */ | ||
132 | __kernel_gettimeofday; | ||
133 | __kernel_clock_gettime; | ||
134 | __kernel_clock_getres; | ||
135 | |||
136 | local: *; | ||
137 | }; | ||
138 | } | ||
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S new file mode 100644 index 000000000000..d8e2ac14d564 --- /dev/null +++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S | |||
@@ -0,0 +1,13 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <asm/page.h> | ||
3 | |||
4 | .section ".data.page_aligned" | ||
5 | |||
6 | .globl vdso64_start, vdso64_end | ||
7 | .balign PAGE_SIZE | ||
8 | vdso64_start: | ||
9 | .incbin "arch/s390/kernel/vdso64/vdso64.so" | ||
10 | .balign PAGE_SIZE | ||
11 | vdso64_end: | ||
12 | |||
13 | .previous | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 0fa5dc5d68e1..75a6e62ea973 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -27,7 +27,6 @@ | |||
27 | static ext_int_info_t ext_int_info_timer; | 27 | static ext_int_info_t ext_int_info_timer; |
28 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | 28 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); |
29 | 29 | ||
30 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
31 | /* | 30 | /* |
32 | * Update process times based on virtual cpu times stored by entry.S | 31 | * Update process times based on virtual cpu times stored by entry.S |
33 | * to the lowcore fields user_timer, system_timer & steal_clock. | 32 | * to the lowcore fields user_timer, system_timer & steal_clock. |
@@ -125,16 +124,6 @@ static inline void set_vtimer(__u64 expires) | |||
125 | /* store expire time for this CPU timer */ | 124 | /* store expire time for this CPU timer */ |
126 | __get_cpu_var(virt_cpu_timer).to_expire = expires; | 125 | __get_cpu_var(virt_cpu_timer).to_expire = expires; |
127 | } | 126 | } |
128 | #else | ||
129 | static inline void set_vtimer(__u64 expires) | ||
130 | { | ||
131 | S390_lowcore.last_update_timer = expires; | ||
132 | asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); | ||
133 | |||
134 | /* store expire time for this CPU timer */ | ||
135 | __get_cpu_var(virt_cpu_timer).to_expire = expires; | ||
136 | } | ||
137 | #endif | ||
138 | 127 | ||
139 | void vtime_start_cpu_timer(void) | 128 | void vtime_start_cpu_timer(void) |
140 | { | 129 | { |
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 580fc64cc735..5c8457129603 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * (C) IBM Corporation 2002-2004 | 7 | * (C) IBM Corporation 2002-2004 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "extmem" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/string.h> | 14 | #include <linux/string.h> |
12 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
@@ -24,19 +27,6 @@ | |||
24 | #include <asm/cpcmd.h> | 27 | #include <asm/cpcmd.h> |
25 | #include <asm/setup.h> | 28 | #include <asm/setup.h> |
26 | 29 | ||
27 | #define DCSS_DEBUG /* Debug messages on/off */ | ||
28 | |||
29 | #define DCSS_NAME "extmem" | ||
30 | #ifdef DCSS_DEBUG | ||
31 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSS_NAME " debug:" x) | ||
32 | #else | ||
33 | #define PRINT_DEBUG(x...) do {} while (0) | ||
34 | #endif | ||
35 | #define PRINT_INFO(x...) printk(KERN_INFO DCSS_NAME " info:" x) | ||
36 | #define PRINT_WARN(x...) printk(KERN_WARNING DCSS_NAME " warning:" x) | ||
37 | #define PRINT_ERR(x...) printk(KERN_ERR DCSS_NAME " error:" x) | ||
38 | |||
39 | |||
40 | #define DCSS_LOADSHR 0x00 | 30 | #define DCSS_LOADSHR 0x00 |
41 | #define DCSS_LOADNSR 0x04 | 31 | #define DCSS_LOADNSR 0x04 |
42 | #define DCSS_PURGESEG 0x08 | 32 | #define DCSS_PURGESEG 0x08 |
@@ -286,7 +276,7 @@ query_segment_type (struct dcss_segment *seg) | |||
286 | goto out_free; | 276 | goto out_free; |
287 | } | 277 | } |
288 | if (diag_cc > 1) { | 278 | if (diag_cc > 1) { |
289 | PRINT_WARN ("segment_type: diag returned error %ld\n", vmrc); | 279 | pr_warning("Querying a DCSS type failed with rc=%ld\n", vmrc); |
290 | rc = dcss_diag_translate_rc (vmrc); | 280 | rc = dcss_diag_translate_rc (vmrc); |
291 | goto out_free; | 281 | goto out_free; |
292 | } | 282 | } |
@@ -368,7 +358,6 @@ query_segment_type (struct dcss_segment *seg) | |||
368 | * -EIO : could not perform query diagnose | 358 | * -EIO : could not perform query diagnose |
369 | * -ENOENT : no such segment | 359 | * -ENOENT : no such segment |
370 | * -ENOTSUPP: multi-part segment cannot be used with linux | 360 | * -ENOTSUPP: multi-part segment cannot be used with linux |
371 | * -ENOSPC : segment cannot be used (overlaps with storage) | ||
372 | * -ENOMEM : out of memory | 361 | * -ENOMEM : out of memory |
373 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h | 362 | * 0 .. 6 : type of segment as defined in include/asm-s390/extmem.h |
374 | */ | 363 | */ |
@@ -480,9 +469,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
480 | goto out_resource; | 469 | goto out_resource; |
481 | } | 470 | } |
482 | if (diag_cc > 1) { | 471 | if (diag_cc > 1) { |
483 | PRINT_WARN ("segment_load: could not load segment %s - " | 472 | pr_warning("Loading DCSS %s failed with rc=%ld\n", name, |
484 | "diag returned error (%ld)\n", | 473 | end_addr); |
485 | name, end_addr); | ||
486 | rc = dcss_diag_translate_rc(end_addr); | 474 | rc = dcss_diag_translate_rc(end_addr); |
487 | dcss_diag(&purgeseg_scode, seg->dcss_name, | 475 | dcss_diag(&purgeseg_scode, seg->dcss_name, |
488 | &dummy, &dummy); | 476 | &dummy, &dummy); |
@@ -496,15 +484,13 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
496 | *addr = seg->start_addr; | 484 | *addr = seg->start_addr; |
497 | *end = seg->end; | 485 | *end = seg->end; |
498 | if (do_nonshared) | 486 | if (do_nonshared) |
499 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | 487 | pr_info("DCSS %s of range %p to %p and type %s loaded as " |
500 | "type %s in non-shared mode\n", name, | 488 | "exclusive-writable\n", name, (void*) seg->start_addr, |
501 | (void*)seg->start_addr, (void*)seg->end, | 489 | (void*) seg->end, segtype_string[seg->vm_segtype]); |
502 | segtype_string[seg->vm_segtype]); | ||
503 | else { | 490 | else { |
504 | PRINT_INFO ("segment_load: loaded segment %s range %p .. %p " | 491 | pr_info("DCSS %s of range %p to %p and type %s loaded in " |
505 | "type %s in shared mode\n", name, | 492 | "shared access mode\n", name, (void*) seg->start_addr, |
506 | (void*)seg->start_addr, (void*)seg->end, | 493 | (void*) seg->end, segtype_string[seg->vm_segtype]); |
507 | segtype_string[seg->vm_segtype]); | ||
508 | } | 494 | } |
509 | goto out; | 495 | goto out; |
510 | out_resource: | 496 | out_resource: |
@@ -593,14 +579,14 @@ segment_modify_shared (char *name, int do_nonshared) | |||
593 | goto out_unlock; | 579 | goto out_unlock; |
594 | } | 580 | } |
595 | if (do_nonshared == seg->do_nonshared) { | 581 | if (do_nonshared == seg->do_nonshared) { |
596 | PRINT_INFO ("segment_modify_shared: not reloading segment %s" | 582 | pr_info("DCSS %s is already in the requested access " |
597 | " - already in requested mode\n",name); | 583 | "mode\n", name); |
598 | rc = 0; | 584 | rc = 0; |
599 | goto out_unlock; | 585 | goto out_unlock; |
600 | } | 586 | } |
601 | if (atomic_read (&seg->ref_count) != 1) { | 587 | if (atomic_read (&seg->ref_count) != 1) { |
602 | PRINT_WARN ("segment_modify_shared: not reloading segment %s - " | 588 | pr_warning("DCSS %s is in use and cannot be reloaded\n", |
603 | "segment is in use by other driver(s)\n",name); | 589 | name); |
604 | rc = -EAGAIN; | 590 | rc = -EAGAIN; |
605 | goto out_unlock; | 591 | goto out_unlock; |
606 | } | 592 | } |
@@ -613,8 +599,8 @@ segment_modify_shared (char *name, int do_nonshared) | |||
613 | seg->res->flags |= IORESOURCE_READONLY; | 599 | seg->res->flags |= IORESOURCE_READONLY; |
614 | 600 | ||
615 | if (request_resource(&iomem_resource, seg->res)) { | 601 | if (request_resource(&iomem_resource, seg->res)) { |
616 | PRINT_WARN("segment_modify_shared: could not reload segment %s" | 602 | pr_warning("DCSS %s overlaps with used memory resources " |
617 | " - overlapping resources\n", name); | 603 | "and cannot be reloaded\n", name); |
618 | rc = -EBUSY; | 604 | rc = -EBUSY; |
619 | kfree(seg->res); | 605 | kfree(seg->res); |
620 | goto out_del_mem; | 606 | goto out_del_mem; |
@@ -632,9 +618,8 @@ segment_modify_shared (char *name, int do_nonshared) | |||
632 | goto out_del_res; | 618 | goto out_del_res; |
633 | } | 619 | } |
634 | if (diag_cc > 1) { | 620 | if (diag_cc > 1) { |
635 | PRINT_WARN ("segment_modify_shared: could not reload segment %s" | 621 | pr_warning("Reloading DCSS %s failed with rc=%ld\n", name, |
636 | " - diag returned error (%ld)\n", | 622 | end_addr); |
637 | name, end_addr); | ||
638 | rc = dcss_diag_translate_rc(end_addr); | 623 | rc = dcss_diag_translate_rc(end_addr); |
639 | goto out_del_res; | 624 | goto out_del_res; |
640 | } | 625 | } |
@@ -673,8 +658,7 @@ segment_unload(char *name) | |||
673 | mutex_lock(&dcss_lock); | 658 | mutex_lock(&dcss_lock); |
674 | seg = segment_by_name (name); | 659 | seg = segment_by_name (name); |
675 | if (seg == NULL) { | 660 | if (seg == NULL) { |
676 | PRINT_ERR ("could not find segment %s in segment_unload, " | 661 | pr_err("Unloading unknown DCSS %s failed\n", name); |
677 | "please report to linux390@de.ibm.com\n",name); | ||
678 | goto out_unlock; | 662 | goto out_unlock; |
679 | } | 663 | } |
680 | if (atomic_dec_return(&seg->ref_count) != 0) | 664 | if (atomic_dec_return(&seg->ref_count) != 0) |
@@ -709,8 +693,7 @@ segment_save(char *name) | |||
709 | seg = segment_by_name (name); | 693 | seg = segment_by_name (name); |
710 | 694 | ||
711 | if (seg == NULL) { | 695 | if (seg == NULL) { |
712 | PRINT_ERR("could not find segment %s in segment_save, please " | 696 | pr_err("Saving unknown DCSS %s failed\n", name); |
713 | "report to linux390@de.ibm.com\n", name); | ||
714 | goto out; | 697 | goto out; |
715 | } | 698 | } |
716 | 699 | ||
@@ -727,14 +710,14 @@ segment_save(char *name) | |||
727 | response = 0; | 710 | response = 0; |
728 | cpcmd(cmd1, NULL, 0, &response); | 711 | cpcmd(cmd1, NULL, 0, &response); |
729 | if (response) { | 712 | if (response) { |
730 | PRINT_ERR("segment_save: DEFSEG failed with response code %i\n", | 713 | pr_err("Saving a DCSS failed with DEFSEG response code " |
731 | response); | 714 | "%i\n", response); |
732 | goto out; | 715 | goto out; |
733 | } | 716 | } |
734 | cpcmd(cmd2, NULL, 0, &response); | 717 | cpcmd(cmd2, NULL, 0, &response); |
735 | if (response) { | 718 | if (response) { |
736 | PRINT_ERR("segment_save: SAVESEG failed with response code %i\n", | 719 | pr_err("Saving a DCSS failed with SAVESEG response code " |
737 | response); | 720 | "%i\n", response); |
738 | goto out; | 721 | goto out; |
739 | } | 722 | } |
740 | out: | 723 | out: |
@@ -749,44 +732,41 @@ void segment_warning(int rc, char *seg_name) | |||
749 | { | 732 | { |
750 | switch (rc) { | 733 | switch (rc) { |
751 | case -ENOENT: | 734 | case -ENOENT: |
752 | PRINT_WARN("cannot load/query segment %s, " | 735 | pr_err("DCSS %s cannot be loaded or queried\n", seg_name); |
753 | "does not exist\n", seg_name); | ||
754 | break; | 736 | break; |
755 | case -ENOSYS: | 737 | case -ENOSYS: |
756 | PRINT_WARN("cannot load/query segment %s, " | 738 | pr_err("DCSS %s cannot be loaded or queried without " |
757 | "not running on VM\n", seg_name); | 739 | "z/VM\n", seg_name); |
758 | break; | 740 | break; |
759 | case -EIO: | 741 | case -EIO: |
760 | PRINT_WARN("cannot load/query segment %s, " | 742 | pr_err("Loading or querying DCSS %s resulted in a " |
761 | "hardware error\n", seg_name); | 743 | "hardware error\n", seg_name); |
762 | break; | 744 | break; |
763 | case -ENOTSUPP: | 745 | case -ENOTSUPP: |
764 | PRINT_WARN("cannot load/query segment %s, " | 746 | pr_err("DCSS %s has multiple page ranges and cannot be " |
765 | "is a multi-part segment\n", seg_name); | 747 | "loaded or queried\n", seg_name); |
766 | break; | 748 | break; |
767 | case -ENOSPC: | 749 | case -ENOSPC: |
768 | PRINT_WARN("cannot load/query segment %s, " | 750 | pr_err("DCSS %s overlaps with used storage and cannot " |
769 | "overlaps with storage\n", seg_name); | 751 | "be loaded\n", seg_name); |
770 | break; | 752 | break; |
771 | case -EBUSY: | 753 | case -EBUSY: |
772 | PRINT_WARN("cannot load/query segment %s, " | 754 | pr_err("%s needs used memory resources and cannot be " |
773 | "overlaps with already loaded dcss\n", seg_name); | 755 | "loaded or queried\n", seg_name); |
774 | break; | 756 | break; |
775 | case -EPERM: | 757 | case -EPERM: |
776 | PRINT_WARN("cannot load/query segment %s, " | 758 | pr_err("DCSS %s is already loaded in a different access " |
777 | "already loaded in incompatible mode\n", seg_name); | 759 | "mode\n", seg_name); |
778 | break; | 760 | break; |
779 | case -ENOMEM: | 761 | case -ENOMEM: |
780 | PRINT_WARN("cannot load/query segment %s, " | 762 | pr_err("There is not enough memory to load or query " |
781 | "out of memory\n", seg_name); | 763 | "DCSS %s\n", seg_name); |
782 | break; | 764 | break; |
783 | case -ERANGE: | 765 | case -ERANGE: |
784 | PRINT_WARN("cannot load/query segment %s, " | 766 | pr_err("DCSS %s exceeds the kernel mapping range (%lu) " |
785 | "exceeds kernel mapping range\n", seg_name); | 767 | "and cannot be loaded\n", seg_name, VMEM_MAX_PHYS); |
786 | break; | 768 | break; |
787 | default: | 769 | default: |
788 | PRINT_WARN("cannot load/query segment %s, " | ||
789 | "return value %i\n", seg_name, rc); | ||
790 | break; | 770 | break; |
791 | } | 771 | } |
792 | } | 772 | } |