Diffstat (limited to 'arch/s390')
30 files changed, 873 insertions, 367 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 1831833c430e..f6a68e178fc5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -3,6 +3,10 @@ | |||
3 | # see Documentation/kbuild/kconfig-language.txt. | 3 | # see Documentation/kbuild/kconfig-language.txt. |
4 | # | 4 | # |
5 | 5 | ||
6 | config SCHED_MC | ||
7 | def_bool y | ||
8 | depends on SMP | ||
9 | |||
6 | config MMU | 10 | config MMU |
7 | def_bool y | 11 | def_bool y |
8 | 12 | ||
@@ -39,6 +43,9 @@ config GENERIC_HWEIGHT | |||
39 | config GENERIC_TIME | 43 | config GENERIC_TIME |
40 | def_bool y | 44 | def_bool y |
41 | 45 | ||
46 | config GENERIC_CLOCKEVENTS | ||
47 | def_bool y | ||
48 | |||
42 | config GENERIC_BUG | 49 | config GENERIC_BUG |
43 | bool | 50 | bool |
44 | depends on BUG | 51 | depends on BUG |
@@ -69,6 +76,8 @@ menu "Base setup" | |||
69 | 76 | ||
70 | comment "Processor type and features" | 77 | comment "Processor type and features" |
71 | 78 | ||
79 | source "kernel/time/Kconfig" | ||
80 | |||
72 | config 64BIT | 81 | config 64BIT |
73 | bool "64 bit kernel" | 82 | bool "64 bit kernel" |
74 | help | 83 | help |
@@ -301,10 +310,7 @@ config QDIO | |||
301 | tristate "QDIO support" | 310 | tristate "QDIO support" |
302 | ---help--- | 311 | ---help--- |
303 | This driver provides the Queued Direct I/O base support for | 312 | This driver provides the Queued Direct I/O base support for |
304 | IBM mainframes. | 313 | IBM System z. |
305 | |||
306 | For details please refer to the documentation provided by IBM at | ||
307 | <http://www10.software.ibm.com/developerworks/opensource/linux390> | ||
308 | 314 | ||
309 | To compile this driver as a module, choose M here: the | 315 | To compile this driver as a module, choose M here: the |
310 | module will be called qdio. | 316 | module will be called qdio. |
@@ -486,25 +492,6 @@ config APPLDATA_NET_SUM | |||
486 | 492 | ||
487 | source kernel/Kconfig.hz | 493 | source kernel/Kconfig.hz |
488 | 494 | ||
489 | config NO_IDLE_HZ | ||
490 | bool "No HZ timer ticks in idle" | ||
491 | help | ||
492 | Switches the regular HZ timer off when the system is going idle. | ||
493 | This helps z/VM to detect that the Linux system is idle. VM can | ||
494 | then "swap-out" this guest which reduces memory usage. It also | ||
495 | reduces the overhead of idle systems. | ||
496 | |||
497 | The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer. | ||
498 | hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ | ||
499 | timer is active. | ||
500 | |||
501 | config NO_IDLE_HZ_INIT | ||
502 | bool "HZ timer in idle off by default" | ||
503 | depends on NO_IDLE_HZ | ||
504 | help | ||
505 | The HZ timer is switched off in idle by default. That means the | ||
506 | HZ timer is already disabled at boot time. | ||
507 | |||
508 | config S390_HYPFS_FS | 495 | config S390_HYPFS_FS |
509 | bool "s390 hypervisor file system support" | 496 | bool "s390 hypervisor file system support" |
510 | select SYS_HYPERVISOR | 497 | select SYS_HYPERVISOR |
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index a3f67f8b5427..e33f32b54c08 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -499,7 +499,7 @@ static struct crypto_alg cbc_aes_alg = { | |||
499 | } | 499 | } |
500 | }; | 500 | }; |
501 | 501 | ||
502 | static int __init aes_init(void) | 502 | static int __init aes_s390_init(void) |
503 | { | 503 | { |
504 | int ret; | 504 | int ret; |
505 | 505 | ||
@@ -542,15 +542,15 @@ aes_err: | |||
542 | goto out; | 542 | goto out; |
543 | } | 543 | } |
544 | 544 | ||
545 | static void __exit aes_fini(void) | 545 | static void __exit aes_s390_fini(void) |
546 | { | 546 | { |
547 | crypto_unregister_alg(&cbc_aes_alg); | 547 | crypto_unregister_alg(&cbc_aes_alg); |
548 | crypto_unregister_alg(&ecb_aes_alg); | 548 | crypto_unregister_alg(&ecb_aes_alg); |
549 | crypto_unregister_alg(&aes_alg); | 549 | crypto_unregister_alg(&aes_alg); |
550 | } | 550 | } |
551 | 551 | ||
552 | module_init(aes_init); | 552 | module_init(aes_s390_init); |
553 | module_exit(aes_fini); | 553 | module_exit(aes_s390_fini); |
554 | 554 | ||
555 | MODULE_ALIAS("aes"); | 555 | MODULE_ALIAS("aes"); |
556 | 556 | ||
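The four crypto hunks in this patch (aes_s390, des_s390, sha1_s390, sha256_s390) all apply the same rename: the module entry points get a driver-specific prefix so that generic names such as init()/fini() cannot clash with other statically linked symbols when the drivers are built into the kernel. A minimal illustrative sketch of the resulting pattern (the example_s390 name is made up, not from the patch):

    #include <linux/init.h>
    #include <linux/module.h>

    /* prefix the entry points with the driver name to keep the
     * global namespace clean when the object is built in */
    static int __init example_s390_init(void)
    {
            return 0;       /* crypto_register_alg() calls would go here */
    }

    static void __exit example_s390_fini(void)
    {
            /* crypto_unregister_alg() calls would go here */
    }

    module_init(example_s390_init);
    module_exit(example_s390_fini);
    MODULE_LICENSE("GPL");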
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index ea22707f435f..4aba83b31596 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -550,7 +550,7 @@ static struct crypto_alg cbc_des3_192_alg = { | |||
550 | } | 550 | } |
551 | }; | 551 | }; |
552 | 552 | ||
553 | static int init(void) | 553 | static int des_s390_init(void) |
554 | { | 554 | { |
555 | int ret = 0; | 555 | int ret = 0; |
556 | 556 | ||
@@ -612,7 +612,7 @@ des_err: | |||
612 | goto out; | 612 | goto out; |
613 | } | 613 | } |
614 | 614 | ||
615 | static void __exit fini(void) | 615 | static void __exit des_s390_fini(void) |
616 | { | 616 | { |
617 | crypto_unregister_alg(&cbc_des3_192_alg); | 617 | crypto_unregister_alg(&cbc_des3_192_alg); |
618 | crypto_unregister_alg(&ecb_des3_192_alg); | 618 | crypto_unregister_alg(&ecb_des3_192_alg); |
@@ -625,8 +625,8 @@ static void __exit fini(void) | |||
625 | crypto_unregister_alg(&des_alg); | 625 | crypto_unregister_alg(&des_alg); |
626 | } | 626 | } |
627 | 627 | ||
628 | module_init(init); | 628 | module_init(des_s390_init); |
629 | module_exit(fini); | 629 | module_exit(des_s390_fini); |
630 | 630 | ||
631 | MODULE_ALIAS("des"); | 631 | MODULE_ALIAS("des"); |
632 | MODULE_ALIAS("des3_ede"); | 632 | MODULE_ALIAS("des3_ede"); |
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5a834f6578ab..9cf9eca22747 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -137,7 +137,7 @@ static struct crypto_alg alg = { | |||
137 | .dia_final = sha1_final } } | 137 | .dia_final = sha1_final } } |
138 | }; | 138 | }; |
139 | 139 | ||
140 | static int __init init(void) | 140 | static int __init sha1_s390_init(void) |
141 | { | 141 | { |
142 | if (!crypt_s390_func_available(KIMD_SHA_1)) | 142 | if (!crypt_s390_func_available(KIMD_SHA_1)) |
143 | return -EOPNOTSUPP; | 143 | return -EOPNOTSUPP; |
@@ -145,13 +145,13 @@ static int __init init(void) | |||
145 | return crypto_register_alg(&alg); | 145 | return crypto_register_alg(&alg); |
146 | } | 146 | } |
147 | 147 | ||
148 | static void __exit fini(void) | 148 | static void __exit sha1_s390_fini(void) |
149 | { | 149 | { |
150 | crypto_unregister_alg(&alg); | 150 | crypto_unregister_alg(&alg); |
151 | } | 151 | } |
152 | 152 | ||
153 | module_init(init); | 153 | module_init(sha1_s390_init); |
154 | module_exit(fini); | 154 | module_exit(sha1_s390_fini); |
155 | 155 | ||
156 | MODULE_ALIAS("sha1"); | 156 | MODULE_ALIAS("sha1"); |
157 | 157 | ||
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index ccf8633c4f65..2a3d756b35d4 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -133,7 +133,7 @@ static struct crypto_alg alg = { | |||
133 | .dia_final = sha256_final } } | 133 | .dia_final = sha256_final } } |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static int init(void) | 136 | static int sha256_s390_init(void) |
137 | { | 137 | { |
138 | if (!crypt_s390_func_available(KIMD_SHA_256)) | 138 | if (!crypt_s390_func_available(KIMD_SHA_256)) |
139 | return -EOPNOTSUPP; | 139 | return -EOPNOTSUPP; |
@@ -141,13 +141,13 @@ static int init(void) | |||
141 | return crypto_register_alg(&alg); | 141 | return crypto_register_alg(&alg); |
142 | } | 142 | } |
143 | 143 | ||
144 | static void __exit fini(void) | 144 | static void __exit sha256_s390_fini(void) |
145 | { | 145 | { |
146 | crypto_unregister_alg(&alg); | 146 | crypto_unregister_alg(&alg); |
147 | } | 147 | } |
148 | 148 | ||
149 | module_init(init); | 149 | module_init(sha256_s390_init); |
150 | module_exit(fini); | 150 | module_exit(sha256_s390_fini); |
151 | 151 | ||
152 | MODULE_ALIAS("sha256"); | 152 | MODULE_ALIAS("sha256"); |
153 | 153 | ||
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 62f6b5a606dd..dcc3ec2ef643 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -3,6 +3,7 @@ | |||
3 | # Linux kernel version: 2.6.25-rc4 | 3 | # Linux kernel version: 2.6.25-rc4 |
4 | # Wed Mar 5 11:22:59 2008 | 4 | # Wed Mar 5 11:22:59 2008 |
5 | # | 5 | # |
6 | CONFIG_SCHED_MC=y | ||
6 | CONFIG_MMU=y | 7 | CONFIG_MMU=y |
7 | CONFIG_ZONE_DMA=y | 8 | CONFIG_ZONE_DMA=y |
8 | CONFIG_LOCKDEP_SUPPORT=y | 9 | CONFIG_LOCKDEP_SUPPORT=y |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index ce144b67f060..77051cd27925 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -19,7 +19,7 @@ obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | |||
19 | extra-y += head.o init_task.o vmlinux.lds | 19 | extra-y += head.o init_task.o vmlinux.lds |
20 | 20 | ||
21 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o | 21 | obj-$(CONFIG_MODULES) += s390_ksyms.o module.o |
22 | obj-$(CONFIG_SMP) += smp.o | 22 | obj-$(CONFIG_SMP) += smp.o topology.o |
23 | 23 | ||
24 | obj-$(CONFIG_AUDIT) += audit.o | 24 | obj-$(CONFIG_AUDIT) += audit.o |
25 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o | 25 | compat-obj-$(CONFIG_AUDIT) += compat_audit.o |
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index e89f8c0c42a0..20723a062017 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -162,4 +162,77 @@ struct ucontext32 { | |||
162 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | 162 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ |
163 | }; | 163 | }; |
164 | 164 | ||
165 | struct __sysctl_args32; | ||
166 | struct stat64_emu31; | ||
167 | struct mmap_arg_struct_emu31; | ||
168 | struct fadvise64_64_args; | ||
169 | struct old_sigaction32; | ||
170 | struct old_sigaction32; | ||
171 | |||
172 | long sys32_chown16(const char __user * filename, u16 user, u16 group); | ||
173 | long sys32_lchown16(const char __user * filename, u16 user, u16 group); | ||
174 | long sys32_fchown16(unsigned int fd, u16 user, u16 group); | ||
175 | long sys32_setregid16(u16 rgid, u16 egid); | ||
176 | long sys32_setgid16(u16 gid); | ||
177 | long sys32_setreuid16(u16 ruid, u16 euid); | ||
178 | long sys32_setuid16(u16 uid); | ||
179 | long sys32_setresuid16(u16 ruid, u16 euid, u16 suid); | ||
180 | long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid); | ||
181 | long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid); | ||
182 | long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid); | ||
183 | long sys32_setfsuid16(u16 uid); | ||
184 | long sys32_setfsgid16(u16 gid); | ||
185 | long sys32_getgroups16(int gidsetsize, u16 __user *grouplist); | ||
186 | long sys32_setgroups16(int gidsetsize, u16 __user *grouplist); | ||
187 | long sys32_getuid16(void); | ||
188 | long sys32_geteuid16(void); | ||
189 | long sys32_getgid16(void); | ||
190 | long sys32_getegid16(void); | ||
191 | long sys32_ipc(u32 call, int first, int second, int third, u32 ptr); | ||
192 | long sys32_truncate64(const char __user * path, unsigned long high, | ||
193 | unsigned long low); | ||
194 | long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low); | ||
195 | long sys32_sched_rr_get_interval(compat_pid_t pid, | ||
196 | struct compat_timespec __user *interval); | ||
197 | long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set, | ||
198 | compat_sigset_t __user *oset, size_t sigsetsize); | ||
199 | long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize); | ||
200 | long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo); | ||
201 | long sys32_execve(void); | ||
202 | long sys32_init_module(void __user *umod, unsigned long len, | ||
203 | const char __user *uargs); | ||
204 | long sys32_delete_module(const char __user *name_user, unsigned int flags); | ||
205 | long sys32_gettimeofday(struct compat_timeval __user *tv, | ||
206 | struct timezone __user *tz); | ||
207 | long sys32_settimeofday(struct compat_timeval __user *tv, | ||
208 | struct timezone __user *tz); | ||
209 | long sys32_pause(void); | ||
210 | long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
211 | u32 poshi, u32 poslo); | ||
212 | long sys32_pwrite64(unsigned int fd, const char __user *ubuf, | ||
213 | size_t count, u32 poshi, u32 poslo); | ||
214 | compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count); | ||
215 | long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | ||
216 | size_t count); | ||
217 | long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset, | ||
218 | s32 count); | ||
219 | long sys32_sysctl(struct __sysctl_args32 __user *args); | ||
220 | long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf); | ||
221 | long sys32_lstat64(char __user * filename, | ||
222 | struct stat64_emu31 __user * statbuf); | ||
223 | long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf); | ||
224 | long sys32_fstatat64(unsigned int dfd, char __user *filename, | ||
225 | struct stat64_emu31 __user* statbuf, int flag); | ||
226 | unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg); | ||
227 | long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg); | ||
228 | long sys32_read(unsigned int fd, char __user * buf, size_t count); | ||
229 | long sys32_write(unsigned int fd, char __user * buf, size_t count); | ||
230 | long sys32_clone(void); | ||
231 | long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise); | ||
232 | long sys32_fadvise64_64(struct fadvise64_64_args __user *args); | ||
233 | long sys32_sigaction(int sig, const struct old_sigaction32 __user *act, | ||
234 | struct old_sigaction32 __user *oact); | ||
235 | long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act, | ||
236 | struct sigaction32 __user *oact, size_t sigsetsize); | ||
237 | long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss); | ||
165 | #endif /* _ASM_S390X_S390_H */ | 238 | #endif /* _ASM_S390X_S390_H */ |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index a5692c460bad..c7f02e777af2 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/lowcore.h> | 29 | #include <asm/lowcore.h> |
30 | #include "compat_linux.h" | 30 | #include "compat_linux.h" |
31 | #include "compat_ptrace.h" | 31 | #include "compat_ptrace.h" |
32 | #include "entry.h" | ||
32 | 33 | ||
33 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 34 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
34 | 35 | ||
@@ -428,6 +429,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
428 | /* Default to using normal stack */ | 429 | /* Default to using normal stack */ |
429 | sp = (unsigned long) A(regs->gprs[15]); | 430 | sp = (unsigned long) A(regs->gprs[15]); |
430 | 431 | ||
432 | /* Overflow on alternate signal stack gives SIGSEGV. */ | ||
433 | if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL)) | ||
434 | return (void __user *) -1UL; | ||
435 | |||
431 | /* This is the X/Open sanctioned signal stack switching. */ | 436 | /* This is the X/Open sanctioned signal stack switching. */ |
432 | if (ka->sa.sa_flags & SA_ONSTACK) { | 437 | if (ka->sa.sa_flags & SA_ONSTACK) { |
433 | if (! sas_ss_flags(sp)) | 438 | if (! sas_ss_flags(sp)) |
@@ -461,6 +466,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka, | |||
461 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) | 466 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32))) |
462 | goto give_sigsegv; | 467 | goto give_sigsegv; |
463 | 468 | ||
469 | if (frame == (void __user *) -1UL) | ||
470 | goto give_sigsegv; | ||
471 | |||
464 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) | 472 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32)) |
465 | goto give_sigsegv; | 473 | goto give_sigsegv; |
466 | 474 | ||
@@ -514,6 +522,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
514 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) | 522 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32))) |
515 | goto give_sigsegv; | 523 | goto give_sigsegv; |
516 | 524 | ||
525 | if (frame == (void __user *) -1UL) | ||
526 | goto give_sigsegv; | ||
527 | |||
517 | if (copy_siginfo_to_user32(&frame->info, info)) | 528 | if (copy_siginfo_to_user32(&frame->info, info)) |
518 | goto give_sigsegv; | 529 | goto give_sigsegv; |
519 | 530 | ||
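The get_sigframe() change above (mirrored later in signal.c) makes an overflow of the alternate signal stack fail cleanly: if the old stack pointer already lies on the sigaltstack but the new frame would drop below its lower end, the function returns an invalid user pointer and the setup_*frame() callers turn that into SIGSEGV instead of silently corrupting memory. A simplified sketch of just that check (illustrative; the real function also handles SA_ONSTACK stack switching and, in the compat case, 31-bit address handling):

    #include <linux/sched.h>        /* on_sig_stack() */

    static void __user *get_sigframe_sketch(unsigned long sp, size_t frame_size)
    {
            /* Overflow on the alternate signal stack gives SIGSEGV. */
            if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
                    return (void __user *) -1UL;
            return (void __user *)((sp - frame_size) & -8UL);
    }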
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 1b2f5ce45320..1e7d4ac7068b 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -73,7 +73,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf, | |||
73 | static int debug_open(struct inode *inode, struct file *file); | 73 | static int debug_open(struct inode *inode, struct file *file); |
74 | static int debug_close(struct inode *inode, struct file *file); | 74 | static int debug_close(struct inode *inode, struct file *file); |
75 | static debug_info_t* debug_info_create(char *name, int pages_per_area, | 75 | static debug_info_t* debug_info_create(char *name, int pages_per_area, |
76 | int nr_areas, int buf_size); | 76 | int nr_areas, int buf_size, mode_t mode); |
77 | static void debug_info_get(debug_info_t *); | 77 | static void debug_info_get(debug_info_t *); |
78 | static void debug_info_put(debug_info_t *); | 78 | static void debug_info_put(debug_info_t *); |
79 | static int debug_prolog_level_fn(debug_info_t * id, | 79 | static int debug_prolog_level_fn(debug_info_t * id, |
@@ -157,7 +157,7 @@ struct debug_view debug_sprintf_view = { | |||
157 | }; | 157 | }; |
158 | 158 | ||
159 | /* used by dump analysis tools to determine version of debug feature */ | 159 | /* used by dump analysis tools to determine version of debug feature */ |
160 | unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION; | 160 | static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION; |
161 | 161 | ||
162 | /* static globals */ | 162 | /* static globals */ |
163 | 163 | ||
@@ -327,7 +327,8 @@ debug_info_free(debug_info_t* db_info){ | |||
327 | */ | 327 | */ |
328 | 328 | ||
329 | static debug_info_t* | 329 | static debug_info_t* |
330 | debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size) | 330 | debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size, |
331 | mode_t mode) | ||
331 | { | 332 | { |
332 | debug_info_t* rc; | 333 | debug_info_t* rc; |
333 | 334 | ||
@@ -336,6 +337,8 @@ debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size) | |||
336 | if(!rc) | 337 | if(!rc) |
337 | goto out; | 338 | goto out; |
338 | 339 | ||
340 | rc->mode = mode & ~S_IFMT; | ||
341 | |||
339 | /* create root directory */ | 342 | /* create root directory */ |
340 | rc->debugfs_root_entry = debugfs_create_dir(rc->name, | 343 | rc->debugfs_root_entry = debugfs_create_dir(rc->name, |
341 | debug_debugfs_root_entry); | 344 | debug_debugfs_root_entry); |
@@ -676,23 +679,30 @@ debug_close(struct inode *inode, struct file *file) | |||
676 | } | 679 | } |
677 | 680 | ||
678 | /* | 681 | /* |
679 | * debug_register: | 682 | * debug_register_mode: |
680 | * - creates and initializes debug area for the caller | 683 | * - Creates and initializes debug area for the caller |
681 | * - returns handle for debug area | 684 | * The mode parameter allows to specify access rights for the s390dbf files |
685 | * - Returns handle for debug area | ||
682 | */ | 686 | */ |
683 | 687 | ||
684 | debug_info_t* | 688 | debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas, |
685 | debug_register (char *name, int pages_per_area, int nr_areas, int buf_size) | 689 | int buf_size, mode_t mode, uid_t uid, |
690 | gid_t gid) | ||
686 | { | 691 | { |
687 | debug_info_t *rc = NULL; | 692 | debug_info_t *rc = NULL; |
688 | 693 | ||
694 | /* Since debugfs currently does not support uid/gid other than root, */ | ||
695 | /* we do not allow gid/uid != 0 until we get support for that. */ | ||
696 | if ((uid != 0) || (gid != 0)) | ||
697 | printk(KERN_WARNING "debug: Warning - Currently only uid/gid " | ||
698 | "= 0 are supported. Using root as owner now!"); | ||
689 | if (!initialized) | 699 | if (!initialized) |
690 | BUG(); | 700 | BUG(); |
691 | mutex_lock(&debug_mutex); | 701 | mutex_lock(&debug_mutex); |
692 | 702 | ||
693 | /* create new debug_info */ | 703 | /* create new debug_info */ |
694 | 704 | ||
695 | rc = debug_info_create(name, pages_per_area, nr_areas, buf_size); | 705 | rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode); |
696 | if(!rc) | 706 | if(!rc) |
697 | goto out; | 707 | goto out; |
698 | debug_register_view(rc, &debug_level_view); | 708 | debug_register_view(rc, &debug_level_view); |
@@ -705,6 +715,20 @@ out: | |||
705 | mutex_unlock(&debug_mutex); | 715 | mutex_unlock(&debug_mutex); |
706 | return rc; | 716 | return rc; |
707 | } | 717 | } |
718 | EXPORT_SYMBOL(debug_register_mode); | ||
719 | |||
720 | /* | ||
721 | * debug_register: | ||
722 | * - creates and initializes debug area for the caller | ||
723 | * - returns handle for debug area | ||
724 | */ | ||
725 | |||
726 | debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas, | ||
727 | int buf_size) | ||
728 | { | ||
729 | return debug_register_mode(name, pages_per_area, nr_areas, buf_size, | ||
730 | S_IRUSR | S_IWUSR, 0, 0); | ||
731 | } | ||
708 | 732 | ||
709 | /* | 733 | /* |
710 | * debug_unregister: | 734 | * debug_unregister: |
@@ -1073,15 +1097,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view) | |||
1073 | int rc = 0; | 1097 | int rc = 0; |
1074 | int i; | 1098 | int i; |
1075 | unsigned long flags; | 1099 | unsigned long flags; |
1076 | mode_t mode = S_IFREG; | 1100 | mode_t mode; |
1077 | struct dentry *pde; | 1101 | struct dentry *pde; |
1078 | 1102 | ||
1079 | if (!id) | 1103 | if (!id) |
1080 | goto out; | 1104 | goto out; |
1081 | if (view->prolog_proc || view->format_proc || view->header_proc) | 1105 | mode = (id->mode | S_IFREG) & ~S_IXUGO; |
1082 | mode |= S_IRUSR; | 1106 | if (!(view->prolog_proc || view->format_proc || view->header_proc)) |
1083 | if (view->input_proc) | 1107 | mode &= ~(S_IRUSR | S_IRGRP | S_IROTH); |
1084 | mode |= S_IWUSR; | 1108 | if (!view->input_proc) |
1109 | mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); | ||
1085 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, | 1110 | pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry, |
1086 | id , &debug_file_ops); | 1111 | id , &debug_file_ops); |
1087 | if (!pde){ | 1112 | if (!pde){ |
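debug_register_mode() is the new exported entry point: it behaves like debug_register() but lets the caller choose the permission bits of the generated s390dbf debugfs files; a non-zero uid/gid is only warned about and mapped to root until debugfs supports file ownership. A hedged usage sketch (the "example" name and the area/buffer sizes are illustrative only, not from the patch):

    #include <linux/init.h>
    #include <linux/stat.h>
    #include <linux/errno.h>
    #include <asm/debug.h>

    static debug_info_t *example_dbf;

    static int __init example_dbf_init(void)
    {
            /* 4 areas of 1 page each, 16-byte entries, world-readable files */
            example_dbf = debug_register_mode("example", 1, 4, 16,
                                              S_IRUGO | S_IWUSR, 0, 0);
            if (!example_dbf)
                    return -ENOMEM;
            debug_register_view(example_dbf, &debug_hex_ascii_view);
            return 0;
    }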
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 01832c440636..540a67f979b6 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
22 | #include <asm/cpcmd.h> | 22 | #include <asm/cpcmd.h> |
23 | #include <asm/sclp.h> | 23 | #include <asm/sclp.h> |
24 | #include "entry.h" | ||
24 | 25 | ||
25 | /* | 26 | /* |
26 | * Create a Kernel NSS if the SAVESYS= parameter is defined | 27 | * Create a Kernel NSS if the SAVESYS= parameter is defined |
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
new file mode 100644
index 000000000000..6b1896345eda
--- /dev/null
+++ b/arch/s390/kernel/entry.h
@@ -0,0 +1,60 @@ | |||
1 | #ifndef _ENTRY_H | ||
2 | #define _ENTRY_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/signal.h> | ||
6 | #include <asm/ptrace.h> | ||
7 | |||
8 | typedef void pgm_check_handler_t(struct pt_regs *, long); | ||
9 | extern pgm_check_handler_t *pgm_check_table[128]; | ||
10 | pgm_check_handler_t do_protection_exception; | ||
11 | pgm_check_handler_t do_dat_exception; | ||
12 | |||
13 | extern int sysctl_userprocess_debug; | ||
14 | |||
15 | void do_single_step(struct pt_regs *regs); | ||
16 | void syscall_trace(struct pt_regs *regs, int entryexit); | ||
17 | void kernel_stack_overflow(struct pt_regs * regs); | ||
18 | void do_signal(struct pt_regs *regs); | ||
19 | int handle_signal32(unsigned long sig, struct k_sigaction *ka, | ||
20 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); | ||
21 | |||
22 | void do_extint(struct pt_regs *regs, unsigned short code); | ||
23 | int __cpuinit start_secondary(void *cpuvoid); | ||
24 | void __init startup_init(void); | ||
25 | void die(const char * str, struct pt_regs * regs, long err); | ||
26 | |||
27 | struct new_utsname; | ||
28 | struct mmap_arg_struct; | ||
29 | struct fadvise64_64_args; | ||
30 | struct old_sigaction; | ||
31 | struct sel_arg_struct; | ||
32 | |||
33 | long sys_pipe(unsigned long __user *fildes); | ||
34 | long sys_mmap2(struct mmap_arg_struct __user *arg); | ||
35 | long old_mmap(struct mmap_arg_struct __user *arg); | ||
36 | long sys_ipc(uint call, int first, unsigned long second, | ||
37 | unsigned long third, void __user *ptr); | ||
38 | long s390x_newuname(struct new_utsname __user *name); | ||
39 | long s390x_personality(unsigned long personality); | ||
40 | long s390_fadvise64(int fd, u32 offset_high, u32 offset_low, | ||
41 | size_t len, int advice); | ||
42 | long s390_fadvise64_64(struct fadvise64_64_args __user *args); | ||
43 | long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low); | ||
44 | long sys_fork(void); | ||
45 | long sys_clone(void); | ||
46 | long sys_vfork(void); | ||
47 | void execve_tail(void); | ||
48 | long sys_execve(void); | ||
49 | int sys_sigsuspend(int history0, int history1, old_sigset_t mask); | ||
50 | long sys_sigaction(int sig, const struct old_sigaction __user *act, | ||
51 | struct old_sigaction __user *oact); | ||
52 | long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss); | ||
53 | long sys_sigreturn(void); | ||
54 | long sys_rt_sigreturn(void); | ||
55 | long sys32_sigreturn(void); | ||
56 | long sys32_rt_sigreturn(void); | ||
57 | long old_select(struct sel_arg_struct __user *arg); | ||
58 | long sys_ptrace(long request, long pid, long addr, long data); | ||
59 | |||
60 | #endif /* _ENTRY_H */ | ||
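entry.h is new: it gathers the prototypes of C entry points that are reached from assembly or from other files under kernel/, so the files that define them (see the "entry.h" includes added throughout this patch) get their definitions checked against a single declaration instead of relying on ad hoc externs. Illustrative pattern only (the body is a placeholder, not the real implementation):

    /* in the arch/s390/kernel/ file that defines the entry point */
    #include "entry.h"

    void kernel_stack_overflow(struct pt_regs *regs)
    {
            /* definition is now checked against the prototype in entry.h */
    }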
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index efde6e178f6c..cd959c0b2e16 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -475,6 +475,7 @@ pgm_check_handler: | |||
475 | pgm_no_vtime: | 475 | pgm_no_vtime: |
476 | #endif | 476 | #endif |
477 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct | 477 | lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct |
478 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | ||
478 | TRACE_IRQS_OFF | 479 | TRACE_IRQS_OFF |
479 | lgf %r3,__LC_PGM_ILC # load program interruption code | 480 | lgf %r3,__LC_PGM_ILC # load program interruption code |
480 | lghi %r8,0x7f | 481 | lghi %r8,0x7f |
@@ -847,6 +848,7 @@ stack_overflow: | |||
847 | je 0f | 848 | je 0f |
848 | la %r1,__LC_SAVE_AREA+32 | 849 | la %r1,__LC_SAVE_AREA+32 |
849 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack | 850 | 0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack |
851 | mvc SP_ARGS(8,%r15),__LC_LAST_BREAK | ||
850 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain | 852 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain |
851 | la %r2,SP_PTREGS(%r15) # load pt_regs | 853 | la %r2,SP_PTREGS(%r15) # load pt_regs |
852 | jg kernel_stack_overflow | 854 | jg kernel_stack_overflow |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 375232c46c7a..532542447d66 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -655,7 +655,7 @@ static struct kobj_attribute reipl_type_attr = | |||
655 | 655 | ||
656 | static struct kset *reipl_kset; | 656 | static struct kset *reipl_kset; |
657 | 657 | ||
658 | void reipl_run(struct shutdown_trigger *trigger) | 658 | static void reipl_run(struct shutdown_trigger *trigger) |
659 | { | 659 | { |
660 | struct ccw_dev_id devid; | 660 | struct ccw_dev_id devid; |
661 | static char buf[100]; | 661 | static char buf[100]; |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index c5549a206284..ed04d1372d5d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -360,7 +360,7 @@ no_kprobe: | |||
360 | * - When the probed function returns, this probe | 360 | * - When the probed function returns, this probe |
361 | * causes the handlers to fire | 361 | * causes the handlers to fire |
362 | */ | 362 | */ |
363 | void kretprobe_trampoline_holder(void) | 363 | static void __used kretprobe_trampoline_holder(void) |
364 | { | 364 | { |
365 | asm volatile(".global kretprobe_trampoline\n" | 365 | asm volatile(".global kretprobe_trampoline\n" |
366 | "kretprobe_trampoline: bcr 0,0\n"); | 366 | "kretprobe_trampoline: bcr 0,0\n"); |
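Making kretprobe_trampoline_holder() static only works together with __used: the function is referenced solely from inline assembly (the kretprobe_trampoline label it emits), so without the attribute the compiler could discard it or warn about an unused static function. A tiny illustrative sketch of the same idiom (example_label is made up):

    #include <linux/compiler.h>

    static void __used referenced_only_from_asm(void)
    {
            asm volatile(".global example_label\n"
                         "example_label: bcr 0,0\n");
    }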
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ce203154d8ce..c1aff194141d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -36,6 +36,8 @@ | |||
36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
37 | #include <linux/notifier.h> | 37 | #include <linux/notifier.h> |
38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
39 | #include <linux/tick.h> | ||
40 | #include <linux/elfcore.h> | ||
39 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
40 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
41 | #include <asm/system.h> | 43 | #include <asm/system.h> |
@@ -44,6 +46,7 @@ | |||
44 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
45 | #include <asm/timer.h> | 47 | #include <asm/timer.h> |
46 | #include <asm/cpu.h> | 48 | #include <asm/cpu.h> |
49 | #include "entry.h" | ||
47 | 50 | ||
48 | asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); | 51 | asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); |
49 | 52 | ||
@@ -76,6 +79,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
76 | * Need to know about CPUs going idle? | 79 | * Need to know about CPUs going idle? |
77 | */ | 80 | */ |
78 | static ATOMIC_NOTIFIER_HEAD(idle_chain); | 81 | static ATOMIC_NOTIFIER_HEAD(idle_chain); |
82 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
79 | 83 | ||
80 | int register_idle_notifier(struct notifier_block *nb) | 84 | int register_idle_notifier(struct notifier_block *nb) |
81 | { | 85 | { |
@@ -89,9 +93,33 @@ int unregister_idle_notifier(struct notifier_block *nb) | |||
89 | } | 93 | } |
90 | EXPORT_SYMBOL(unregister_idle_notifier); | 94 | EXPORT_SYMBOL(unregister_idle_notifier); |
91 | 95 | ||
92 | void do_monitor_call(struct pt_regs *regs, long interruption_code) | 96 | static int s390_idle_enter(void) |
97 | { | ||
98 | struct s390_idle_data *idle; | ||
99 | int nr_calls = 0; | ||
100 | void *hcpu; | ||
101 | int rc; | ||
102 | |||
103 | hcpu = (void *)(long)smp_processor_id(); | ||
104 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
105 | &nr_calls); | ||
106 | if (rc == NOTIFY_BAD) { | ||
107 | nr_calls--; | ||
108 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
109 | hcpu, nr_calls, NULL); | ||
110 | return rc; | ||
111 | } | ||
112 | idle = &__get_cpu_var(s390_idle); | ||
113 | spin_lock(&idle->lock); | ||
114 | idle->idle_count++; | ||
115 | idle->in_idle = 1; | ||
116 | idle->idle_enter = get_clock(); | ||
117 | spin_unlock(&idle->lock); | ||
118 | return NOTIFY_OK; | ||
119 | } | ||
120 | |||
121 | void s390_idle_leave(void) | ||
93 | { | 122 | { |
94 | #ifdef CONFIG_SMP | ||
95 | struct s390_idle_data *idle; | 123 | struct s390_idle_data *idle; |
96 | 124 | ||
97 | idle = &__get_cpu_var(s390_idle); | 125 | idle = &__get_cpu_var(s390_idle); |
@@ -99,10 +127,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code) | |||
99 | idle->idle_time += get_clock() - idle->idle_enter; | 127 | idle->idle_time += get_clock() - idle->idle_enter; |
100 | idle->in_idle = 0; | 128 | idle->in_idle = 0; |
101 | spin_unlock(&idle->lock); | 129 | spin_unlock(&idle->lock); |
102 | #endif | ||
103 | /* disable monitor call class 0 */ | ||
104 | __ctl_clear_bit(8, 15); | ||
105 | |||
106 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | 130 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, |
107 | (void *)(long) smp_processor_id()); | 131 | (void *)(long) smp_processor_id()); |
108 | } | 132 | } |
@@ -113,61 +137,30 @@ extern void s390_handle_mcck(void); | |||
113 | */ | 137 | */ |
114 | static void default_idle(void) | 138 | static void default_idle(void) |
115 | { | 139 | { |
116 | int cpu, rc; | ||
117 | int nr_calls = 0; | ||
118 | void *hcpu; | ||
119 | #ifdef CONFIG_SMP | ||
120 | struct s390_idle_data *idle; | ||
121 | #endif | ||
122 | |||
123 | /* CPU is going idle. */ | 140 | /* CPU is going idle. */ |
124 | cpu = smp_processor_id(); | ||
125 | hcpu = (void *)(long)cpu; | ||
126 | local_irq_disable(); | 141 | local_irq_disable(); |
127 | if (need_resched()) { | 142 | if (need_resched()) { |
128 | local_irq_enable(); | 143 | local_irq_enable(); |
129 | return; | 144 | return; |
130 | } | 145 | } |
131 | 146 | if (s390_idle_enter() == NOTIFY_BAD) { | |
132 | rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1, | ||
133 | &nr_calls); | ||
134 | if (rc == NOTIFY_BAD) { | ||
135 | nr_calls--; | ||
136 | __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
137 | hcpu, nr_calls, NULL); | ||
138 | local_irq_enable(); | 147 | local_irq_enable(); |
139 | return; | 148 | return; |
140 | } | 149 | } |
141 | |||
142 | /* enable monitor call class 0 */ | ||
143 | __ctl_set_bit(8, 15); | ||
144 | |||
145 | #ifdef CONFIG_HOTPLUG_CPU | 150 | #ifdef CONFIG_HOTPLUG_CPU |
146 | if (cpu_is_offline(cpu)) { | 151 | if (cpu_is_offline(smp_processor_id())) { |
147 | preempt_enable_no_resched(); | 152 | preempt_enable_no_resched(); |
148 | cpu_die(); | 153 | cpu_die(); |
149 | } | 154 | } |
150 | #endif | 155 | #endif |
151 | |||
152 | local_mcck_disable(); | 156 | local_mcck_disable(); |
153 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 157 | if (test_thread_flag(TIF_MCCK_PENDING)) { |
154 | local_mcck_enable(); | 158 | local_mcck_enable(); |
155 | /* disable monitor call class 0 */ | 159 | s390_idle_leave(); |
156 | __ctl_clear_bit(8, 15); | ||
157 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
158 | hcpu); | ||
159 | local_irq_enable(); | 160 | local_irq_enable(); |
160 | s390_handle_mcck(); | 161 | s390_handle_mcck(); |
161 | return; | 162 | return; |
162 | } | 163 | } |
163 | #ifdef CONFIG_SMP | ||
164 | idle = &__get_cpu_var(s390_idle); | ||
165 | spin_lock(&idle->lock); | ||
166 | idle->idle_count++; | ||
167 | idle->in_idle = 1; | ||
168 | idle->idle_enter = get_clock(); | ||
169 | spin_unlock(&idle->lock); | ||
170 | #endif | ||
171 | trace_hardirqs_on(); | 164 | trace_hardirqs_on(); |
172 | /* Wait for external, I/O or machine check interrupt. */ | 165 | /* Wait for external, I/O or machine check interrupt. */ |
173 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 166 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | |
@@ -177,9 +170,10 @@ static void default_idle(void) | |||
177 | void cpu_idle(void) | 170 | void cpu_idle(void) |
178 | { | 171 | { |
179 | for (;;) { | 172 | for (;;) { |
173 | tick_nohz_stop_sched_tick(); | ||
180 | while (!need_resched()) | 174 | while (!need_resched()) |
181 | default_idle(); | 175 | default_idle(); |
182 | 176 | tick_nohz_restart_sched_tick(); | |
183 | preempt_enable_no_resched(); | 177 | preempt_enable_no_resched(); |
184 | schedule(); | 178 | schedule(); |
185 | preempt_disable(); | 179 | preempt_disable(); |
@@ -201,6 +195,7 @@ void show_regs(struct pt_regs *regs) | |||
201 | /* Show stack backtrace if pt_regs is from kernel mode */ | 195 | /* Show stack backtrace if pt_regs is from kernel mode */ |
202 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 196 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
203 | show_trace(NULL, (unsigned long *) regs->gprs[15]); | 197 | show_trace(NULL, (unsigned long *) regs->gprs[15]); |
198 | show_last_breaking_event(regs); | ||
204 | } | 199 | } |
205 | 200 | ||
206 | extern void kernel_thread_starter(void); | 201 | extern void kernel_thread_starter(void); |
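The restructured idle path factors the notifier handling into s390_idle_enter()/s390_idle_leave(), which fire the existing atomic idle chain with S390_CPU_IDLE and S390_CPU_NOT_IDLE around the enabled-wait PSW, while the cpu_idle() loop now brackets default_idle() with tick_nohz_stop_sched_tick()/tick_nohz_restart_sched_tick(). A hedged sketch of a consumer of that chain (the callback name is invented, and the include assumes the register_idle_notifier() declarations live in <asm/processor.h> as on contemporary kernels):

    #include <linux/notifier.h>
    #include <asm/processor.h>      /* assumed home of register_idle_notifier() */

    static int example_idle_notify(struct notifier_block *self,
                                   unsigned long action, void *hcpu)
    {
            switch (action) {
            case S390_CPU_IDLE:
                    /* cpu (long) hcpu is about to load an enabled wait PSW */
                    break;
            case S390_CPU_NOT_IDLE:
                    /* cpu (long) hcpu has left the wait state */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_idle_nb = {
            .notifier_call = example_idle_notify,
    };

    /* registered somewhere during init:
     *      register_idle_notifier(&example_idle_nb);
     */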
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 6e036bae9875..58a064296987 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/system.h> | 41 | #include <asm/system.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/unistd.h> | 43 | #include <asm/unistd.h> |
44 | #include "entry.h" | ||
44 | 45 | ||
45 | #ifdef CONFIG_COMPAT | 46 | #ifdef CONFIG_COMPAT |
46 | #include "compat_ptrace.h" | 47 | #include "compat_ptrace.h" |
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index acf93dba7727..e019b419efc6 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -13,11 +13,12 @@ | |||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | 16 | #include <asm/cpu.h> | |
17 | #include <asm/lowcore.h> | 17 | #include <asm/lowcore.h> |
18 | #include <asm/s390_ext.h> | 18 | #include <asm/s390_ext.h> |
19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
20 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
21 | #include "entry.h" | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * ext_int_hash[index] is the start of the list for all external interrupts | 24 | * ext_int_hash[index] is the start of the list for all external interrupts |
@@ -119,13 +120,10 @@ void do_extint(struct pt_regs *regs, unsigned short code) | |||
119 | 120 | ||
120 | old_regs = set_irq_regs(regs); | 121 | old_regs = set_irq_regs(regs); |
121 | irq_enter(); | 122 | irq_enter(); |
122 | asm volatile ("mc 0,0"); | 123 | s390_idle_check(); |
123 | if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer) | 124 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) |
124 | /** | 125 | /* Serve timer interrupts first. */ |
125 | * Make sure that the i/o interrupt did not "overtake" | 126 | clock_comparator_work(); |
126 | * the last HZ timer interrupt. | ||
127 | */ | ||
128 | account_ticks(S390_lowcore.int_clock); | ||
129 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | 127 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; |
130 | index = ext_hash(code); | 128 | index = ext_hash(code); |
131 | for (p = ext_int_hash[index]; p; p = p->next) { | 129 | for (p = ext_int_hash[index]; p; p = p->next) { |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 290e504061a3..7141147e6b63 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/pfn.h> | 39 | #include <linux/pfn.h> |
40 | #include <linux/ctype.h> | 40 | #include <linux/ctype.h> |
41 | #include <linux/reboot.h> | 41 | #include <linux/reboot.h> |
42 | #include <linux/topology.h> | ||
42 | 43 | ||
43 | #include <asm/ipl.h> | 44 | #include <asm/ipl.h> |
44 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
@@ -427,7 +428,7 @@ setup_lowcore(void) | |||
427 | lc->io_new_psw.mask = psw_kernel_bits; | 428 | lc->io_new_psw.mask = psw_kernel_bits; |
428 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 429 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; |
429 | lc->ipl_device = S390_lowcore.ipl_device; | 430 | lc->ipl_device = S390_lowcore.ipl_device; |
430 | lc->jiffy_timer = -1LL; | 431 | lc->clock_comparator = -1ULL; |
431 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; | 432 | lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; |
432 | lc->async_stack = (unsigned long) | 433 | lc->async_stack = (unsigned long) |
433 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; | 434 | __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; |
@@ -687,7 +688,7 @@ static __init unsigned int stfl(void) | |||
687 | return S390_lowcore.stfl_fac_list; | 688 | return S390_lowcore.stfl_fac_list; |
688 | } | 689 | } |
689 | 690 | ||
690 | static __init int stfle(unsigned long long *list, int doublewords) | 691 | static int __init __stfle(unsigned long long *list, int doublewords) |
691 | { | 692 | { |
692 | typedef struct { unsigned long long _[doublewords]; } addrtype; | 693 | typedef struct { unsigned long long _[doublewords]; } addrtype; |
693 | register unsigned long __nr asm("0") = doublewords - 1; | 694 | register unsigned long __nr asm("0") = doublewords - 1; |
@@ -697,6 +698,13 @@ static __init int stfle(unsigned long long *list, int doublewords) | |||
697 | return __nr + 1; | 698 | return __nr + 1; |
698 | } | 699 | } |
699 | 700 | ||
701 | int __init stfle(unsigned long long *list, int doublewords) | ||
702 | { | ||
703 | if (!(stfl() & (1UL << 24))) | ||
704 | return -EOPNOTSUPP; | ||
705 | return __stfle(list, doublewords); | ||
706 | } | ||
707 | |||
700 | /* | 708 | /* |
701 | * Setup hardware capabilities. | 709 | * Setup hardware capabilities. |
702 | */ | 710 | */ |
@@ -741,7 +749,7 @@ static void __init setup_hwcaps(void) | |||
741 | * HWCAP_S390_DFP bit 6. | 749 | * HWCAP_S390_DFP bit 6. |
742 | */ | 750 | */ |
743 | if ((elf_hwcap & (1UL << 2)) && | 751 | if ((elf_hwcap & (1UL << 2)) && |
744 | stfle(&facility_list_extended, 1) > 0) { | 752 | __stfle(&facility_list_extended, 1) > 0) { |
745 | if (facility_list_extended & (1ULL << (64 - 43))) | 753 | if (facility_list_extended & (1ULL << (64 - 43))) |
746 | elf_hwcap |= 1UL << 6; | 754 | elf_hwcap |= 1UL << 6; |
747 | } | 755 | } |
@@ -823,6 +831,7 @@ setup_arch(char **cmdline_p) | |||
823 | 831 | ||
824 | cpu_init(); | 832 | cpu_init(); |
825 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; | 833 | __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; |
834 | s390_init_cpu_topology(); | ||
826 | 835 | ||
827 | /* | 836 | /* |
828 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). | 837 | * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). |
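setup.c now splits the facility-list helper in two: __stfle() issues the raw STFLE instruction (still used directly by setup_hwcaps()), while the new stfle() wrapper first checks STFL bit 24 (store-facility-list-extended) and returns -EOPNOTSUPP when that facility is absent, so other code such as the topology support can call it safely. A hedged caller sketch (valid only from __init code, since stfle() itself is __init; the prototype is assumed to be in scope via the asm headers):

    #include <linux/init.h>
    #include <linux/errno.h>

    static int __init example_check_facilities(void)
    {
            unsigned long long facilities[2];
            int rc;

            rc = stfle(facilities, 2);
            if (rc < 0)
                    return rc;      /* -EOPNOTSUPP: no extended facility list */
            /* rc doublewords of facility bits are now valid in facilities[] */
            return 0;
    }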
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4449bf32cbf1..b97682040215 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -27,6 +27,7 @@ | |||
27 | #include <asm/ucontext.h> | 27 | #include <asm/ucontext.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/lowcore.h> | 29 | #include <asm/lowcore.h> |
30 | #include "entry.h" | ||
30 | 31 | ||
31 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
32 | 33 | ||
@@ -235,6 +236,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size) | |||
235 | /* Default to using normal stack */ | 236 | /* Default to using normal stack */ |
236 | sp = regs->gprs[15]; | 237 | sp = regs->gprs[15]; |
237 | 238 | ||
239 | /* Overflow on alternate signal stack gives SIGSEGV. */ | ||
240 | if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL)) | ||
241 | return (void __user *) -1UL; | ||
242 | |||
238 | /* This is the X/Open sanctioned signal stack switching. */ | 243 | /* This is the X/Open sanctioned signal stack switching. */ |
239 | if (ka->sa.sa_flags & SA_ONSTACK) { | 244 | if (ka->sa.sa_flags & SA_ONSTACK) { |
240 | if (! sas_ss_flags(sp)) | 245 | if (! sas_ss_flags(sp)) |
@@ -270,6 +275,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
270 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) | 275 | if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe))) |
271 | goto give_sigsegv; | 276 | goto give_sigsegv; |
272 | 277 | ||
278 | if (frame == (void __user *) -1UL) | ||
279 | goto give_sigsegv; | ||
280 | |||
273 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) | 281 | if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE)) |
274 | goto give_sigsegv; | 282 | goto give_sigsegv; |
275 | 283 | ||
@@ -327,6 +335,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
327 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) | 335 | if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe))) |
328 | goto give_sigsegv; | 336 | goto give_sigsegv; |
329 | 337 | ||
338 | if (frame == (void __user *) -1UL) | ||
339 | goto give_sigsegv; | ||
340 | |||
330 | if (copy_siginfo_to_user(&frame->info, info)) | 341 | if (copy_siginfo_to_user(&frame->info, info)) |
331 | goto give_sigsegv; | 342 | goto give_sigsegv; |
332 | 343 | ||
@@ -474,11 +485,6 @@ void do_signal(struct pt_regs *regs) | |||
474 | int ret; | 485 | int ret; |
475 | #ifdef CONFIG_COMPAT | 486 | #ifdef CONFIG_COMPAT |
476 | if (test_thread_flag(TIF_31BIT)) { | 487 | if (test_thread_flag(TIF_31BIT)) { |
477 | extern int handle_signal32(unsigned long sig, | ||
478 | struct k_sigaction *ka, | ||
479 | siginfo_t *info, | ||
480 | sigset_t *oldset, | ||
481 | struct pt_regs *regs); | ||
482 | ret = handle_signal32(signr, &ka, &info, oldset, regs); | 488 | ret = handle_signal32(signr, &ka, &info, oldset, regs); |
483 | } | 489 | } |
484 | else | 490 | else |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8f894d380a62..0dfa988c1b26 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/lowcore.h> | 44 | #include <asm/lowcore.h> |
45 | #include <asm/sclp.h> | 45 | #include <asm/sclp.h> |
46 | #include <asm/cpu.h> | 46 | #include <asm/cpu.h> |
47 | #include "entry.h" | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * An array with a pointer the lowcore of every CPU. | 50 | * An array with a pointer the lowcore of every CPU. |
@@ -67,13 +68,12 @@ enum s390_cpu_state { | |||
67 | CPU_STATE_CONFIGURED, | 68 | CPU_STATE_CONFIGURED, |
68 | }; | 69 | }; |
69 | 70 | ||
70 | #ifdef CONFIG_HOTPLUG_CPU | 71 | DEFINE_MUTEX(smp_cpu_state_mutex); |
71 | static DEFINE_MUTEX(smp_cpu_state_mutex); | 72 | int smp_cpu_polarization[NR_CPUS]; |
72 | #endif | ||
73 | static int smp_cpu_state[NR_CPUS]; | 73 | static int smp_cpu_state[NR_CPUS]; |
74 | static int cpu_management; | ||
74 | 75 | ||
75 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 76 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
76 | DEFINE_PER_CPU(struct s390_idle_data, s390_idle); | ||
77 | 77 | ||
78 | static void smp_ext_bitcall(int, ec_bit_sig); | 78 | static void smp_ext_bitcall(int, ec_bit_sig); |
79 | 79 | ||
@@ -298,7 +298,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | |||
298 | /* | 298 | /* |
299 | * this function sends a 'purge tlb' signal to another CPU. | 299 | * this function sends a 'purge tlb' signal to another CPU. |
300 | */ | 300 | */ |
301 | void smp_ptlb_callback(void *info) | 301 | static void smp_ptlb_callback(void *info) |
302 | { | 302 | { |
303 | __tlb_flush_local(); | 303 | __tlb_flush_local(); |
304 | } | 304 | } |
@@ -456,6 +456,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) | |||
456 | if (cpu_known(cpu_id)) | 456 | if (cpu_known(cpu_id)) |
457 | continue; | 457 | continue; |
458 | __cpu_logical_map[logical_cpu] = cpu_id; | 458 | __cpu_logical_map[logical_cpu] = cpu_id; |
459 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | ||
459 | if (!cpu_stopped(logical_cpu)) | 460 | if (!cpu_stopped(logical_cpu)) |
460 | continue; | 461 | continue; |
461 | cpu_set(logical_cpu, cpu_present_map); | 462 | cpu_set(logical_cpu, cpu_present_map); |
@@ -489,6 +490,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) | |||
489 | if (cpu_known(cpu_id)) | 490 | if (cpu_known(cpu_id)) |
490 | continue; | 491 | continue; |
491 | __cpu_logical_map[logical_cpu] = cpu_id; | 492 | __cpu_logical_map[logical_cpu] = cpu_id; |
493 | smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; | ||
492 | cpu_set(logical_cpu, cpu_present_map); | 494 | cpu_set(logical_cpu, cpu_present_map); |
493 | if (cpu >= info->configured) | 495 | if (cpu >= info->configured) |
494 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; | 496 | smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; |
@@ -846,6 +848,7 @@ void __init smp_prepare_boot_cpu(void) | |||
846 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; | 848 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
847 | current_set[0] = current; | 849 | current_set[0] = current; |
848 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; | 850 | smp_cpu_state[0] = CPU_STATE_CONFIGURED; |
851 | smp_cpu_polarization[0] = POLARIZATION_UNKNWN; | ||
849 | spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); | 852 | spin_lock_init(&(&__get_cpu_var(s390_idle))->lock); |
850 | } | 853 | } |
851 | 854 | ||
@@ -897,15 +900,19 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf, | |||
897 | case 0: | 900 | case 0: |
898 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { | 901 | if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) { |
899 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); | 902 | rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); |
900 | if (!rc) | 903 | if (!rc) { |
901 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; | 904 | smp_cpu_state[cpu] = CPU_STATE_STANDBY; |
905 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | ||
906 | } | ||
902 | } | 907 | } |
903 | break; | 908 | break; |
904 | case 1: | 909 | case 1: |
905 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { | 910 | if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) { |
906 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); | 911 | rc = sclp_cpu_configure(__cpu_logical_map[cpu]); |
907 | if (!rc) | 912 | if (!rc) { |
908 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; | 913 | smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; |
914 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | ||
915 | } | ||
909 | } | 916 | } |
910 | break; | 917 | break; |
911 | default: | 918 | default: |
@@ -919,6 +926,34 @@ out: | |||
919 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); | 926 | static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); |
920 | #endif /* CONFIG_HOTPLUG_CPU */ | 927 | #endif /* CONFIG_HOTPLUG_CPU */ |
921 | 928 | ||
929 | static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf) | ||
930 | { | ||
931 | int cpu = dev->id; | ||
932 | ssize_t count; | ||
933 | |||
934 | mutex_lock(&smp_cpu_state_mutex); | ||
935 | switch (smp_cpu_polarization[cpu]) { | ||
936 | case POLARIZATION_HRZ: | ||
937 | count = sprintf(buf, "horizontal\n"); | ||
938 | break; | ||
939 | case POLARIZATION_VL: | ||
940 | count = sprintf(buf, "vertical:low\n"); | ||
941 | break; | ||
942 | case POLARIZATION_VM: | ||
943 | count = sprintf(buf, "vertical:medium\n"); | ||
944 | break; | ||
945 | case POLARIZATION_VH: | ||
946 | count = sprintf(buf, "vertical:high\n"); | ||
947 | break; | ||
948 | default: | ||
949 | count = sprintf(buf, "unknown\n"); | ||
950 | break; | ||
951 | } | ||
952 | mutex_unlock(&smp_cpu_state_mutex); | ||
953 | return count; | ||
954 | } | ||
955 | static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); | ||
956 | |||
922 | static ssize_t show_cpu_address(struct sys_device *dev, char *buf) | 957 | static ssize_t show_cpu_address(struct sys_device *dev, char *buf) |
923 | { | 958 | { |
924 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); | 959 | return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); |
@@ -931,6 +966,7 @@ static struct attribute *cpu_common_attrs[] = { | |||
931 | &attr_configure.attr, | 966 | &attr_configure.attr, |
932 | #endif | 967 | #endif |
933 | &attr_address.attr, | 968 | &attr_address.attr, |
969 | &attr_polarization.attr, | ||
934 | NULL, | 970 | NULL, |
935 | }; | 971 | }; |
936 | 972 | ||
@@ -1075,11 +1111,48 @@ static ssize_t __ref rescan_store(struct sys_device *dev, | |||
1075 | out: | 1111 | out: |
1076 | put_online_cpus(); | 1112 | put_online_cpus(); |
1077 | mutex_unlock(&smp_cpu_state_mutex); | 1113 | mutex_unlock(&smp_cpu_state_mutex); |
1114 | if (!cpus_empty(newcpus)) | ||
1115 | topology_schedule_update(); | ||
1078 | return rc ? rc : count; | 1116 | return rc ? rc : count; |
1079 | } | 1117 | } |
1080 | static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); | 1118 | static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); |
1081 | #endif /* CONFIG_HOTPLUG_CPU */ | 1119 | #endif /* CONFIG_HOTPLUG_CPU */ |
1082 | 1120 | ||
1121 | static ssize_t dispatching_show(struct sys_device *dev, char *buf) | ||
1122 | { | ||
1123 | ssize_t count; | ||
1124 | |||
1125 | mutex_lock(&smp_cpu_state_mutex); | ||
1126 | count = sprintf(buf, "%d\n", cpu_management); | ||
1127 | mutex_unlock(&smp_cpu_state_mutex); | ||
1128 | return count; | ||
1129 | } | ||
1130 | |||
1131 | static ssize_t dispatching_store(struct sys_device *dev, const char *buf, | ||
1132 | size_t count) | ||
1133 | { | ||
1134 | int val, rc; | ||
1135 | char delim; | ||
1136 | |||
1137 | if (sscanf(buf, "%d %c", &val, &delim) != 1) | ||
1138 | return -EINVAL; | ||
1139 | if (val != 0 && val != 1) | ||
1140 | return -EINVAL; | ||
1141 | rc = 0; | ||
1142 | mutex_lock(&smp_cpu_state_mutex); | ||
1143 | get_online_cpus(); | ||
1144 | if (cpu_management == val) | ||
1145 | goto out; | ||
1146 | rc = topology_set_cpu_management(val); | ||
1147 | if (!rc) | ||
1148 | cpu_management = val; | ||
1149 | out: | ||
1150 | put_online_cpus(); | ||
1151 | mutex_unlock(&smp_cpu_state_mutex); | ||
1152 | return rc ? rc : count; | ||
1153 | } | ||
1154 | static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store); | ||
1155 | |||
1083 | static int __init topology_init(void) | 1156 | static int __init topology_init(void) |
1084 | { | 1157 | { |
1085 | int cpu; | 1158 | int cpu; |
@@ -1093,6 +1166,10 @@ static int __init topology_init(void) | |||
1093 | if (rc) | 1166 | if (rc) |
1094 | return rc; | 1167 | return rc; |
1095 | #endif | 1168 | #endif |
1169 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
1170 | &attr_dispatching.attr); | ||
1171 | if (rc) | ||
1172 | return rc; | ||
1096 | for_each_present_cpu(cpu) { | 1173 | for_each_present_cpu(cpu) { |
1097 | rc = smp_add_present_cpu(cpu); | 1174 | rc = smp_add_present_cpu(cpu); |
1098 | if (rc) | 1175 | if (rc) |
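Two new sysfs interfaces come with the topology support: a global dispatching attribute on the cpu sysdev class (0 = horizontal, 1 = vertical CPU management, guarded by smp_cpu_state_mutex) and a read-only per-cpu polarization attribute reporting horizontal, vertical:low, vertical:medium, vertical:high or unknown. An illustrative userspace sketch, assuming the usual sysfs mount point and the file paths implied by the attribute names above:

    #include <stdio.h>

    int main(void)
    {
            char buf[32];
            FILE *f;

            f = fopen("/sys/devices/system/cpu/dispatching", "w");
            if (f) {
                    fputs("1\n", f);        /* request vertical polarization */
                    fclose(f);
            }
            f = fopen("/sys/devices/system/cpu/cpu0/polarization", "r");
            if (f) {
                    if (fgets(buf, sizeof(buf), f))
                            printf("cpu0 polarization: %s", buf);
                    fclose(f);
            }
            return 0;
    }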
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index fefee99f28aa..988d0d64c2c8 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -29,8 +29,8 @@ | |||
29 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
30 | #include <linux/unistd.h> | 30 | #include <linux/unistd.h> |
31 | #include <linux/ipc.h> | 31 | #include <linux/ipc.h> |
32 | |||
33 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
33 | #include "entry.h" | ||
34 | 34 | ||
35 | /* | 35 | /* |
36 | * sys_pipe() is the normal C calling standard for creating | 36 | * sys_pipe() is the normal C calling standard for creating |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index cb232c155360..7aec676fefd5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/timex.h> | 30 | #include <linux/timex.h> |
31 | #include <linux/notifier.h> | 31 | #include <linux/notifier.h> |
32 | #include <linux/clocksource.h> | 32 | #include <linux/clocksource.h> |
33 | 33 | #include <linux/clockchips.h> | |
34 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
35 | #include <asm/delay.h> | 35 | #include <asm/delay.h> |
36 | #include <asm/s390_ext.h> | 36 | #include <asm/s390_ext.h> |
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/irq_regs.h> | 39 | #include <asm/irq_regs.h> |
40 | #include <asm/timer.h> | 40 | #include <asm/timer.h> |
41 | #include <asm/etr.h> | 41 | #include <asm/etr.h> |
42 | #include <asm/cio.h> | ||
42 | 43 | ||
43 | /* change this if you have some constant time drift */ | 44 | /* change this if you have some constant time drift */ |
44 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) | 45 | #define USECS_PER_JIFFY ((unsigned long) 1000000/HZ) |
@@ -57,16 +58,16 @@ | |||
57 | 58 | ||
58 | static ext_int_info_t ext_int_info_cc; | 59 | static ext_int_info_t ext_int_info_cc; |
59 | static ext_int_info_t ext_int_etr_cc; | 60 | static ext_int_info_t ext_int_etr_cc; |
60 | static u64 init_timer_cc; | ||
61 | static u64 jiffies_timer_cc; | 61 | static u64 jiffies_timer_cc; |
62 | static u64 xtime_cc; | 62 | |
63 | static DEFINE_PER_CPU(struct clock_event_device, comparators); | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * Scheduler clock - returns current time in nanosec units. | 66 | * Scheduler clock - returns current time in nanosec units. |
66 | */ | 67 | */ |
67 | unsigned long long sched_clock(void) | 68 | unsigned long long sched_clock(void) |
68 | { | 69 | { |
69 | return ((get_clock() - jiffies_timer_cc) * 125) >> 9; | 70 | return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9; |
70 | } | 71 | } |
71 | 72 | ||
72 | /* | 73 | /* |
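For reference, the new sched_clock() conversion above turns TOD clock units into nanoseconds: one TOD unit is 2^-12 microseconds, i.e. 1000/4096 ns, and 1000/4096 reduces to 125/512, so multiplying by 125 and shifting right by 9 is an exact integer form of that scaling. A small illustrative helper (not part of the patch):

/* Illustration only: TOD clock delta -> nanoseconds, as done in sched_clock(). */
static inline unsigned long long tod_to_ns(unsigned long long tod_delta)
{
	return (tod_delta * 125) >> 9;	/* == tod_delta * 1000 / 4096 */
}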
@@ -95,162 +96,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime) | |||
95 | #define s390_do_profile() do { ; } while(0) | 96 | #define s390_do_profile() do { ; } while(0) |
96 | #endif /* CONFIG_PROFILING */ | 97 | #endif /* CONFIG_PROFILING */ |
97 | 98 | ||
98 | /* | 99 | void clock_comparator_work(void) |
99 | * Advance the per cpu tick counter up to the time given with the | ||
100 | * "time" argument. The per cpu update consists of accounting | ||
101 | * the virtual cpu time, calling update_process_times and calling | ||
102 | * the profiling hook. If xtime is before time it is advanced as well. | ||
103 | */ | ||
104 | void account_ticks(u64 time) | ||
105 | { | 100 | { |
106 | __u32 ticks; | 101 | struct clock_event_device *cd; |
107 | __u64 tmp; | ||
108 | |||
109 | /* Calculate how many ticks have passed. */ | ||
110 | if (time < S390_lowcore.jiffy_timer) | ||
111 | return; | ||
112 | tmp = time - S390_lowcore.jiffy_timer; | ||
113 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */ | ||
114 | ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1; | ||
115 | S390_lowcore.jiffy_timer += | ||
116 | CLK_TICKS_PER_JIFFY * (__u64) ticks; | ||
117 | } else if (tmp >= CLK_TICKS_PER_JIFFY) { | ||
118 | ticks = 2; | ||
119 | S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY; | ||
120 | } else { | ||
121 | ticks = 1; | ||
122 | S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY; | ||
123 | } | ||
124 | |||
125 | #ifdef CONFIG_SMP | ||
126 | /* | ||
127 | * Do not rely on the boot cpu to do the calls to do_timer. | ||
128 | * Spread it over all cpus instead. | ||
129 | */ | ||
130 | write_seqlock(&xtime_lock); | ||
131 | if (S390_lowcore.jiffy_timer > xtime_cc) { | ||
132 | __u32 xticks; | ||
133 | tmp = S390_lowcore.jiffy_timer - xtime_cc; | ||
134 | if (tmp >= 2*CLK_TICKS_PER_JIFFY) { | ||
135 | xticks = __div(tmp, CLK_TICKS_PER_JIFFY); | ||
136 | xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY; | ||
137 | } else { | ||
138 | xticks = 1; | ||
139 | xtime_cc += CLK_TICKS_PER_JIFFY; | ||
140 | } | ||
141 | do_timer(xticks); | ||
142 | } | ||
143 | write_sequnlock(&xtime_lock); | ||
144 | #else | ||
145 | do_timer(ticks); | ||
146 | #endif | ||
147 | |||
148 | while (ticks--) | ||
149 | update_process_times(user_mode(get_irq_regs())); | ||
150 | 102 | ||
103 | S390_lowcore.clock_comparator = -1ULL; | ||
104 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
105 | cd = &__get_cpu_var(comparators); | ||
106 | cd->event_handler(cd); | ||
151 | s390_do_profile(); | 107 | s390_do_profile(); |
152 | } | 108 | } |
153 | 109 | ||
154 | #ifdef CONFIG_NO_IDLE_HZ | ||
155 | |||
156 | #ifdef CONFIG_NO_IDLE_HZ_INIT | ||
157 | int sysctl_hz_timer = 0; | ||
158 | #else | ||
159 | int sysctl_hz_timer = 1; | ||
160 | #endif | ||
161 | |||
162 | /* | ||
163 | * Stop the HZ tick on the current CPU. | ||
164 | * Only cpu_idle may call this function. | ||
165 | */ | ||
166 | static void stop_hz_timer(void) | ||
167 | { | ||
168 | unsigned long flags; | ||
169 | unsigned long seq, next; | ||
170 | __u64 timer, todval; | ||
171 | int cpu = smp_processor_id(); | ||
172 | |||
173 | if (sysctl_hz_timer != 0) | ||
174 | return; | ||
175 | |||
176 | cpu_set(cpu, nohz_cpu_mask); | ||
177 | |||
178 | /* | ||
179 | * Leave the clock comparator set up for the next timer | ||
180 | * tick if either rcu or a softirq is pending. | ||
181 | */ | ||
182 | if (rcu_needs_cpu(cpu) || local_softirq_pending()) { | ||
183 | cpu_clear(cpu, nohz_cpu_mask); | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * This cpu is going really idle. Set up the clock comparator | ||
189 | * for the next event. | ||
190 | */ | ||
191 | next = next_timer_interrupt(); | ||
192 | do { | ||
193 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
194 | timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64; | ||
195 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | ||
196 | todval = -1ULL; | ||
197 | /* Be careful about overflows. */ | ||
198 | if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) { | ||
199 | timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY; | ||
200 | if (timer >= jiffies_timer_cc) | ||
201 | todval = timer; | ||
202 | } | ||
203 | set_clock_comparator(todval); | ||
204 | } | ||
205 | |||
206 | /* | 110 | /* |
207 | * Start the HZ tick on the current CPU. | 111 | * Fixup the clock comparator. |
208 | * Only cpu_idle may call this function. | ||
209 | */ | 112 | */ |
210 | static void start_hz_timer(void) | 113 | static void fixup_clock_comparator(unsigned long long delta) |
211 | { | 114 | { |
212 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) | 115 | /* If nobody is waiting there's nothing to fix. */ |
116 | if (S390_lowcore.clock_comparator == -1ULL) | ||
213 | return; | 117 | return; |
214 | account_ticks(get_clock()); | 118 | S390_lowcore.clock_comparator += delta; |
215 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | 119 | set_clock_comparator(S390_lowcore.clock_comparator); |
216 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | ||
217 | } | ||
218 | |||
219 | static int nohz_idle_notify(struct notifier_block *self, | ||
220 | unsigned long action, void *hcpu) | ||
221 | { | ||
222 | switch (action) { | ||
223 | case S390_CPU_IDLE: | ||
224 | stop_hz_timer(); | ||
225 | break; | ||
226 | case S390_CPU_NOT_IDLE: | ||
227 | start_hz_timer(); | ||
228 | break; | ||
229 | } | ||
230 | return NOTIFY_OK; | ||
231 | } | 120 | } |
232 | 121 | ||
233 | static struct notifier_block nohz_idle_nb = { | 122 | static int s390_next_event(unsigned long delta, |
234 | .notifier_call = nohz_idle_notify, | 123 | struct clock_event_device *evt) |
235 | }; | ||
236 | |||
237 | static void __init nohz_init(void) | ||
238 | { | 124 | { |
239 | if (register_idle_notifier(&nohz_idle_nb)) | 125 | S390_lowcore.clock_comparator = get_clock() + delta; |
240 | panic("Couldn't register idle notifier"); | 126 | set_clock_comparator(S390_lowcore.clock_comparator); |
127 | return 0; | ||
241 | } | 128 | } |
242 | 129 | ||
243 | #endif | 130 | static void s390_set_mode(enum clock_event_mode mode, |
244 | 131 | struct clock_event_device *evt) | |
245 | /* | ||
246 | * Set up per cpu jiffy timer and set the clock comparator. | ||
247 | */ | ||
248 | static void setup_jiffy_timer(void) | ||
249 | { | 132 | { |
250 | /* Set up clock comparator to next jiffy. */ | ||
251 | S390_lowcore.jiffy_timer = | ||
252 | jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY; | ||
253 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
254 | } | 133 | } |
255 | 134 | ||
256 | /* | 135 | /* |
@@ -259,7 +138,26 @@ static void setup_jiffy_timer(void) | |||
259 | */ | 138 | */ |
260 | void init_cpu_timer(void) | 139 | void init_cpu_timer(void) |
261 | { | 140 | { |
262 | setup_jiffy_timer(); | 141 | struct clock_event_device *cd; |
142 | int cpu; | ||
143 | |||
144 | S390_lowcore.clock_comparator = -1ULL; | ||
145 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
146 | |||
147 | cpu = smp_processor_id(); | ||
148 | cd = &per_cpu(comparators, cpu); | ||
149 | cd->name = "comparator"; | ||
150 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
151 | cd->mult = 16777; | ||
152 | cd->shift = 12; | ||
153 | cd->min_delta_ns = 1; | ||
154 | cd->max_delta_ns = LONG_MAX; | ||
155 | cd->rating = 400; | ||
156 | cd->cpumask = cpumask_of_cpu(cpu); | ||
157 | cd->set_next_event = s390_next_event; | ||
158 | cd->set_mode = s390_set_mode; | ||
159 | |||
160 | clockevents_register_device(cd); | ||
263 | 161 | ||
264 | /* Enable clock comparator timer interrupt. */ | 162 | /* Enable clock comparator timer interrupt. */ |
265 | __ctl_set_bit(0,11); | 163 | __ctl_set_bit(0,11); |
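The mult/shift pair programmed above feeds the generic clockevents conversion, which computes the device delta roughly as (delta_ns * mult) >> shift. With mult = 16777 and shift = 12 that is about delta_ns * 4.096, matching the 4096 TOD clock units per microsecond that s390_next_event() adds to get_clock(). An illustrative sketch of the arithmetic (the clockevents core does this itself; the helper exists only for clarity):

/* Illustration only: nanoseconds -> TOD clock units with mult = 16777, shift = 12. */
static inline unsigned long long ns_to_tod(unsigned long long delta_ns)
{
	return (delta_ns * 16777) >> 12;	/* ~= delta_ns * 4.096 */
}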
@@ -270,8 +168,6 @@ void init_cpu_timer(void) | |||
270 | 168 | ||
271 | static void clock_comparator_interrupt(__u16 code) | 169 | static void clock_comparator_interrupt(__u16 code) |
272 | { | 170 | { |
273 | /* set clock comparator for next tick */ | ||
274 | set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION); | ||
275 | } | 171 | } |
276 | 172 | ||
277 | static void etr_reset(void); | 173 | static void etr_reset(void); |
@@ -316,8 +212,9 @@ static struct clocksource clocksource_tod = { | |||
316 | */ | 212 | */ |
317 | void __init time_init(void) | 213 | void __init time_init(void) |
318 | { | 214 | { |
215 | u64 init_timer_cc; | ||
216 | |||
319 | init_timer_cc = reset_tod_clock(); | 217 | init_timer_cc = reset_tod_clock(); |
320 | xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY; | ||
321 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; | 218 | jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY; |
322 | 219 | ||
323 | /* set xtime */ | 220 | /* set xtime */ |
@@ -342,10 +239,6 @@ void __init time_init(void) | |||
342 | /* Enable TOD clock interrupts on the boot cpu. */ | 239 | /* Enable TOD clock interrupts on the boot cpu. */ |
343 | init_cpu_timer(); | 240 | init_cpu_timer(); |
344 | 241 | ||
345 | #ifdef CONFIG_NO_IDLE_HZ | ||
346 | nohz_init(); | ||
347 | #endif | ||
348 | |||
349 | #ifdef CONFIG_VIRT_TIMER | 242 | #ifdef CONFIG_VIRT_TIMER |
350 | vtime_init(); | 243 | vtime_init(); |
351 | #endif | 244 | #endif |
@@ -699,53 +592,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | |||
699 | } | 592 | } |
700 | 593 | ||
701 | /* | 594 | /* |
702 | * The time is "clock". xtime is what we think the time is. | 595 | * The time is "clock". old is what we think the time is. |
703 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | 596 | * Adjust the value by a multiple of jiffies and add the delta to ntp. |
704 | * "delay" is an approximation how long the synchronization took. If | 597 | * "delay" is an approximation how long the synchronization took. If |
705 | * the time correction is positive, then "delay" is subtracted from | 598 | * the time correction is positive, then "delay" is subtracted from |
706 | * the time difference and only the remaining part is passed to ntp. | 599 | * the time difference and only the remaining part is passed to ntp. |
707 | */ | 600 | */ |
708 | static void etr_adjust_time(unsigned long long clock, unsigned long long delay) | 601 | static unsigned long long etr_adjust_time(unsigned long long old, |
602 | unsigned long long clock, | ||
603 | unsigned long long delay) | ||
709 | { | 604 | { |
710 | unsigned long long delta, ticks; | 605 | unsigned long long delta, ticks; |
711 | struct timex adjust; | 606 | struct timex adjust; |
712 | 607 | ||
713 | /* | 608 | if (clock > old) { |
714 | * We don't have to take the xtime lock because the cpu | ||
715 | * executing etr_adjust_time is running disabled in | ||
716 | * tasklet context and all other cpus are looping in | ||
717 | * etr_sync_cpu_start. | ||
718 | */ | ||
719 | if (clock > xtime_cc) { | ||
720 | /* It is later than we thought. */ | 609 | /* It is later than we thought. */ |
721 | delta = ticks = clock - xtime_cc; | 610 | delta = ticks = clock - old; |
722 | delta = ticks = (delta < delay) ? 0 : delta - delay; | 611 | delta = ticks = (delta < delay) ? 0 : delta - delay; |
723 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | 612 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); |
724 | init_timer_cc = init_timer_cc + delta; | ||
725 | jiffies_timer_cc = jiffies_timer_cc + delta; | ||
726 | xtime_cc = xtime_cc + delta; | ||
727 | adjust.offset = ticks * (1000000 / HZ); | 613 | adjust.offset = ticks * (1000000 / HZ); |
728 | } else { | 614 | } else { |
729 | /* It is earlier than we thought. */ | 615 | /* It is earlier than we thought. */ |
730 | delta = ticks = xtime_cc - clock; | 616 | delta = ticks = old - clock; |
731 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | 617 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); |
732 | init_timer_cc = init_timer_cc - delta; | 618 | delta = -delta; |
733 | jiffies_timer_cc = jiffies_timer_cc - delta; | ||
734 | xtime_cc = xtime_cc - delta; | ||
735 | adjust.offset = -ticks * (1000000 / HZ); | 619 | adjust.offset = -ticks * (1000000 / HZ); |
736 | } | 620 | } |
621 | jiffies_timer_cc += delta; | ||
737 | if (adjust.offset != 0) { | 622 | if (adjust.offset != 0) { |
738 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | 623 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", |
739 | adjust.offset); | 624 | adjust.offset); |
740 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | 625 | adjust.modes = ADJ_OFFSET_SINGLESHOT; |
741 | do_adjtimex(&adjust); | 626 | do_adjtimex(&adjust); |
742 | } | 627 | } |
628 | return delta; | ||
743 | } | 629 | } |
744 | 630 | ||
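A worked example of the adjustment arithmetic above, with hypothetical numbers and HZ = 100 assumed (so one jiffy corresponds to 10000 microseconds of TOD units):

	clock - old - delay                  = 2.5 jiffies worth of TOD units
	do_div(ticks, CLK_TICKS_PER_JIFFY)   -> ticks = 2, remainder = 0.5 jiffy
	delta = 2 * CLK_TICKS_PER_JIFFY      -> added to jiffies_timer_cc
	adjust.offset = 2 * (1000000 / 100)  = 20000 microseconds -> do_adjtimex()

The sub-jiffy remainder is neither folded into jiffies_timer_cc nor reported to ntp.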
631 | static struct { | ||
632 | int in_sync; | ||
633 | unsigned long long fixup_cc; | ||
634 | } etr_sync; | ||
635 | |||
745 | static void etr_sync_cpu_start(void *dummy) | 636 | static void etr_sync_cpu_start(void *dummy) |
746 | { | 637 | { |
747 | int *in_sync = dummy; | ||
748 | |||
749 | etr_enable_sync_clock(); | 638 | etr_enable_sync_clock(); |
750 | /* | 639 | /* |
751 | * This looks like a busy wait loop but it isn't. etr_sync_cpus | 640 | * This looks like a busy wait loop but it isn't. etr_sync_cpus |
@@ -753,7 +642,7 @@ static void etr_sync_cpu_start(void *dummy) | |||
753 | * __udelay will stop the cpu on an enabled wait psw until the | 642 | * __udelay will stop the cpu on an enabled wait psw until the |
754 | * TOD is running again. | 643 | * TOD is running again. |
755 | */ | 644 | */ |
756 | while (*in_sync == 0) { | 645 | while (etr_sync.in_sync == 0) { |
757 | __udelay(1); | 646 | __udelay(1); |
758 | /* | 647 | /* |
759 | * A different cpu changes *in_sync. Therefore use | 648 | * A different cpu changes *in_sync. Therefore use |
@@ -761,14 +650,14 @@ static void etr_sync_cpu_start(void *dummy) | |||
761 | */ | 650 | */ |
762 | barrier(); | 651 | barrier(); |
763 | } | 652 | } |
764 | if (*in_sync != 1) | 653 | if (etr_sync.in_sync != 1) |
765 | /* Didn't work. Clear per-cpu in sync bit again. */ | 654 | /* Didn't work. Clear per-cpu in sync bit again. */ |
766 | etr_disable_sync_clock(NULL); | 655 | etr_disable_sync_clock(NULL); |
767 | /* | 656 | /* |
768 | * This round of TOD syncing is done. Set the clock comparator | 657 | * This round of TOD syncing is done. Set the clock comparator |
769 | * to the next tick and let the processor continue. | 658 | * to the next tick and let the processor continue. |
770 | */ | 659 | */ |
771 | setup_jiffy_timer(); | 660 | fixup_clock_comparator(etr_sync.fixup_cc); |
772 | } | 661 | } |
773 | 662 | ||
774 | static void etr_sync_cpu_end(void *dummy) | 663 | static void etr_sync_cpu_end(void *dummy) |
@@ -783,8 +672,8 @@ static void etr_sync_cpu_end(void *dummy) | |||
783 | static int etr_sync_clock(struct etr_aib *aib, int port) | 672 | static int etr_sync_clock(struct etr_aib *aib, int port) |
784 | { | 673 | { |
785 | struct etr_aib *sync_port; | 674 | struct etr_aib *sync_port; |
786 | unsigned long long clock, delay; | 675 | unsigned long long clock, old_clock, delay, delta; |
787 | int in_sync, follows; | 676 | int follows; |
788 | int rc; | 677 | int rc; |
789 | 678 | ||
790 | /* Check if the current aib is adjacent to the sync port aib. */ | 679 | /* Check if the current aib is adjacent to the sync port aib. */ |
@@ -799,9 +688,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
799 | * successfully synced the clock. smp_call_function will | 688 | * successfully synced the clock. smp_call_function will |
800 | * return after all other cpus are in etr_sync_cpu_start. | 689 | * return after all other cpus are in etr_sync_cpu_start. |
801 | */ | 690 | */ |
802 | in_sync = 0; | 691 | memset(&etr_sync, 0, sizeof(etr_sync)); |
803 | preempt_disable(); | 692 | preempt_disable(); |
804 | smp_call_function(etr_sync_cpu_start,&in_sync,0,0); | 693 | smp_call_function(etr_sync_cpu_start, NULL, 0, 0); |
805 | local_irq_disable(); | 694 | local_irq_disable(); |
806 | etr_enable_sync_clock(); | 695 | etr_enable_sync_clock(); |
807 | 696 | ||
@@ -809,6 +698,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
809 | __ctl_set_bit(14, 21); | 698 | __ctl_set_bit(14, 21); |
810 | __ctl_set_bit(0, 29); | 699 | __ctl_set_bit(0, 29); |
811 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; | 700 | clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32; |
701 | old_clock = get_clock(); | ||
812 | if (set_clock(clock) == 0) { | 702 | if (set_clock(clock) == 0) { |
813 | __udelay(1); /* Wait for the clock to start. */ | 703 | __udelay(1); /* Wait for the clock to start. */ |
814 | __ctl_clear_bit(0, 29); | 704 | __ctl_clear_bit(0, 29); |
@@ -817,16 +707,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
817 | /* Adjust Linux timing variables. */ | 707 | /* Adjust Linux timing variables. */ |
818 | delay = (unsigned long long) | 708 | delay = (unsigned long long) |
819 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 709 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
820 | etr_adjust_time(clock, delay); | 710 | delta = etr_adjust_time(old_clock, clock, delay); |
821 | setup_jiffy_timer(); | 711 | etr_sync.fixup_cc = delta; |
712 | fixup_clock_comparator(delta); | ||
822 | /* Verify that the clock is properly set. */ | 713 | /* Verify that the clock is properly set. */ |
823 | if (!etr_aib_follows(sync_port, aib, port)) { | 714 | if (!etr_aib_follows(sync_port, aib, port)) { |
824 | /* Didn't work. */ | 715 | /* Didn't work. */ |
825 | etr_disable_sync_clock(NULL); | 716 | etr_disable_sync_clock(NULL); |
826 | in_sync = -EAGAIN; | 717 | etr_sync.in_sync = -EAGAIN; |
827 | rc = -EAGAIN; | 718 | rc = -EAGAIN; |
828 | } else { | 719 | } else { |
829 | in_sync = 1; | 720 | etr_sync.in_sync = 1; |
830 | rc = 0; | 721 | rc = 0; |
831 | } | 722 | } |
832 | } else { | 723 | } else { |
@@ -834,7 +725,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
834 | __ctl_clear_bit(0, 29); | 725 | __ctl_clear_bit(0, 29); |
835 | __ctl_clear_bit(14, 21); | 726 | __ctl_clear_bit(14, 21); |
836 | etr_disable_sync_clock(NULL); | 727 | etr_disable_sync_clock(NULL); |
837 | in_sync = -EAGAIN; | 728 | etr_sync.in_sync = -EAGAIN; |
838 | rc = -EAGAIN; | 729 | rc = -EAGAIN; |
839 | } | 730 | } |
840 | local_irq_enable(); | 731 | local_irq_enable(); |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c new file mode 100644 index 000000000000..12b39b3d9c38 --- /dev/null +++ b/arch/s390/kernel/topology.c | |||
@@ -0,0 +1,314 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2007 | ||
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/device.h> | ||
10 | #include <linux/bootmem.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/workqueue.h> | ||
13 | #include <linux/cpu.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <asm/delay.h> | ||
16 | #include <asm/s390_ext.h> | ||
17 | #include <asm/sysinfo.h> | ||
18 | |||
19 | #define CPU_BITS 64 | ||
20 | #define NR_MAG 6 | ||
21 | |||
22 | #define PTF_HORIZONTAL (0UL) | ||
23 | #define PTF_VERTICAL (1UL) | ||
24 | #define PTF_CHECK (2UL) | ||
25 | |||
26 | struct tl_cpu { | ||
27 | unsigned char reserved0[4]; | ||
28 | unsigned char :6; | ||
29 | unsigned char pp:2; | ||
30 | unsigned char reserved1; | ||
31 | unsigned short origin; | ||
32 | unsigned long mask[CPU_BITS / BITS_PER_LONG]; | ||
33 | }; | ||
34 | |||
35 | struct tl_container { | ||
36 | unsigned char reserved[8]; | ||
37 | }; | ||
38 | |||
39 | union tl_entry { | ||
40 | unsigned char nl; | ||
41 | struct tl_cpu cpu; | ||
42 | struct tl_container container; | ||
43 | }; | ||
44 | |||
45 | struct tl_info { | ||
46 | unsigned char reserved0[2]; | ||
47 | unsigned short length; | ||
48 | unsigned char mag[NR_MAG]; | ||
49 | unsigned char reserved1; | ||
50 | unsigned char mnest; | ||
51 | unsigned char reserved2[4]; | ||
52 | union tl_entry tle[0]; | ||
53 | }; | ||
54 | |||
55 | struct core_info { | ||
56 | struct core_info *next; | ||
57 | cpumask_t mask; | ||
58 | }; | ||
59 | |||
60 | static void topology_work_fn(struct work_struct *work); | ||
61 | static struct tl_info *tl_info; | ||
62 | static struct core_info core_info; | ||
63 | static int machine_has_topology; | ||
64 | static int machine_has_topology_irq; | ||
65 | static struct timer_list topology_timer; | ||
66 | static void set_topology_timer(void); | ||
67 | static DECLARE_WORK(topology_work, topology_work_fn); | ||
68 | |||
69 | cpumask_t cpu_coregroup_map(unsigned int cpu) | ||
70 | { | ||
71 | struct core_info *core = &core_info; | ||
72 | cpumask_t mask; | ||
73 | |||
74 | cpus_clear(mask); | ||
75 | if (!machine_has_topology) | ||
76 | return cpu_present_map; | ||
77 | mutex_lock(&smp_cpu_state_mutex); | ||
78 | while (core) { | ||
79 | if (cpu_isset(cpu, core->mask)) { | ||
80 | mask = core->mask; | ||
81 | break; | ||
82 | } | ||
83 | core = core->next; | ||
84 | } | ||
85 | mutex_unlock(&smp_cpu_state_mutex); | ||
86 | if (cpus_empty(mask)) | ||
87 | mask = cpumask_of_cpu(cpu); | ||
88 | return mask; | ||
89 | } | ||
90 | |||
91 | static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core) | ||
92 | { | ||
93 | unsigned int cpu; | ||
94 | |||
95 | for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS); | ||
96 | cpu < CPU_BITS; | ||
97 | cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1)) | ||
98 | { | ||
99 | unsigned int rcpu, lcpu; | ||
100 | |||
101 | rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin; | ||
102 | for_each_present_cpu(lcpu) { | ||
103 | if (__cpu_logical_map[lcpu] == rcpu) { | ||
104 | cpu_set(lcpu, core->mask); | ||
105 | smp_cpu_polarization[lcpu] = tl_cpu->pp; | ||
106 | } | ||
107 | } | ||
108 | } | ||
109 | } | ||
110 | |||
111 | static void clear_cores(void) | ||
112 | { | ||
113 | struct core_info *core = &core_info; | ||
114 | |||
115 | while (core) { | ||
116 | cpus_clear(core->mask); | ||
117 | core = core->next; | ||
118 | } | ||
119 | } | ||
120 | |||
121 | static union tl_entry *next_tle(union tl_entry *tle) | ||
122 | { | ||
123 | if (tle->nl) | ||
124 | return (union tl_entry *)((struct tl_container *)tle + 1); | ||
125 | else | ||
126 | return (union tl_entry *)((struct tl_cpu *)tle + 1); | ||
127 | } | ||
128 | |||
129 | static void tl_to_cores(struct tl_info *info) | ||
130 | { | ||
131 | union tl_entry *tle, *end; | ||
132 | struct core_info *core = &core_info; | ||
133 | |||
134 | mutex_lock(&smp_cpu_state_mutex); | ||
135 | clear_cores(); | ||
136 | tle = info->tle; | ||
137 | end = (union tl_entry *)((unsigned long)info + info->length); | ||
138 | while (tle < end) { | ||
139 | switch (tle->nl) { | ||
140 | case 5: | ||
141 | case 4: | ||
142 | case 3: | ||
143 | case 2: | ||
144 | break; | ||
145 | case 1: | ||
146 | core = core->next; | ||
147 | break; | ||
148 | case 0: | ||
149 | add_cpus_to_core(&tle->cpu, core); | ||
150 | break; | ||
151 | default: | ||
152 | clear_cores(); | ||
153 | machine_has_topology = 0; | ||
154 | return; | ||
155 | } | ||
156 | tle = next_tle(tle); | ||
157 | } | ||
158 | mutex_unlock(&smp_cpu_state_mutex); | ||
159 | } | ||
160 | |||
161 | static void topology_update_polarization_simple(void) | ||
162 | { | ||
163 | int cpu; | ||
164 | |||
165 | mutex_lock(&smp_cpu_state_mutex); | ||
166 | for_each_present_cpu(cpu) | ||
167 | smp_cpu_polarization[cpu] = POLARIZATION_HRZ; | ||
168 | mutex_unlock(&smp_cpu_state_mutex); | ||
169 | } | ||
170 | |||
171 | static int ptf(unsigned long fc) | ||
172 | { | ||
173 | int rc; | ||
174 | |||
175 | asm volatile( | ||
176 | " .insn rre,0xb9a20000,%1,%1\n" | ||
177 | " ipm %0\n" | ||
178 | " srl %0,28\n" | ||
179 | : "=d" (rc) | ||
180 | : "d" (fc) : "cc"); | ||
181 | return rc; | ||
182 | } | ||
183 | |||
184 | int topology_set_cpu_management(int fc) | ||
185 | { | ||
186 | int cpu; | ||
187 | int rc; | ||
188 | |||
189 | if (!machine_has_topology) | ||
190 | return -EOPNOTSUPP; | ||
191 | if (fc) | ||
192 | rc = ptf(PTF_VERTICAL); | ||
193 | else | ||
194 | rc = ptf(PTF_HORIZONTAL); | ||
195 | if (rc) | ||
196 | return -EBUSY; | ||
197 | for_each_present_cpu(cpu) | ||
198 | smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; | ||
199 | return rc; | ||
200 | } | ||
201 | |||
202 | void arch_update_cpu_topology(void) | ||
203 | { | ||
204 | struct tl_info *info = tl_info; | ||
205 | struct sys_device *sysdev; | ||
206 | int cpu; | ||
207 | |||
208 | if (!machine_has_topology) { | ||
209 | topology_update_polarization_simple(); | ||
210 | return; | ||
211 | } | ||
212 | stsi(info, 15, 1, 2); | ||
213 | tl_to_cores(info); | ||
214 | for_each_online_cpu(cpu) { | ||
215 | sysdev = get_cpu_sysdev(cpu); | ||
216 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static void topology_work_fn(struct work_struct *work) | ||
221 | { | ||
222 | arch_reinit_sched_domains(); | ||
223 | } | ||
224 | |||
225 | void topology_schedule_update(void) | ||
226 | { | ||
227 | schedule_work(&topology_work); | ||
228 | } | ||
229 | |||
230 | static void topology_timer_fn(unsigned long ignored) | ||
231 | { | ||
232 | if (ptf(PTF_CHECK)) | ||
233 | topology_schedule_update(); | ||
234 | set_topology_timer(); | ||
235 | } | ||
236 | |||
237 | static void set_topology_timer(void) | ||
238 | { | ||
239 | topology_timer.function = topology_timer_fn; | ||
240 | topology_timer.data = 0; | ||
241 | topology_timer.expires = jiffies + 60 * HZ; | ||
242 | add_timer(&topology_timer); | ||
243 | } | ||
244 | |||
245 | static void topology_interrupt(__u16 code) | ||
246 | { | ||
247 | schedule_work(&topology_work); | ||
248 | } | ||
249 | |||
250 | static int __init init_topology_update(void) | ||
251 | { | ||
252 | int rc; | ||
253 | |||
254 | if (!machine_has_topology) { | ||
255 | topology_update_polarization_simple(); | ||
256 | return 0; | ||
257 | } | ||
258 | init_timer_deferrable(&topology_timer); | ||
259 | if (machine_has_topology_irq) { | ||
260 | rc = register_external_interrupt(0x2005, topology_interrupt); | ||
261 | if (rc) | ||
262 | return rc; | ||
263 | ctl_set_bit(0, 8); | ||
264 | } | ||
265 | else | ||
266 | set_topology_timer(); | ||
267 | return 0; | ||
268 | } | ||
269 | __initcall(init_topology_update); | ||
270 | |||
271 | void __init s390_init_cpu_topology(void) | ||
272 | { | ||
273 | unsigned long long facility_bits; | ||
274 | struct tl_info *info; | ||
275 | struct core_info *core; | ||
276 | int nr_cores; | ||
277 | int i; | ||
278 | |||
279 | if (stfle(&facility_bits, 1) <= 0) | ||
280 | return; | ||
281 | if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61))) | ||
282 | return; | ||
283 | machine_has_topology = 1; | ||
284 | |||
285 | if (facility_bits & (1ULL << 51)) | ||
286 | machine_has_topology_irq = 1; | ||
287 | |||
288 | tl_info = alloc_bootmem_pages(PAGE_SIZE); | ||
289 | if (!tl_info) | ||
290 | goto error; | ||
291 | info = tl_info; | ||
292 | stsi(info, 15, 1, 2); | ||
293 | |||
294 | nr_cores = info->mag[NR_MAG - 2]; | ||
295 | for (i = 0; i < info->mnest - 2; i++) | ||
296 | nr_cores *= info->mag[NR_MAG - 3 - i]; | ||
297 | |||
298 | printk(KERN_INFO "CPU topology:"); | ||
299 | for (i = 0; i < NR_MAG; i++) | ||
300 | printk(" %d", info->mag[i]); | ||
301 | printk(" / %d\n", info->mnest); | ||
302 | |||
303 | core = &core_info; | ||
304 | for (i = 0; i < nr_cores; i++) { | ||
305 | core->next = alloc_bootmem(sizeof(struct core_info)); | ||
306 | core = core->next; | ||
307 | if (!core) | ||
308 | goto error; | ||
309 | } | ||
310 | return; | ||
311 | error: | ||
312 | machine_has_topology = 0; | ||
313 | machine_has_topology_irq = 0; | ||
314 | } | ||
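One note on s390_init_cpu_topology() above: nr_cores starts from the magnitude of the second-deepest nesting level, mag[NR_MAG - 2], and multiplies in one further level for each nesting depth beyond two, yielding an upper bound on the number of core containers so that enough core_info list nodes can be pre-allocated from bootmem. Worked example with hypothetical magnitudes: mnest = 3 and mag = {0, 0, 0, 2, 8, 4} gives nr_cores = mag[4] * mag[3] = 8 * 2 = 16, while mnest = 2 skips the loop entirely and leaves nr_cores = mag[4].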
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 60f728aeaf12..57b607b61100 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -42,11 +42,8 @@ | |||
42 | #include <asm/s390_ext.h> | 42 | #include <asm/s390_ext.h> |
43 | #include <asm/lowcore.h> | 43 | #include <asm/lowcore.h> |
44 | #include <asm/debug.h> | 44 | #include <asm/debug.h> |
45 | #include "entry.h" | ||
45 | 46 | ||
46 | /* Called from entry.S only */ | ||
47 | extern void handle_per_exception(struct pt_regs *regs); | ||
48 | |||
49 | typedef void pgm_check_handler_t(struct pt_regs *, long); | ||
50 | pgm_check_handler_t *pgm_check_table[128]; | 47 | pgm_check_handler_t *pgm_check_table[128]; |
51 | 48 | ||
52 | #ifdef CONFIG_SYSCTL | 49 | #ifdef CONFIG_SYSCTL |
@@ -59,7 +56,6 @@ int sysctl_userprocess_debug = 0; | |||
59 | 56 | ||
60 | extern pgm_check_handler_t do_protection_exception; | 57 | extern pgm_check_handler_t do_protection_exception; |
61 | extern pgm_check_handler_t do_dat_exception; | 58 | extern pgm_check_handler_t do_dat_exception; |
62 | extern pgm_check_handler_t do_monitor_call; | ||
63 | extern pgm_check_handler_t do_asce_exception; | 59 | extern pgm_check_handler_t do_asce_exception; |
64 | 60 | ||
65 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) | 61 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) |
@@ -138,7 +134,6 @@ void show_trace(struct task_struct *task, unsigned long *stack) | |||
138 | else | 134 | else |
139 | __show_trace(sp, S390_lowcore.thread_info, | 135 | __show_trace(sp, S390_lowcore.thread_info, |
140 | S390_lowcore.thread_info + THREAD_SIZE); | 136 | S390_lowcore.thread_info + THREAD_SIZE); |
141 | printk("\n"); | ||
142 | if (!task) | 137 | if (!task) |
143 | task = current; | 138 | task = current; |
144 | debug_show_held_locks(task); | 139 | debug_show_held_locks(task); |
@@ -166,6 +161,15 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
166 | show_trace(task, sp); | 161 | show_trace(task, sp); |
167 | } | 162 | } |
168 | 163 | ||
164 | #ifdef CONFIG_64BIT | ||
165 | void show_last_breaking_event(struct pt_regs *regs) | ||
166 | { | ||
167 | printk("Last Breaking-Event-Address:\n"); | ||
168 | printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN); | ||
169 | print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN); | ||
170 | } | ||
171 | #endif | ||
172 | |||
169 | /* | 173 | /* |
170 | * The architecture-independent dump_stack generator | 174 | * The architecture-independent dump_stack generator |
171 | */ | 175 | */ |
@@ -739,6 +743,5 @@ void __init trap_init(void) | |||
739 | pgm_check_table[0x15] = &operand_exception; | 743 | pgm_check_table[0x15] = &operand_exception; |
740 | pgm_check_table[0x1C] = &space_switch_exception; | 744 | pgm_check_table[0x1C] = &space_switch_exception; |
741 | pgm_check_table[0x1D] = &hfp_sqrt_exception; | 745 | pgm_check_table[0x1D] = &hfp_sqrt_exception; |
742 | pgm_check_table[0x40] = &do_monitor_call; | ||
743 | pfault_irq_init(); | 746 | pfault_irq_init(); |
744 | } | 747 | } |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 70f2a862b670..eae21a8ac72d 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -34,7 +34,7 @@ void __delay(unsigned long loops) | |||
34 | */ | 34 | */ |
35 | void __udelay(unsigned long usecs) | 35 | void __udelay(unsigned long usecs) |
36 | { | 36 | { |
37 | u64 end, time, jiffy_timer = 0; | 37 | u64 end, time, old_cc = 0; |
38 | unsigned long flags, cr0, mask, dummy; | 38 | unsigned long flags, cr0, mask, dummy; |
39 | int irq_context; | 39 | int irq_context; |
40 | 40 | ||
@@ -43,8 +43,8 @@ void __udelay(unsigned long usecs) | |||
43 | local_bh_disable(); | 43 | local_bh_disable(); |
44 | local_irq_save(flags); | 44 | local_irq_save(flags); |
45 | if (raw_irqs_disabled_flags(flags)) { | 45 | if (raw_irqs_disabled_flags(flags)) { |
46 | jiffy_timer = S390_lowcore.jiffy_timer; | 46 | old_cc = S390_lowcore.clock_comparator; |
47 | S390_lowcore.jiffy_timer = -1ULL - (4096 << 12); | 47 | S390_lowcore.clock_comparator = -1ULL; |
48 | __ctl_store(cr0, 0, 0); | 48 | __ctl_store(cr0, 0, 0); |
49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | 49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; |
50 | __ctl_load(dummy , 0, 0); | 50 | __ctl_load(dummy , 0, 0); |
@@ -55,8 +55,8 @@ void __udelay(unsigned long usecs) | |||
55 | 55 | ||
56 | end = get_clock() + ((u64) usecs << 12); | 56 | end = get_clock() + ((u64) usecs << 12); |
57 | do { | 57 | do { |
58 | time = end < S390_lowcore.jiffy_timer ? | 58 | time = end < S390_lowcore.clock_comparator ? |
59 | end : S390_lowcore.jiffy_timer; | 59 | end : S390_lowcore.clock_comparator; |
60 | set_clock_comparator(time); | 60 | set_clock_comparator(time); |
61 | trace_hardirqs_on(); | 61 | trace_hardirqs_on(); |
62 | __load_psw_mask(mask); | 62 | __load_psw_mask(mask); |
@@ -65,10 +65,10 @@ void __udelay(unsigned long usecs) | |||
65 | 65 | ||
66 | if (raw_irqs_disabled_flags(flags)) { | 66 | if (raw_irqs_disabled_flags(flags)) { |
67 | __ctl_load(cr0, 0, 0); | 67 | __ctl_load(cr0, 0, 0); |
68 | S390_lowcore.jiffy_timer = jiffy_timer; | 68 | S390_lowcore.clock_comparator = old_cc; |
69 | } | 69 | } |
70 | if (!irq_context) | 70 | if (!irq_context) |
71 | _local_bh_enable(); | 71 | _local_bh_enable(); |
72 | set_clock_comparator(S390_lowcore.jiffy_timer); | 72 | set_clock_comparator(S390_lowcore.clock_comparator); |
73 | local_irq_restore(flags); | 73 | local_irq_restore(flags); |
74 | } | 74 | } |
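The __udelay() change above relies on the same convention used elsewhere in this series: a clock_comparator value of -1ULL means "nothing armed", because the comparator interrupt only fires once the TOD clock passes the programmed value, so all-ones pushes the next wakeup effectively to never. A minimal sketch of that recurring pattern (illustrative, not code from the patch):

/* Illustrative sketch of the -1ULL "disarmed" convention used by this series. */
static inline void clock_comparator_disarm(void)
{
	S390_lowcore.clock_comparator = -1ULL;		/* nothing pending */
	set_clock_comparator(S390_lowcore.clock_comparator);
}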
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 5efdfe9f5e76..d66215b0fde9 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -302,6 +302,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to, | |||
302 | pte_t *pte_from, *pte_to; | 302 | pte_t *pte_from, *pte_to; |
303 | int write_user; | 303 | int write_user; |
304 | 304 | ||
305 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
306 | memcpy((void __force *) to, (void __force *) from, n); | ||
307 | return 0; | ||
308 | } | ||
305 | done = 0; | 309 | done = 0; |
306 | retry: | 310 | retry: |
307 | spin_lock(&mm->page_table_lock); | 311 | spin_lock(&mm->page_table_lock); |
@@ -361,18 +365,10 @@ fault: | |||
361 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ | 365 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
362 | "m" (*uaddr) : "cc" ); | 366 | "m" (*uaddr) : "cc" ); |
363 | 367 | ||
364 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | 368 | static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) |
365 | { | 369 | { |
366 | int oldval = 0, newval, ret; | 370 | int oldval = 0, newval, ret; |
367 | 371 | ||
368 | spin_lock(¤t->mm->page_table_lock); | ||
369 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
370 | if (!uaddr) { | ||
371 | spin_unlock(¤t->mm->page_table_lock); | ||
372 | return -EFAULT; | ||
373 | } | ||
374 | get_page(virt_to_page(uaddr)); | ||
375 | spin_unlock(¤t->mm->page_table_lock); | ||
376 | switch (op) { | 372 | switch (op) { |
377 | case FUTEX_OP_SET: | 373 | case FUTEX_OP_SET: |
378 | __futex_atomic_op("lr %2,%5\n", | 374 | __futex_atomic_op("lr %2,%5\n", |
@@ -397,17 +393,17 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | |||
397 | default: | 393 | default: |
398 | ret = -ENOSYS; | 394 | ret = -ENOSYS; |
399 | } | 395 | } |
400 | put_page(virt_to_page(uaddr)); | 396 | if (ret == 0) |
401 | *old = oldval; | 397 | *old = oldval; |
402 | return ret; | 398 | return ret; |
403 | } | 399 | } |
404 | 400 | ||
405 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | 401 | int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) |
406 | { | 402 | { |
407 | int ret; | 403 | int ret; |
408 | 404 | ||
409 | if (!current->mm) | 405 | if (segment_eq(get_fs(), KERNEL_DS)) |
410 | return -EFAULT; | 406 | return __futex_atomic_op_pt(op, uaddr, oparg, old); |
411 | spin_lock(¤t->mm->page_table_lock); | 407 | spin_lock(¤t->mm->page_table_lock); |
412 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | 408 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); |
413 | if (!uaddr) { | 409 | if (!uaddr) { |
@@ -416,13 +412,40 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | |||
416 | } | 412 | } |
417 | get_page(virt_to_page(uaddr)); | 413 | get_page(virt_to_page(uaddr)); |
418 | spin_unlock(¤t->mm->page_table_lock); | 414 | spin_unlock(¤t->mm->page_table_lock); |
419 | asm volatile(" cs %1,%4,0(%5)\n" | 415 | ret = __futex_atomic_op_pt(op, uaddr, oparg, old); |
420 | "0: lr %0,%1\n" | 416 | put_page(virt_to_page(uaddr)); |
421 | "1:\n" | 417 | return ret; |
422 | EX_TABLE(0b,1b) | 418 | } |
419 | |||
420 | static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | ||
421 | { | ||
422 | int ret; | ||
423 | |||
424 | asm volatile("0: cs %1,%4,0(%5)\n" | ||
425 | "1: lr %0,%1\n" | ||
426 | "2:\n" | ||
427 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | ||
423 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) | 428 | : "=d" (ret), "+d" (oldval), "=m" (*uaddr) |
424 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) | 429 | : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) |
425 | : "cc", "memory" ); | 430 | : "cc", "memory" ); |
431 | return ret; | ||
432 | } | ||
433 | |||
434 | int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) | ||
435 | { | ||
436 | int ret; | ||
437 | |||
438 | if (segment_eq(get_fs(), KERNEL_DS)) | ||
439 | return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); | ||
440 | spin_lock(¤t->mm->page_table_lock); | ||
441 | uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); | ||
442 | if (!uaddr) { | ||
443 | spin_unlock(¤t->mm->page_table_lock); | ||
444 | return -EFAULT; | ||
445 | } | ||
446 | get_page(virt_to_page(uaddr)); | ||
447 | spin_unlock(¤t->mm->page_table_lock); | ||
448 | ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); | ||
426 | put_page(virt_to_page(uaddr)); | 449 | put_page(virt_to_page(uaddr)); |
427 | return ret; | 450 | return ret; |
428 | } | 451 | } |
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 880b0ebf894b..ed2af0a3303b 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -289,22 +289,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
289 | 289 | ||
290 | rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | 290 | rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
291 | 291 | ||
292 | switch (rc) { | 292 | if (rc) |
293 | case 0: | ||
294 | break; | ||
295 | case -ENOSPC: | ||
296 | PRINT_WARN("segment_load: not loading segment %s - overlaps " | ||
297 | "storage/segment\n", name); | ||
298 | goto out_free; | ||
299 | case -ERANGE: | ||
300 | PRINT_WARN("segment_load: not loading segment %s - exceeds " | ||
301 | "kernel mapping range\n", name); | ||
302 | goto out_free; | ||
303 | default: | ||
304 | PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n", | ||
305 | name, rc); | ||
306 | goto out_free; | 293 | goto out_free; |
307 | } | ||
308 | 294 | ||
309 | seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); | 295 | seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL); |
310 | if (seg->res == NULL) { | 296 | if (seg->res == NULL) { |
@@ -582,8 +568,59 @@ out: | |||
582 | mutex_unlock(&dcss_lock); | 568 | mutex_unlock(&dcss_lock); |
583 | } | 569 | } |
584 | 570 | ||
571 | /* | ||
572 | * print appropriate error message for segment_load()/segment_type() | ||
573 | * return code | ||
574 | */ | ||
575 | void segment_warning(int rc, char *seg_name) | ||
576 | { | ||
577 | switch (rc) { | ||
578 | case -ENOENT: | ||
579 | PRINT_WARN("cannot load/query segment %s, " | ||
580 | "does not exist\n", seg_name); | ||
581 | break; | ||
582 | case -ENOSYS: | ||
583 | PRINT_WARN("cannot load/query segment %s, " | ||
584 | "not running on VM\n", seg_name); | ||
585 | break; | ||
586 | case -EIO: | ||
587 | PRINT_WARN("cannot load/query segment %s, " | ||
588 | "hardware error\n", seg_name); | ||
589 | break; | ||
590 | case -ENOTSUPP: | ||
591 | PRINT_WARN("cannot load/query segment %s, " | ||
592 | "is a multi-part segment\n", seg_name); | ||
593 | break; | ||
594 | case -ENOSPC: | ||
595 | PRINT_WARN("cannot load/query segment %s, " | ||
596 | "overlaps with storage\n", seg_name); | ||
597 | break; | ||
598 | case -EBUSY: | ||
599 | PRINT_WARN("cannot load/query segment %s, " | ||
600 | "overlaps with already loaded dcss\n", seg_name); | ||
601 | break; | ||
602 | case -EPERM: | ||
603 | PRINT_WARN("cannot load/query segment %s, " | ||
604 | "already loaded in incompatible mode\n", seg_name); | ||
605 | break; | ||
606 | case -ENOMEM: | ||
607 | PRINT_WARN("cannot load/query segment %s, " | ||
608 | "out of memory\n", seg_name); | ||
609 | break; | ||
610 | case -ERANGE: | ||
611 | PRINT_WARN("cannot load/query segment %s, " | ||
612 | "exceeds kernel mapping range\n", seg_name); | ||
613 | break; | ||
614 | default: | ||
615 | PRINT_WARN("cannot load/query segment %s, " | ||
616 | "return value %i\n", seg_name, rc); | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | |||
585 | EXPORT_SYMBOL(segment_load); | 621 | EXPORT_SYMBOL(segment_load); |
586 | EXPORT_SYMBOL(segment_unload); | 622 | EXPORT_SYMBOL(segment_unload); |
587 | EXPORT_SYMBOL(segment_save); | 623 | EXPORT_SYMBOL(segment_save); |
588 | EXPORT_SYMBOL(segment_type); | 624 | EXPORT_SYMBOL(segment_type); |
589 | EXPORT_SYMBOL(segment_modify_shared); | 625 | EXPORT_SYMBOL(segment_modify_shared); |
626 | EXPORT_SYMBOL(segment_warning); | ||
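With the rc-to-message mapping now centralized in segment_warning(), callers report load/query failures themselves instead of relying on the PRINT_WARNs removed from __segment_load() above. A hedged sketch of a typical caller (hypothetical driver code, not part of this diff):

/* Hypothetical caller: load a DCSS and report any failure via segment_warning(). */
char seg_name[] = "MYDCSS";
unsigned long start, end;
int rc;

rc = segment_load(seg_name, SEGMENT_SHARED, &start, &end);
if (rc < 0)
	segment_warning(rc, seg_name);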
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index ed13d429a487..2650f46001d0 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -28,11 +28,11 @@ | |||
28 | #include <linux/hardirq.h> | 28 | #include <linux/hardirq.h> |
29 | #include <linux/kprobes.h> | 29 | #include <linux/kprobes.h> |
30 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
31 | |||
32 | #include <asm/system.h> | 31 | #include <asm/system.h> |
33 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
34 | #include <asm/s390_ext.h> | 33 | #include <asm/s390_ext.h> |
35 | #include <asm/mmu_context.h> | 34 | #include <asm/mmu_context.h> |
35 | #include "../kernel/entry.h" | ||
36 | 36 | ||
37 | #ifndef CONFIG_64BIT | 37 | #ifndef CONFIG_64BIT |
38 | #define __FAIL_ADDR_MASK 0x7ffff000 | 38 | #define __FAIL_ADDR_MASK 0x7ffff000 |
@@ -50,8 +50,6 @@ | |||
50 | extern int sysctl_userprocess_debug; | 50 | extern int sysctl_userprocess_debug; |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | extern void die(const char *,struct pt_regs *,long); | ||
54 | |||
55 | #ifdef CONFIG_KPROBES | 53 | #ifdef CONFIG_KPROBES |
56 | static inline int notify_page_fault(struct pt_regs *regs, long err) | 54 | static inline int notify_page_fault(struct pt_regs *regs, long err) |
57 | { | 55 | { |
@@ -245,11 +243,6 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code, | |||
245 | } | 243 | } |
246 | 244 | ||
247 | #ifdef CONFIG_S390_EXEC_PROTECT | 245 | #ifdef CONFIG_S390_EXEC_PROTECT |
248 | extern long sys_sigreturn(struct pt_regs *regs); | ||
249 | extern long sys_rt_sigreturn(struct pt_regs *regs); | ||
250 | extern long sys32_sigreturn(struct pt_regs *regs); | ||
251 | extern long sys32_rt_sigreturn(struct pt_regs *regs); | ||
252 | |||
253 | static int signal_return(struct mm_struct *mm, struct pt_regs *regs, | 246 | static int signal_return(struct mm_struct *mm, struct pt_regs *regs, |
254 | unsigned long address, unsigned long error_code) | 247 | unsigned long address, unsigned long error_code) |
255 | { | 248 | { |
@@ -270,15 +263,15 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs, | |||
270 | #ifdef CONFIG_COMPAT | 263 | #ifdef CONFIG_COMPAT |
271 | compat = test_tsk_thread_flag(current, TIF_31BIT); | 264 | compat = test_tsk_thread_flag(current, TIF_31BIT); |
272 | if (compat && instruction == 0x0a77) | 265 | if (compat && instruction == 0x0a77) |
273 | sys32_sigreturn(regs); | 266 | sys32_sigreturn(); |
274 | else if (compat && instruction == 0x0aad) | 267 | else if (compat && instruction == 0x0aad) |
275 | sys32_rt_sigreturn(regs); | 268 | sys32_rt_sigreturn(); |
276 | else | 269 | else |
277 | #endif | 270 | #endif |
278 | if (instruction == 0x0a77) | 271 | if (instruction == 0x0a77) |
279 | sys_sigreturn(regs); | 272 | sys_sigreturn(); |
280 | else if (instruction == 0x0aad) | 273 | else if (instruction == 0x0aad) |
281 | sys_rt_sigreturn(regs); | 274 | sys_rt_sigreturn(); |
282 | else { | 275 | else { |
283 | current->thread.prot_addr = address; | 276 | current->thread.prot_addr = address; |
284 | current->thread.trap_no = error_code; | 277 | current->thread.trap_no = error_code; |
@@ -424,7 +417,7 @@ no_context: | |||
424 | } | 417 | } |
425 | 418 | ||
426 | void __kprobes do_protection_exception(struct pt_regs *regs, | 419 | void __kprobes do_protection_exception(struct pt_regs *regs, |
427 | unsigned long error_code) | 420 | long error_code) |
428 | { | 421 | { |
429 | /* Protection exception is suppressing, decrement psw address. */ | 422 | /* Protection exception is suppressing, decrement psw address. */ |
430 | regs->psw.addr -= (error_code >> 16); | 423 | regs->psw.addr -= (error_code >> 16); |
@@ -440,7 +433,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs, | |||
440 | do_exception(regs, 4, 1); | 433 | do_exception(regs, 4, 1); |
441 | } | 434 | } |
442 | 435 | ||
443 | void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) | 436 | void __kprobes do_dat_exception(struct pt_regs *regs, long error_code) |
444 | { | 437 | { |
445 | do_exception(regs, error_code & 0xff, 0); | 438 | do_exception(regs, error_code & 0xff, 0); |
446 | } | 439 | } |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 8053245fe259..202c952a29b4 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -50,7 +50,6 @@ void show_mem(void) | |||
50 | 50 | ||
51 | printk("Mem-info:\n"); | 51 | printk("Mem-info:\n"); |
52 | show_free_areas(); | 52 | show_free_areas(); |
53 | printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10)); | ||
54 | i = max_mapnr; | 53 | i = max_mapnr; |
55 | while (i-- > 0) { | 54 | while (i-- > 0) { |
56 | if (!pfn_valid(i)) | 55 | if (!pfn_valid(i)) |