diff options
Diffstat (limited to 'arch/s390')
39 files changed, 1088 insertions, 722 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 608193cfe43f..ff690564edbd 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -22,6 +22,14 @@ config RWSEM_XCHGADD_ALGORITHM | |||
22 | bool | 22 | bool |
23 | default y | 23 | default y |
24 | 24 | ||
25 | config ARCH_HAS_ILOG2_U32 | ||
26 | bool | ||
27 | default n | ||
28 | |||
29 | config ARCH_HAS_ILOG2_U64 | ||
30 | bool | ||
31 | default n | ||
32 | |||
25 | config GENERIC_HWEIGHT | 33 | config GENERIC_HWEIGHT |
26 | bool | 34 | bool |
27 | default y | 35 | default y |
@@ -33,9 +41,6 @@ config GENERIC_CALIBRATE_DELAY | |||
33 | config GENERIC_TIME | 41 | config GENERIC_TIME |
34 | def_bool y | 42 | def_bool y |
35 | 43 | ||
36 | config GENERIC_BUST_SPINLOCK | ||
37 | bool | ||
38 | |||
39 | mainmenu "Linux Kernel Configuration" | 44 | mainmenu "Linux Kernel Configuration" |
40 | 45 | ||
41 | config S390 | 46 | config S390 |
@@ -181,7 +186,7 @@ config PACK_STACK | |||
181 | 186 | ||
182 | config SMALL_STACK | 187 | config SMALL_STACK |
183 | bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb" | 188 | bool "Use 4kb/8kb for kernel stack instead of 8kb/16kb" |
184 | depends on PACK_STACK | 189 | depends on PACK_STACK && !LOCKDEP |
185 | help | 190 | help |
186 | If you say Y here and the compiler supports the -mkernel-backchain | 191 | If you say Y here and the compiler supports the -mkernel-backchain |
187 | option the kernel will use a smaller kernel stack size. For 31 bit | 192 | option the kernel will use a smaller kernel stack size. For 31 bit |
@@ -241,6 +246,9 @@ config ARCH_POPULATES_NODE_MAP | |||
241 | 246 | ||
242 | source "mm/Kconfig" | 247 | source "mm/Kconfig" |
243 | 248 | ||
249 | config HOLES_IN_ZONE | ||
250 | def_bool y | ||
251 | |||
244 | comment "I/O subsystem configuration" | 252 | comment "I/O subsystem configuration" |
245 | 253 | ||
246 | config MACHCHK_WARNING | 254 | config MACHCHK_WARNING |
@@ -264,14 +272,6 @@ config QDIO | |||
264 | 272 | ||
265 | If unsure, say Y. | 273 | If unsure, say Y. |
266 | 274 | ||
267 | config QDIO_PERF_STATS | ||
268 | bool "Performance statistics in /proc" | ||
269 | depends on QDIO | ||
270 | help | ||
271 | Say Y here to get performance statistics in /proc/qdio_perf | ||
272 | |||
273 | If unsure, say N. | ||
274 | |||
275 | config QDIO_DEBUG | 275 | config QDIO_DEBUG |
276 | bool "Extended debugging information" | 276 | bool "Extended debugging information" |
277 | depends on QDIO | 277 | depends on QDIO |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 5deb9f7544a1..6598e5268573 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
@@ -35,6 +35,9 @@ cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900) | |||
35 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) | 35 | cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990) |
36 | cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) | 36 | cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109) |
37 | 37 | ||
38 | #KBUILD_IMAGE is necessary for make rpm | ||
39 | KBUILD_IMAGE :=arch/s390/boot/image | ||
40 | |||
38 | # | 41 | # |
39 | # Prevent tail-call optimizations, to get clearer backtraces: | 42 | # Prevent tail-call optimizations, to get clearer backtraces: |
40 | # | 43 | # |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 45c9fa7d7545..b8c237290263 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -92,8 +92,8 @@ static int appldata_timer_active; | |||
92 | * Work queue | 92 | * Work queue |
93 | */ | 93 | */ |
94 | static struct workqueue_struct *appldata_wq; | 94 | static struct workqueue_struct *appldata_wq; |
95 | static void appldata_work_fn(void *data); | 95 | static void appldata_work_fn(struct work_struct *work); |
96 | static DECLARE_WORK(appldata_work, appldata_work_fn, NULL); | 96 | static DECLARE_WORK(appldata_work, appldata_work_fn); |
97 | 97 | ||
98 | 98 | ||
99 | /* | 99 | /* |
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data) | |||
125 | * | 125 | * |
126 | * call data gathering function for each (active) module | 126 | * call data gathering function for each (active) module |
127 | */ | 127 | */ |
128 | static void appldata_work_fn(void *data) | 128 | static void appldata_work_fn(struct work_struct *work) |
129 | { | 129 | { |
130 | struct list_head *lh; | 130 | struct list_head *lh; |
131 | struct appldata_ops *ops; | 131 | struct appldata_ops *ops; |
@@ -310,6 +310,7 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp, | |||
310 | if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) { | 310 | if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) { |
311 | return -EFAULT; | 311 | return -EFAULT; |
312 | } | 312 | } |
313 | interval = 0; | ||
313 | sscanf(buf, "%i", &interval); | 314 | sscanf(buf, "%i", &interval); |
314 | if (interval <= 0) { | 315 | if (interval <= 0) { |
315 | P_ERROR("Timer CPU interval has to be > 0!\n"); | 316 | P_ERROR("Timer CPU interval has to be > 0!\n"); |
@@ -560,7 +561,6 @@ appldata_offline_cpu(int cpu) | |||
560 | spin_unlock(&appldata_timer_lock); | 561 | spin_unlock(&appldata_timer_lock); |
561 | } | 562 | } |
562 | 563 | ||
563 | #ifdef CONFIG_HOTPLUG_CPU | ||
564 | static int __cpuinit | 564 | static int __cpuinit |
565 | appldata_cpu_notify(struct notifier_block *self, | 565 | appldata_cpu_notify(struct notifier_block *self, |
566 | unsigned long action, void *hcpu) | 566 | unsigned long action, void *hcpu) |
@@ -581,7 +581,6 @@ appldata_cpu_notify(struct notifier_block *self, | |||
581 | static struct notifier_block appldata_nb = { | 581 | static struct notifier_block appldata_nb = { |
582 | .notifier_call = appldata_cpu_notify, | 582 | .notifier_call = appldata_cpu_notify, |
583 | }; | 583 | }; |
584 | #endif | ||
585 | 584 | ||
586 | /* | 585 | /* |
587 | * appldata_init() | 586 | * appldata_init() |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index a3257398ea8d..a6ec919ba83f 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.18 | 3 | # Linux kernel version: 2.6.19-rc2 |
4 | # Wed Oct 4 19:45:46 2006 | 4 | # Wed Oct 18 17:11:10 2006 |
5 | # | 5 | # |
6 | CONFIG_MMU=y | 6 | CONFIG_MMU=y |
7 | CONFIG_LOCKDEP_SUPPORT=y | 7 | CONFIG_LOCKDEP_SUPPORT=y |
@@ -119,7 +119,6 @@ CONFIG_PACK_STACK=y | |||
119 | CONFIG_CHECK_STACK=y | 119 | CONFIG_CHECK_STACK=y |
120 | CONFIG_STACK_GUARD=256 | 120 | CONFIG_STACK_GUARD=256 |
121 | # CONFIG_WARN_STACK is not set | 121 | # CONFIG_WARN_STACK is not set |
122 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
123 | CONFIG_SELECT_MEMORY_MODEL=y | 122 | CONFIG_SELECT_MEMORY_MODEL=y |
124 | CONFIG_FLATMEM_MANUAL=y | 123 | CONFIG_FLATMEM_MANUAL=y |
125 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 124 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
@@ -135,7 +134,6 @@ CONFIG_RESOURCES_64BIT=y | |||
135 | # | 134 | # |
136 | CONFIG_MACHCHK_WARNING=y | 135 | CONFIG_MACHCHK_WARNING=y |
137 | CONFIG_QDIO=y | 136 | CONFIG_QDIO=y |
138 | # CONFIG_QDIO_PERF_STATS is not set | ||
139 | # CONFIG_QDIO_DEBUG is not set | 137 | # CONFIG_QDIO_DEBUG is not set |
140 | 138 | ||
141 | # | 139 | # |
@@ -211,6 +209,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=y | |||
211 | CONFIG_INET6_XFRM_MODE_TUNNEL=y | 209 | CONFIG_INET6_XFRM_MODE_TUNNEL=y |
212 | CONFIG_INET6_XFRM_MODE_BEET=y | 210 | CONFIG_INET6_XFRM_MODE_BEET=y |
213 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | 211 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set |
212 | CONFIG_IPV6_SIT=y | ||
214 | # CONFIG_IPV6_TUNNEL is not set | 213 | # CONFIG_IPV6_TUNNEL is not set |
215 | # CONFIG_IPV6_SUBTREES is not set | 214 | # CONFIG_IPV6_SUBTREES is not set |
216 | # CONFIG_IPV6_MULTIPLE_TABLES is not set | 215 | # CONFIG_IPV6_MULTIPLE_TABLES is not set |
@@ -528,6 +527,7 @@ CONFIG_EXT3_FS=y | |||
528 | CONFIG_EXT3_FS_XATTR=y | 527 | CONFIG_EXT3_FS_XATTR=y |
529 | # CONFIG_EXT3_FS_POSIX_ACL is not set | 528 | # CONFIG_EXT3_FS_POSIX_ACL is not set |
530 | # CONFIG_EXT3_FS_SECURITY is not set | 529 | # CONFIG_EXT3_FS_SECURITY is not set |
530 | # CONFIG_EXT4DEV_FS is not set | ||
531 | CONFIG_JBD=y | 531 | CONFIG_JBD=y |
532 | # CONFIG_JBD_DEBUG is not set | 532 | # CONFIG_JBD_DEBUG is not set |
533 | CONFIG_FS_MBCACHE=y | 533 | CONFIG_FS_MBCACHE=y |
@@ -646,10 +646,6 @@ CONFIG_MSDOS_PARTITION=y | |||
646 | # CONFIG_NLS is not set | 646 | # CONFIG_NLS is not set |
647 | 647 | ||
648 | # | 648 | # |
649 | # Distributed Lock Manager | ||
650 | # | ||
651 | |||
652 | # | ||
653 | # Instrumentation Support | 649 | # Instrumentation Support |
654 | # | 650 | # |
655 | 651 | ||
@@ -669,7 +665,6 @@ CONFIG_MAGIC_SYSRQ=y | |||
669 | # CONFIG_UNUSED_SYMBOLS is not set | 665 | # CONFIG_UNUSED_SYMBOLS is not set |
670 | CONFIG_DEBUG_KERNEL=y | 666 | CONFIG_DEBUG_KERNEL=y |
671 | CONFIG_LOG_BUF_SHIFT=17 | 667 | CONFIG_LOG_BUF_SHIFT=17 |
672 | # CONFIG_DETECT_SOFTLOCKUP is not set | ||
673 | # CONFIG_SCHEDSTATS is not set | 668 | # CONFIG_SCHEDSTATS is not set |
674 | # CONFIG_DEBUG_SLAB is not set | 669 | # CONFIG_DEBUG_SLAB is not set |
675 | CONFIG_DEBUG_PREEMPT=y | 670 | CONFIG_DEBUG_PREEMPT=y |
@@ -690,6 +685,7 @@ CONFIG_DEBUG_FS=y | |||
690 | # CONFIG_FRAME_POINTER is not set | 685 | # CONFIG_FRAME_POINTER is not set |
691 | # CONFIG_UNWIND_INFO is not set | 686 | # CONFIG_UNWIND_INFO is not set |
692 | CONFIG_FORCED_INLINING=y | 687 | CONFIG_FORCED_INLINING=y |
688 | CONFIG_HEADERS_CHECK=y | ||
693 | # CONFIG_RCU_TORTURE_TEST is not set | 689 | # CONFIG_RCU_TORTURE_TEST is not set |
694 | # CONFIG_LKDTM is not set | 690 | # CONFIG_LKDTM is not set |
695 | 691 | ||
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index cd702ae45d6d..b6716c4b9934 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c | |||
@@ -109,7 +109,7 @@ static void hypfs_drop_inode(struct inode *inode) | |||
109 | 109 | ||
110 | static int hypfs_open(struct inode *inode, struct file *filp) | 110 | static int hypfs_open(struct inode *inode, struct file *filp) |
111 | { | 111 | { |
112 | char *data = filp->f_dentry->d_inode->i_private; | 112 | char *data = filp->f_path.dentry->d_inode->i_private; |
113 | struct hypfs_sb_info *fs_info; | 113 | struct hypfs_sb_info *fs_info; |
114 | 114 | ||
115 | if (filp->f_mode & FMODE_WRITE) { | 115 | if (filp->f_mode & FMODE_WRITE) { |
@@ -174,7 +174,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
174 | struct hypfs_sb_info *fs_info; | 174 | struct hypfs_sb_info *fs_info; |
175 | size_t count = iov_length(iov, nr_segs); | 175 | size_t count = iov_length(iov, nr_segs); |
176 | 176 | ||
177 | sb = iocb->ki_filp->f_dentry->d_inode->i_sb; | 177 | sb = iocb->ki_filp->f_path.dentry->d_inode->i_sb; |
178 | fs_info = sb->s_fs_info; | 178 | fs_info = sb->s_fs_info; |
179 | /* | 179 | /* |
180 | * Currently we only allow one update per second for two reasons: | 180 | * Currently we only allow one update per second for two reasons: |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index aa978978d3d1..a81881c9b297 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | obj-y := bitmap.o traps.o time.o process.o \ | 7 | obj-y := bitmap.o traps.o time.o process.o reset.o \ |
8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
9 | semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o | 9 | semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o |
10 | 10 | ||
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c index 9565a2dcfadc..5c46054195cb 100644 --- a/arch/s390/kernel/binfmt_elf32.c +++ b/arch/s390/kernel/binfmt_elf32.c | |||
@@ -176,7 +176,6 @@ struct elf_prpsinfo32 | |||
176 | 176 | ||
177 | #include <linux/highuid.h> | 177 | #include <linux/highuid.h> |
178 | 178 | ||
179 | #define elf_addr_t u32 | ||
180 | /* | 179 | /* |
181 | #define init_elf_binfmt init_elf32_binfmt | 180 | #define init_elf_binfmt init_elf32_binfmt |
182 | */ | 181 | */ |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index e15e1489aef5..5b33f823863a 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -295,6 +295,7 @@ static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i) | |||
295 | * | 295 | * |
296 | * This is really horribly ugly. | 296 | * This is really horribly ugly. |
297 | */ | 297 | */ |
298 | #ifdef CONFIG_SYSVIPC | ||
298 | asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr) | 299 | asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr) |
299 | { | 300 | { |
300 | if (call >> 16) /* hack for backward compatibility */ | 301 | if (call >> 16) /* hack for backward compatibility */ |
@@ -338,6 +339,7 @@ asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr) | |||
338 | 339 | ||
339 | return -ENOSYS; | 340 | return -ENOSYS; |
340 | } | 341 | } |
342 | #endif | ||
341 | 343 | ||
342 | asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) | 344 | asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) |
343 | { | 345 | { |
@@ -755,7 +757,9 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args) | |||
755 | put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp))) | 757 | put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp))) |
756 | error = -EFAULT; | 758 | error = -EFAULT; |
757 | } | 759 | } |
758 | copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)); | 760 | if (copy_to_user(args->__unused, tmp.__unused, |
761 | sizeof(tmp.__unused))) | ||
762 | error = -EFAULT; | ||
759 | } | 763 | } |
760 | return error; | 764 | return error; |
761 | } | 765 | } |
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index d49b876a83bf..861888ab8c13 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c | |||
@@ -169,12 +169,12 @@ sys32_sigaction(int sig, const struct old_sigaction32 __user *act, | |||
169 | compat_old_sigset_t mask; | 169 | compat_old_sigset_t mask; |
170 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 170 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
171 | __get_user(sa_handler, &act->sa_handler) || | 171 | __get_user(sa_handler, &act->sa_handler) || |
172 | __get_user(sa_restorer, &act->sa_restorer)) | 172 | __get_user(sa_restorer, &act->sa_restorer) || |
173 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
174 | __get_user(mask, &act->sa_mask)) | ||
173 | return -EFAULT; | 175 | return -EFAULT; |
174 | new_ka.sa.sa_handler = (__sighandler_t) sa_handler; | 176 | new_ka.sa.sa_handler = (__sighandler_t) sa_handler; |
175 | new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer; | 177 | new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer; |
176 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
177 | __get_user(mask, &act->sa_mask); | ||
178 | siginitset(&new_ka.sa.sa_mask, mask); | 178 | siginitset(&new_ka.sa.sa_mask, mask); |
179 | } | 179 | } |
180 | 180 | ||
@@ -185,10 +185,10 @@ sys32_sigaction(int sig, const struct old_sigaction32 __user *act, | |||
185 | sa_restorer = (unsigned long) old_ka.sa.sa_restorer; | 185 | sa_restorer = (unsigned long) old_ka.sa.sa_restorer; |
186 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | 186 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || |
187 | __put_user(sa_handler, &oact->sa_handler) || | 187 | __put_user(sa_handler, &oact->sa_handler) || |
188 | __put_user(sa_restorer, &oact->sa_restorer)) | 188 | __put_user(sa_restorer, &oact->sa_restorer) || |
189 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
190 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
189 | return -EFAULT; | 191 | return -EFAULT; |
190 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
191 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
192 | } | 192 | } |
193 | 193 | ||
194 | return ret; | 194 | return ret; |
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index cb0efae6802f..71e54ef0931e 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -1664,4 +1664,4 @@ sys_getcpu_wrapper: | |||
1664 | llgtr %r2,%r2 # unsigned * | 1664 | llgtr %r2,%r2 # unsigned * |
1665 | llgtr %r3,%r3 # unsigned * | 1665 | llgtr %r3,%r3 # unsigned * |
1666 | llgtr %r4,%r4 # struct getcpu_cache * | 1666 | llgtr %r4,%r4 # struct getcpu_cache * |
1667 | jg sys_tee | 1667 | jg sys_getcpu |
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index 1eae74e72f95..a5972f1541fe 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c | |||
@@ -21,14 +21,15 @@ static DEFINE_SPINLOCK(cpcmd_lock); | |||
21 | static char cpcmd_buf[241]; | 21 | static char cpcmd_buf[241]; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * the caller of __cpcmd has to ensure that the response buffer is below 2 GB | 24 | * __cpcmd has some restrictions over cpcmd |
25 | * - the response buffer must reside below 2GB (if any) | ||
26 | * - __cpcmd is unlocked and therefore not SMP-safe | ||
25 | */ | 27 | */ |
26 | int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) | 28 | int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) |
27 | { | 29 | { |
28 | unsigned long flags, cmdlen; | 30 | unsigned cmdlen; |
29 | int return_code, return_len; | 31 | int return_code, return_len; |
30 | 32 | ||
31 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
32 | cmdlen = strlen(cmd); | 33 | cmdlen = strlen(cmd); |
33 | BUG_ON(cmdlen > 240); | 34 | BUG_ON(cmdlen > 240); |
34 | memcpy(cpcmd_buf, cmd, cmdlen); | 35 | memcpy(cpcmd_buf, cmd, cmdlen); |
@@ -74,7 +75,6 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
74 | : "+d" (reg3) : "d" (reg2) : "cc"); | 75 | : "+d" (reg3) : "d" (reg2) : "cc"); |
75 | return_code = (int) reg3; | 76 | return_code = (int) reg3; |
76 | } | 77 | } |
77 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
78 | if (response_code != NULL) | 78 | if (response_code != NULL) |
79 | *response_code = return_code; | 79 | *response_code = return_code; |
80 | return return_len; | 80 | return return_len; |
@@ -82,15 +82,18 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
82 | 82 | ||
83 | EXPORT_SYMBOL(__cpcmd); | 83 | EXPORT_SYMBOL(__cpcmd); |
84 | 84 | ||
85 | #ifdef CONFIG_64BIT | ||
86 | int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | 85 | int cpcmd(const char *cmd, char *response, int rlen, int *response_code) |
87 | { | 86 | { |
88 | char *lowbuf; | 87 | char *lowbuf; |
89 | int len; | 88 | int len; |
89 | unsigned long flags; | ||
90 | 90 | ||
91 | if ((rlen == 0) || (response == NULL) | 91 | if ((rlen == 0) || (response == NULL) |
92 | || !((unsigned long)response >> 31)) | 92 | || !((unsigned long)response >> 31)) { |
93 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
93 | len = __cpcmd(cmd, response, rlen, response_code); | 94 | len = __cpcmd(cmd, response, rlen, response_code); |
95 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
96 | } | ||
94 | else { | 97 | else { |
95 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); | 98 | lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA); |
96 | if (!lowbuf) { | 99 | if (!lowbuf) { |
@@ -98,7 +101,9 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
98 | "cpcmd: could not allocate response buffer\n"); | 101 | "cpcmd: could not allocate response buffer\n"); |
99 | return -ENOMEM; | 102 | return -ENOMEM; |
100 | } | 103 | } |
104 | spin_lock_irqsave(&cpcmd_lock, flags); | ||
101 | len = __cpcmd(cmd, lowbuf, rlen, response_code); | 105 | len = __cpcmd(cmd, lowbuf, rlen, response_code); |
106 | spin_unlock_irqrestore(&cpcmd_lock, flags); | ||
102 | memcpy(response, lowbuf, rlen); | 107 | memcpy(response, lowbuf, rlen); |
103 | kfree(lowbuf); | 108 | kfree(lowbuf); |
104 | } | 109 | } |
@@ -106,4 +111,3 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code) | |||
106 | } | 111 | } |
107 | 112 | ||
108 | EXPORT_SYMBOL(cpcmd); | 113 | EXPORT_SYMBOL(cpcmd); |
109 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 43f3d0c7e132..ef5266fbce62 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c | |||
@@ -603,13 +603,13 @@ debug_open(struct inode *inode, struct file *file) | |||
603 | debug_info_t *debug_info, *debug_info_snapshot; | 603 | debug_info_t *debug_info, *debug_info_snapshot; |
604 | 604 | ||
605 | down(&debug_lock); | 605 | down(&debug_lock); |
606 | debug_info = file->f_dentry->d_inode->i_private; | 606 | debug_info = file->f_path.dentry->d_inode->i_private; |
607 | /* find debug view */ | 607 | /* find debug view */ |
608 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { | 608 | for (i = 0; i < DEBUG_MAX_VIEWS; i++) { |
609 | if (!debug_info->views[i]) | 609 | if (!debug_info->views[i]) |
610 | continue; | 610 | continue; |
611 | else if (debug_info->debugfs_entries[i] == | 611 | else if (debug_info->debugfs_entries[i] == |
612 | file->f_dentry) { | 612 | file->f_path.dentry) { |
613 | goto found; /* found view ! */ | 613 | goto found; /* found view ! */ |
614 | } | 614 | } |
615 | } | 615 | } |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 0cf59bb7a857..8f8c802f1bcf 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -418,24 +418,6 @@ start: | |||
418 | .gotr: | 418 | .gotr: |
419 | l %r10,.tbl # EBCDIC to ASCII table | 419 | l %r10,.tbl # EBCDIC to ASCII table |
420 | tr 0(240,%r8),0(%r10) | 420 | tr 0(240,%r8),0(%r10) |
421 | stidp __LC_CPUID # Are we running on VM maybe | ||
422 | cli __LC_CPUID,0xff | ||
423 | bnz .test | ||
424 | .long 0x83300060 # diag 3,0,x'0060' - storage size | ||
425 | b .done | ||
426 | .test: | ||
427 | mvc 0x68(8),.pgmnw # set up pgm check handler | ||
428 | l %r2,.fourmeg | ||
429 | lr %r3,%r2 | ||
430 | bctr %r3,%r0 # 4M-1 | ||
431 | .loop: iske %r0,%r3 | ||
432 | ar %r3,%r2 | ||
433 | .pgmx: | ||
434 | sr %r3,%r2 | ||
435 | la %r3,1(%r3) | ||
436 | .done: | ||
437 | l %r1,.memsize | ||
438 | st %r3,ARCH_OFFSET(%r1) | ||
439 | slr %r0,%r0 | 421 | slr %r0,%r0 |
440 | st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) | 422 | st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) |
441 | st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) | 423 | st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) |
@@ -443,9 +425,6 @@ start: | |||
443 | .tbl: .long _ebcasc # translate table | 425 | .tbl: .long _ebcasc # translate table |
444 | .cmd: .long COMMAND_LINE # address of command line buffer | 426 | .cmd: .long COMMAND_LINE # address of command line buffer |
445 | .parm: .long PARMAREA | 427 | .parm: .long PARMAREA |
446 | .memsize: .long memory_size | ||
447 | .fourmeg: .long 0x00400000 # 4M | ||
448 | .pgmnw: .long 0x00080000,.pgmx | ||
449 | .lowcase: | 428 | .lowcase: |
450 | .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 | 429 | .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 |
451 | .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f | 430 | .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f |
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 0a2c929486ab..4388b3309e0c 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S | |||
@@ -131,10 +131,11 @@ startup_continue: | |||
131 | .long init_thread_union | 131 | .long init_thread_union |
132 | .Lpmask: | 132 | .Lpmask: |
133 | .byte 0 | 133 | .byte 0 |
134 | .align 8 | 134 | .align 8 |
135 | .Lpcext:.long 0x00080000,0x80000000 | 135 | .Lpcext:.long 0x00080000,0x80000000 |
136 | .Lcr: | 136 | .Lcr: |
137 | .long 0x00 # place holder for cr0 | 137 | .long 0x00 # place holder for cr0 |
138 | .align 8 | ||
138 | .Lwaitsclp: | 139 | .Lwaitsclp: |
139 | .long 0x010a0000,0x80000000 + .Lsclph | 140 | .long 0x010a0000,0x80000000 + .Lsclph |
140 | .Lrcp: | 141 | .Lrcp: |
@@ -156,7 +157,7 @@ startup_continue: | |||
156 | slr %r4,%r4 # set start of chunk to zero | 157 | slr %r4,%r4 # set start of chunk to zero |
157 | slr %r5,%r5 # set end of chunk to zero | 158 | slr %r5,%r5 # set end of chunk to zero |
158 | slr %r6,%r6 # set access code to zero | 159 | slr %r6,%r6 # set access code to zero |
159 | la %r10, MEMORY_CHUNKS # number of chunks | 160 | la %r10,MEMORY_CHUNKS # number of chunks |
160 | .Lloop: | 161 | .Lloop: |
161 | tprot 0(%r5),0 # test protection of first byte | 162 | tprot 0(%r5),0 # test protection of first byte |
162 | ipm %r7 | 163 | ipm %r7 |
@@ -176,8 +177,6 @@ startup_continue: | |||
176 | st %r0,4(%r3) # store size of chunk | 177 | st %r0,4(%r3) # store size of chunk |
177 | st %r6,8(%r3) # store type of chunk | 178 | st %r6,8(%r3) # store type of chunk |
178 | la %r3,12(%r3) | 179 | la %r3,12(%r3) |
179 | l %r4,.Lmemsize-.LPG1(%r13) # address of variable memory_size | ||
180 | st %r5,0(%r4) # store last end to memory size | ||
181 | ahi %r10,-1 # update chunk number | 180 | ahi %r10,-1 # update chunk number |
182 | .Lchkloop: | 181 | .Lchkloop: |
183 | lr %r6,%r7 # set access code to last cc | 182 | lr %r6,%r7 # set access code to last cc |
@@ -292,7 +291,6 @@ startup_continue: | |||
292 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg | 291 | .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg |
293 | .Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte | 292 | .Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte |
294 | .Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c | 293 | .Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c |
295 | .Lmemsize:.long memory_size | ||
296 | .Lmchunk:.long memory_chunk | 294 | .Lmchunk:.long memory_chunk |
297 | .Lmflags:.long machine_flags | 295 | .Lmflags:.long machine_flags |
298 | .Lbss_bgn: .long __bss_start | 296 | .Lbss_bgn: .long __bss_start |
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 42f54d482441..c526279e1123 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S | |||
@@ -70,7 +70,20 @@ startup_continue: | |||
70 | sgr %r5,%r5 # set src,length and pad to zero | 70 | sgr %r5,%r5 # set src,length and pad to zero |
71 | mvcle %r2,%r4,0 # clear mem | 71 | mvcle %r2,%r4,0 # clear mem |
72 | jo .-4 # branch back, if not finish | 72 | jo .-4 # branch back, if not finish |
73 | # set program check new psw mask | ||
74 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) | ||
75 | larl %r1,.Lslowmemdetect # set program check address | ||
76 | stg %r1,__LC_PGM_NEW_PSW+8 | ||
77 | lghi %r1,0xc | ||
78 | diag %r0,%r1,0x260 # get memory size of virtual machine | ||
79 | cgr %r0,%r1 # different? -> old detection routine | ||
80 | jne .Lslowmemdetect | ||
81 | aghi %r1,1 # size is one more than end | ||
82 | larl %r2,memory_chunk | ||
83 | stg %r1,8(%r2) # store size of chunk | ||
84 | j .Ldonemem | ||
73 | 85 | ||
86 | .Lslowmemdetect: | ||
74 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word | 87 | l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word |
75 | .Lservicecall: | 88 | .Lservicecall: |
76 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts | 89 | stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts |
@@ -139,8 +152,6 @@ startup_continue: | |||
139 | .int 0x100000 | 152 | .int 0x100000 |
140 | 153 | ||
141 | .Lfchunk: | 154 | .Lfchunk: |
142 | # set program check new psw mask | ||
143 | mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) | ||
144 | 155 | ||
145 | # | 156 | # |
146 | # find memory chunks. | 157 | # find memory chunks. |
@@ -175,8 +186,6 @@ startup_continue: | |||
175 | stg %r0,8(%r3) # store size of chunk | 186 | stg %r0,8(%r3) # store size of chunk |
176 | st %r6,20(%r3) # store type of chunk | 187 | st %r6,20(%r3) # store type of chunk |
177 | la %r3,24(%r3) | 188 | la %r3,24(%r3) |
178 | larl %r8,memory_size | ||
179 | stg %r5,0(%r8) # store memory size | ||
180 | ahi %r10,-1 # update chunk number | 189 | ahi %r10,-1 # update chunk number |
181 | .Lchkloop: | 190 | .Lchkloop: |
182 | lr %r6,%r7 # set access code to last cc | 191 | lr %r6,%r7 # set access code to last cc |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 1f5e782b3d05..a36bea1188d9 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -13,12 +13,21 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/reboot.h> | 15 | #include <linux/reboot.h> |
16 | #include <linux/ctype.h> | ||
16 | #include <asm/smp.h> | 17 | #include <asm/smp.h> |
17 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
18 | #include <asm/cpcmd.h> | 19 | #include <asm/cpcmd.h> |
19 | #include <asm/cio.h> | 20 | #include <asm/cio.h> |
21 | #include <asm/ebcdic.h> | ||
22 | #include <asm/reset.h> | ||
20 | 23 | ||
21 | #define IPL_PARM_BLOCK_VERSION 0 | 24 | #define IPL_PARM_BLOCK_VERSION 0 |
25 | #define LOADPARM_LEN 8 | ||
26 | |||
27 | extern char s390_readinfo_sccb[]; | ||
28 | #define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010) | ||
29 | #define SCCB_LOADPARM (&s390_readinfo_sccb[24]) | ||
30 | #define SCCB_FLAG (s390_readinfo_sccb[91]) | ||
22 | 31 | ||
23 | enum ipl_type { | 32 | enum ipl_type { |
24 | IPL_TYPE_NONE = 1, | 33 | IPL_TYPE_NONE = 1, |
@@ -289,9 +298,25 @@ static struct attribute_group ipl_fcp_attr_group = { | |||
289 | 298 | ||
290 | /* CCW ipl device attributes */ | 299 | /* CCW ipl device attributes */ |
291 | 300 | ||
301 | static ssize_t ipl_ccw_loadparm_show(struct subsystem *subsys, char *page) | ||
302 | { | ||
303 | char loadparm[LOADPARM_LEN + 1] = {}; | ||
304 | |||
305 | if (!SCCB_VALID) | ||
306 | return sprintf(page, "#unknown#\n"); | ||
307 | memcpy(loadparm, SCCB_LOADPARM, LOADPARM_LEN); | ||
308 | EBCASC(loadparm, LOADPARM_LEN); | ||
309 | strstrip(loadparm); | ||
310 | return sprintf(page, "%s\n", loadparm); | ||
311 | } | ||
312 | |||
313 | static struct subsys_attribute sys_ipl_ccw_loadparm_attr = | ||
314 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); | ||
315 | |||
292 | static struct attribute *ipl_ccw_attrs[] = { | 316 | static struct attribute *ipl_ccw_attrs[] = { |
293 | &sys_ipl_type_attr.attr, | 317 | &sys_ipl_type_attr.attr, |
294 | &sys_ipl_device_attr.attr, | 318 | &sys_ipl_device_attr.attr, |
319 | &sys_ipl_ccw_loadparm_attr.attr, | ||
295 | NULL, | 320 | NULL, |
296 | }; | 321 | }; |
297 | 322 | ||
@@ -348,8 +373,57 @@ static struct attribute_group reipl_fcp_attr_group = { | |||
348 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | 373 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", |
349 | reipl_block_ccw->ipl_info.ccw.devno); | 374 | reipl_block_ccw->ipl_info.ccw.devno); |
350 | 375 | ||
376 | static void reipl_get_ascii_loadparm(char *loadparm) | ||
377 | { | ||
378 | memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param, | ||
379 | LOADPARM_LEN); | ||
380 | EBCASC(loadparm, LOADPARM_LEN); | ||
381 | loadparm[LOADPARM_LEN] = 0; | ||
382 | strstrip(loadparm); | ||
383 | } | ||
384 | |||
385 | static ssize_t reipl_ccw_loadparm_show(struct subsystem *subsys, char *page) | ||
386 | { | ||
387 | char buf[LOADPARM_LEN + 1]; | ||
388 | |||
389 | reipl_get_ascii_loadparm(buf); | ||
390 | return sprintf(page, "%s\n", buf); | ||
391 | } | ||
392 | |||
393 | static ssize_t reipl_ccw_loadparm_store(struct subsystem *subsys, | ||
394 | const char *buf, size_t len) | ||
395 | { | ||
396 | int i, lp_len; | ||
397 | |||
398 | /* ignore trailing newline */ | ||
399 | lp_len = len; | ||
400 | if ((len > 0) && (buf[len - 1] == '\n')) | ||
401 | lp_len--; | ||
402 | /* loadparm can have max 8 characters and must not start with a blank */ | ||
403 | if ((lp_len > LOADPARM_LEN) || ((lp_len > 0) && (buf[0] == ' '))) | ||
404 | return -EINVAL; | ||
405 | /* loadparm can only contain "a-z,A-Z,0-9,SP,." */ | ||
406 | for (i = 0; i < lp_len; i++) { | ||
407 | if (isalpha(buf[i]) || isdigit(buf[i]) || (buf[i] == ' ') || | ||
408 | (buf[i] == '.')) | ||
409 | continue; | ||
410 | return -EINVAL; | ||
411 | } | ||
412 | /* initialize loadparm with blanks */ | ||
413 | memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN); | ||
414 | /* copy and convert to ebcdic */ | ||
415 | memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len); | ||
416 | ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN); | ||
417 | return len; | ||
418 | } | ||
419 | |||
420 | static struct subsys_attribute sys_reipl_ccw_loadparm_attr = | ||
421 | __ATTR(loadparm, 0644, reipl_ccw_loadparm_show, | ||
422 | reipl_ccw_loadparm_store); | ||
423 | |||
351 | static struct attribute *reipl_ccw_attrs[] = { | 424 | static struct attribute *reipl_ccw_attrs[] = { |
352 | &sys_reipl_ccw_device_attr.attr, | 425 | &sys_reipl_ccw_device_attr.attr, |
426 | &sys_reipl_ccw_loadparm_attr.attr, | ||
353 | NULL, | 427 | NULL, |
354 | }; | 428 | }; |
355 | 429 | ||
@@ -502,23 +576,6 @@ static struct subsys_attribute dump_type_attr = | |||
502 | 576 | ||
503 | static decl_subsys(dump, NULL, NULL); | 577 | static decl_subsys(dump, NULL, NULL); |
504 | 578 | ||
505 | #ifdef CONFIG_SMP | ||
506 | static void dump_smp_stop_all(void) | ||
507 | { | ||
508 | int cpu; | ||
509 | preempt_disable(); | ||
510 | for_each_online_cpu(cpu) { | ||
511 | if (cpu == smp_processor_id()) | ||
512 | continue; | ||
513 | while (signal_processor(cpu, sigp_stop) == sigp_busy) | ||
514 | udelay(10); | ||
515 | } | ||
516 | preempt_enable(); | ||
517 | } | ||
518 | #else | ||
519 | #define dump_smp_stop_all() do { } while (0) | ||
520 | #endif | ||
521 | |||
522 | /* | 579 | /* |
523 | * Shutdown actions section | 580 | * Shutdown actions section |
524 | */ | 581 | */ |
@@ -571,11 +628,14 @@ void do_reipl(void) | |||
571 | { | 628 | { |
572 | struct ccw_dev_id devid; | 629 | struct ccw_dev_id devid; |
573 | static char buf[100]; | 630 | static char buf[100]; |
631 | char loadparm[LOADPARM_LEN + 1]; | ||
574 | 632 | ||
575 | switch (reipl_type) { | 633 | switch (reipl_type) { |
576 | case IPL_TYPE_CCW: | 634 | case IPL_TYPE_CCW: |
635 | reipl_get_ascii_loadparm(loadparm); | ||
577 | printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n", | 636 | printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n", |
578 | reipl_block_ccw->ipl_info.ccw.devno); | 637 | reipl_block_ccw->ipl_info.ccw.devno); |
638 | printk(KERN_EMERG "loadparm = '%s'\n", loadparm); | ||
579 | break; | 639 | break; |
580 | case IPL_TYPE_FCP: | 640 | case IPL_TYPE_FCP: |
581 | printk(KERN_EMERG "reboot on fcp device:\n"); | 641 | printk(KERN_EMERG "reboot on fcp device:\n"); |
@@ -588,12 +648,19 @@ void do_reipl(void) | |||
588 | switch (reipl_method) { | 648 | switch (reipl_method) { |
589 | case IPL_METHOD_CCW_CIO: | 649 | case IPL_METHOD_CCW_CIO: |
590 | devid.devno = reipl_block_ccw->ipl_info.ccw.devno; | 650 | devid.devno = reipl_block_ccw->ipl_info.ccw.devno; |
651 | if (ipl_get_type() == IPL_TYPE_CCW && devid.devno == ipl_devno) | ||
652 | diag308(DIAG308_IPL, NULL); | ||
591 | devid.ssid = 0; | 653 | devid.ssid = 0; |
592 | reipl_ccw_dev(&devid); | 654 | reipl_ccw_dev(&devid); |
593 | break; | 655 | break; |
594 | case IPL_METHOD_CCW_VM: | 656 | case IPL_METHOD_CCW_VM: |
595 | sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno); | 657 | if (strlen(loadparm) == 0) |
596 | cpcmd(buf, NULL, 0, NULL); | 658 | sprintf(buf, "IPL %X", |
659 | reipl_block_ccw->ipl_info.ccw.devno); | ||
660 | else | ||
661 | sprintf(buf, "IPL %X LOADPARM '%s'", | ||
662 | reipl_block_ccw->ipl_info.ccw.devno, loadparm); | ||
663 | __cpcmd(buf, NULL, 0, NULL); | ||
597 | break; | 664 | break; |
598 | case IPL_METHOD_CCW_DIAG: | 665 | case IPL_METHOD_CCW_DIAG: |
599 | diag308(DIAG308_SET, reipl_block_ccw); | 666 | diag308(DIAG308_SET, reipl_block_ccw); |
@@ -607,16 +674,17 @@ void do_reipl(void) | |||
607 | diag308(DIAG308_IPL, NULL); | 674 | diag308(DIAG308_IPL, NULL); |
608 | break; | 675 | break; |
609 | case IPL_METHOD_FCP_RO_VM: | 676 | case IPL_METHOD_FCP_RO_VM: |
610 | cpcmd("IPL", NULL, 0, NULL); | 677 | __cpcmd("IPL", NULL, 0, NULL); |
611 | break; | 678 | break; |
612 | case IPL_METHOD_NONE: | 679 | case IPL_METHOD_NONE: |
613 | default: | 680 | default: |
614 | if (MACHINE_IS_VM) | 681 | if (MACHINE_IS_VM) |
615 | cpcmd("IPL", NULL, 0, NULL); | 682 | __cpcmd("IPL", NULL, 0, NULL); |
616 | diag308(DIAG308_IPL, NULL); | 683 | diag308(DIAG308_IPL, NULL); |
617 | break; | 684 | break; |
618 | } | 685 | } |
619 | panic("reipl failed!\n"); | 686 | printk(KERN_EMERG "reboot failed!\n"); |
687 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
620 | } | 688 | } |
621 | 689 | ||
622 | static void do_dump(void) | 690 | static void do_dump(void) |
@@ -639,17 +707,17 @@ static void do_dump(void) | |||
639 | 707 | ||
640 | switch (dump_method) { | 708 | switch (dump_method) { |
641 | case IPL_METHOD_CCW_CIO: | 709 | case IPL_METHOD_CCW_CIO: |
642 | dump_smp_stop_all(); | 710 | smp_send_stop(); |
643 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; | 711 | devid.devno = dump_block_ccw->ipl_info.ccw.devno; |
644 | devid.ssid = 0; | 712 | devid.ssid = 0; |
645 | reipl_ccw_dev(&devid); | 713 | reipl_ccw_dev(&devid); |
646 | break; | 714 | break; |
647 | case IPL_METHOD_CCW_VM: | 715 | case IPL_METHOD_CCW_VM: |
648 | dump_smp_stop_all(); | 716 | smp_send_stop(); |
649 | sprintf(buf, "STORE STATUS"); | 717 | sprintf(buf, "STORE STATUS"); |
650 | cpcmd(buf, NULL, 0, NULL); | 718 | __cpcmd(buf, NULL, 0, NULL); |
651 | sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); | 719 | sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); |
652 | cpcmd(buf, NULL, 0, NULL); | 720 | __cpcmd(buf, NULL, 0, NULL); |
653 | break; | 721 | break; |
654 | case IPL_METHOD_CCW_DIAG: | 722 | case IPL_METHOD_CCW_DIAG: |
655 | diag308(DIAG308_SET, dump_block_ccw); | 723 | diag308(DIAG308_SET, dump_block_ccw); |
@@ -746,6 +814,17 @@ static int __init reipl_ccw_init(void) | |||
746 | reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; | 814 | reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; |
747 | reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw); | 815 | reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw); |
748 | reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; | 816 | reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; |
817 | /* check if read scp info worked and set loadparm */ | ||
818 | if (SCCB_VALID) | ||
819 | memcpy(reipl_block_ccw->ipl_info.ccw.load_param, | ||
820 | SCCB_LOADPARM, LOADPARM_LEN); | ||
821 | else | ||
822 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ | ||
823 | memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40, | ||
824 | LOADPARM_LEN); | ||
825 | /* FIXME: check for diag308_set_works when enabling diag ccw reipl */ | ||
826 | if (!MACHINE_IS_VM) | ||
827 | sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO; | ||
749 | if (ipl_get_type() == IPL_TYPE_CCW) | 828 | if (ipl_get_type() == IPL_TYPE_CCW) |
750 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; | 829 | reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; |
751 | reipl_capabilities |= IPL_TYPE_CCW; | 830 | reipl_capabilities |= IPL_TYPE_CCW; |
@@ -827,13 +906,11 @@ static int __init dump_ccw_init(void) | |||
827 | return 0; | 906 | return 0; |
828 | } | 907 | } |
829 | 908 | ||
830 | extern char s390_readinfo_sccb[]; | ||
831 | |||
832 | static int __init dump_fcp_init(void) | 909 | static int __init dump_fcp_init(void) |
833 | { | 910 | { |
834 | int rc; | 911 | int rc; |
835 | 912 | ||
836 | if(!(s390_readinfo_sccb[91] & 0x2)) | 913 | if(!(SCCB_FLAG & 0x2) || !SCCB_VALID) |
837 | return 0; /* LDIPL DUMP is not installed */ | 914 | return 0; /* LDIPL DUMP is not installed */ |
838 | if (!diag308_set_works) | 915 | if (!diag308_set_works) |
839 | return 0; | 916 | return 0; |
@@ -931,3 +1008,53 @@ static int __init s390_ipl_init(void) | |||
931 | } | 1008 | } |
932 | 1009 | ||
933 | __initcall(s390_ipl_init); | 1010 | __initcall(s390_ipl_init); |
1011 | |||
1012 | static LIST_HEAD(rcall); | ||
1013 | static DEFINE_MUTEX(rcall_mutex); | ||
1014 | |||
1015 | void register_reset_call(struct reset_call *reset) | ||
1016 | { | ||
1017 | mutex_lock(&rcall_mutex); | ||
1018 | list_add(&reset->list, &rcall); | ||
1019 | mutex_unlock(&rcall_mutex); | ||
1020 | } | ||
1021 | EXPORT_SYMBOL_GPL(register_reset_call); | ||
1022 | |||
1023 | void unregister_reset_call(struct reset_call *reset) | ||
1024 | { | ||
1025 | mutex_lock(&rcall_mutex); | ||
1026 | list_del(&reset->list); | ||
1027 | mutex_unlock(&rcall_mutex); | ||
1028 | } | ||
1029 | EXPORT_SYMBOL_GPL(unregister_reset_call); | ||
1030 | |||
1031 | static void do_reset_calls(void) | ||
1032 | { | ||
1033 | struct reset_call *reset; | ||
1034 | |||
1035 | list_for_each_entry(reset, &rcall, list) | ||
1036 | reset->fn(); | ||
1037 | } | ||
1038 | |||
1039 | extern void reset_mcck_handler(void); | ||
1040 | |||
1041 | void s390_reset_system(void) | ||
1042 | { | ||
1043 | struct _lowcore *lc; | ||
1044 | |||
1045 | /* Stack for interrupt/machine check handler */ | ||
1046 | lc = (struct _lowcore *)(unsigned long) store_prefix(); | ||
1047 | lc->panic_stack = S390_lowcore.panic_stack; | ||
1048 | |||
1049 | /* Disable prefixing */ | ||
1050 | set_prefix(0); | ||
1051 | |||
1052 | /* Disable lowcore protection */ | ||
1053 | __ctl_clear_bit(0,28); | ||
1054 | |||
1055 | /* Set new machine check handler */ | ||
1056 | S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK; | ||
1057 | S390_lowcore.mcck_new_psw.addr = | ||
1058 | PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler; | ||
1059 | do_reset_calls(); | ||
1060 | } | ||
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 67914fe7f317..576368c4f605 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -200,7 +200,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) | |||
200 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 200 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
201 | { | 201 | { |
202 | mutex_lock(&kprobe_mutex); | 202 | mutex_lock(&kprobe_mutex); |
203 | free_insn_slot(p->ainsn.insn); | 203 | free_insn_slot(p->ainsn.insn, 0); |
204 | mutex_unlock(&kprobe_mutex); | 204 | mutex_unlock(&kprobe_mutex); |
205 | } | 205 | } |
206 | 206 | ||
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 60b1ea9f946b..f6d9bcc0f75b 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c | |||
@@ -1,15 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/machine_kexec.c | 2 | * arch/s390/kernel/machine_kexec.c |
3 | * | 3 | * |
4 | * (C) Copyright IBM Corp. 2005 | 4 | * Copyright IBM Corp. 2005,2006 |
5 | * | 5 | * |
6 | * Author(s): Rolf Adelsberger <adelsberger@de.ibm.com> | 6 | * Author(s): Rolf Adelsberger, |
7 | * | 7 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
8 | */ | ||
9 | |||
10 | /* | ||
11 | * s390_machine_kexec.c - handle the transition of Linux booting another kernel | ||
12 | * on the S390 architecture. | ||
13 | */ | 8 | */ |
14 | 9 | ||
15 | #include <linux/device.h> | 10 | #include <linux/device.h> |
@@ -22,86 +17,49 @@ | |||
22 | #include <asm/pgalloc.h> | 17 | #include <asm/pgalloc.h> |
23 | #include <asm/system.h> | 18 | #include <asm/system.h> |
24 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
20 | #include <asm/reset.h> | ||
25 | 21 | ||
26 | static void kexec_halt_all_cpus(void *); | 22 | typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); |
27 | |||
28 | typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long); | ||
29 | 23 | ||
30 | extern const unsigned char relocate_kernel[]; | 24 | extern const unsigned char relocate_kernel[]; |
31 | extern const unsigned long long relocate_kernel_len; | 25 | extern const unsigned long long relocate_kernel_len; |
32 | 26 | ||
33 | int | 27 | int machine_kexec_prepare(struct kimage *image) |
34 | machine_kexec_prepare(struct kimage *image) | ||
35 | { | 28 | { |
36 | unsigned long reboot_code_buffer; | 29 | void *reboot_code_buffer; |
37 | 30 | ||
38 | /* We don't support anything but the default image type for now. */ | 31 | /* We don't support anything but the default image type for now. */ |
39 | if (image->type != KEXEC_TYPE_DEFAULT) | 32 | if (image->type != KEXEC_TYPE_DEFAULT) |
40 | return -EINVAL; | 33 | return -EINVAL; |
41 | 34 | ||
42 | /* Get the destination where the assembler code should be copied to.*/ | 35 | /* Get the destination where the assembler code should be copied to.*/ |
43 | reboot_code_buffer = page_to_pfn(image->control_code_page)<<PAGE_SHIFT; | 36 | reboot_code_buffer = (void *) page_to_phys(image->control_code_page); |
44 | 37 | ||
45 | /* Then copy it */ | 38 | /* Then copy it */ |
46 | memcpy((void *) reboot_code_buffer, relocate_kernel, | 39 | memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len); |
47 | relocate_kernel_len); | ||
48 | return 0; | 40 | return 0; |
49 | } | 41 | } |
50 | 42 | ||
51 | void | 43 | void machine_kexec_cleanup(struct kimage *image) |
52 | machine_kexec_cleanup(struct kimage *image) | ||
53 | { | 44 | { |
54 | } | 45 | } |
55 | 46 | ||
56 | void | 47 | void machine_shutdown(void) |
57 | machine_shutdown(void) | ||
58 | { | 48 | { |
59 | printk(KERN_INFO "kexec: machine_shutdown called\n"); | 49 | printk(KERN_INFO "kexec: machine_shutdown called\n"); |
60 | } | 50 | } |
61 | 51 | ||
62 | NORET_TYPE void | 52 | void machine_kexec(struct kimage *image) |
63 | machine_kexec(struct kimage *image) | ||
64 | { | 53 | { |
65 | clear_all_subchannels(); | ||
66 | cio_reset_channel_paths(); | ||
67 | |||
68 | /* Disable lowcore protection */ | ||
69 | ctl_clear_bit(0,28); | ||
70 | |||
71 | on_each_cpu(kexec_halt_all_cpus, image, 0, 0); | ||
72 | for (;;); | ||
73 | } | ||
74 | |||
75 | extern void pfault_fini(void); | ||
76 | |||
77 | static void | ||
78 | kexec_halt_all_cpus(void *kernel_image) | ||
79 | { | ||
80 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
81 | int cpu; | ||
82 | struct kimage *image; | ||
83 | relocate_kernel_t data_mover; | 54 | relocate_kernel_t data_mover; |
84 | 55 | ||
85 | #ifdef CONFIG_PFAULT | 56 | smp_send_stop(); |
86 | if (MACHINE_IS_VM) | 57 | pfault_fini(); |
87 | pfault_fini(); | 58 | s390_reset_system(); |
88 | #endif | ||
89 | 59 | ||
90 | if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1) | 60 | data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); |
91 | signal_processor(smp_processor_id(), sigp_stop); | ||
92 | |||
93 | /* Wait for all other cpus to enter stopped state */ | ||
94 | for_each_online_cpu(cpu) { | ||
95 | if (cpu == smp_processor_id()) | ||
96 | continue; | ||
97 | while (!smp_cpu_not_running(cpu)) | ||
98 | cpu_relax(); | ||
99 | } | ||
100 | |||
101 | image = (struct kimage *) kernel_image; | ||
102 | data_mover = (relocate_kernel_t) | ||
103 | (page_to_pfn(image->control_code_page) << PAGE_SHIFT); | ||
104 | 61 | ||
105 | /* Call the moving routine */ | 62 | /* Call the moving routine */ |
106 | (*data_mover) (&image->head, image->start); | 63 | (*data_mover)(&image->head, image->start); |
64 | for (;;); | ||
107 | } | 65 | } |
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S index 0340477f3b08..f9434d42ce9f 100644 --- a/arch/s390/kernel/reipl.S +++ b/arch/s390/kernel/reipl.S | |||
@@ -11,19 +11,10 @@ | |||
11 | .globl do_reipl_asm | 11 | .globl do_reipl_asm |
12 | do_reipl_asm: basr %r13,0 | 12 | do_reipl_asm: basr %r13,0 |
13 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) | 13 | .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) |
14 | 14 | .Lpg1: # do store status of all registers | |
15 | # switch off lowcore protection | ||
16 | |||
17 | .Lpg1: stctl %c0,%c0,.Lctlsave1-.Lpg0(%r13) | ||
18 | stctl %c0,%c0,.Lctlsave2-.Lpg0(%r13) | ||
19 | ni .Lctlsave1-.Lpg0(%r13),0xef | ||
20 | lctl %c0,%c0,.Lctlsave1-.Lpg0(%r13) | ||
21 | |||
22 | # do store status of all registers | ||
23 | 15 | ||
24 | stm %r0,%r15,__LC_GPREGS_SAVE_AREA | 16 | stm %r0,%r15,__LC_GPREGS_SAVE_AREA |
25 | stctl %c0,%c15,__LC_CREGS_SAVE_AREA | 17 | stctl %c0,%c15,__LC_CREGS_SAVE_AREA |
26 | mvc __LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13) | ||
27 | stam %a0,%a15,__LC_AREGS_SAVE_AREA | 18 | stam %a0,%a15,__LC_AREGS_SAVE_AREA |
28 | stpx __LC_PREFIX_SAVE_AREA | 19 | stpx __LC_PREFIX_SAVE_AREA |
29 | stckc .Lclkcmp-.Lpg0(%r13) | 20 | stckc .Lclkcmp-.Lpg0(%r13) |
@@ -56,8 +47,7 @@ do_reipl_asm: basr %r13,0 | |||
56 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 | 47 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 |
57 | jz .L003 | 48 | jz .L003 |
58 | bas %r14,.Ldisab-.Lpg0(%r13) | 49 | bas %r14,.Ldisab-.Lpg0(%r13) |
59 | .L003: spx .Lnull-.Lpg0(%r13) | 50 | .L003: st %r1,__LC_SUBCHANNEL_ID |
60 | st %r1,__LC_SUBCHANNEL_ID | ||
61 | lpsw 0 | 51 | lpsw 0 |
62 | sigp 0,0,0(6) | 52 | sigp 0,0,0(6) |
63 | .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) | 53 | .Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13) |
@@ -65,9 +55,6 @@ do_reipl_asm: basr %r13,0 | |||
65 | .align 8 | 55 | .align 8 |
66 | .Lclkcmp: .quad 0x0000000000000000 | 56 | .Lclkcmp: .quad 0x0000000000000000 |
67 | .Lall: .long 0xff000000 | 57 | .Lall: .long 0xff000000 |
68 | .Lnull: .long 0x00000000 | ||
69 | .Lctlsave1: .long 0x00000000 | ||
70 | .Lctlsave2: .long 0x00000000 | ||
71 | .align 8 | 58 | .align 8 |
72 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 | 59 | .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 |
73 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs | 60 | .Lpcnew: .long 0x00080000,0x80000000+.Lecs |
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index de7435054f7c..f18ef260ca23 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S | |||
@@ -10,10 +10,10 @@ | |||
10 | #include <asm/lowcore.h> | 10 | #include <asm/lowcore.h> |
11 | .globl do_reipl_asm | 11 | .globl do_reipl_asm |
12 | do_reipl_asm: basr %r13,0 | 12 | do_reipl_asm: basr %r13,0 |
13 | .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) | ||
14 | .Lpg1: # do store status of all registers | ||
13 | 15 | ||
14 | # do store status of all registers | 16 | stg %r1,.Lregsave-.Lpg0(%r13) |
15 | |||
16 | .Lpg0: stg %r1,.Lregsave-.Lpg0(%r13) | ||
17 | lghi %r1,0x1000 | 17 | lghi %r1,0x1000 |
18 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1) | 18 | stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1) |
19 | lg %r0,.Lregsave-.Lpg0(%r13) | 19 | lg %r0,.Lregsave-.Lpg0(%r13) |
@@ -27,11 +27,7 @@ do_reipl_asm: basr %r13,0 | |||
27 | stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) | 27 | stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) |
28 | stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) | 28 | stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) |
29 | 29 | ||
30 | lpswe .Lnewpsw-.Lpg0(%r13) | 30 | lctlg %c6,%c6,.Lall-.Lpg0(%r13) |
31 | .Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13) | ||
32 | stctg %c0,%c0,.Lregsave-.Lpg0(%r13) | ||
33 | ni .Lregsave+4-.Lpg0(%r13),0xef | ||
34 | lctlg %c0,%c0,.Lregsave-.Lpg0(%r13) | ||
35 | lgr %r1,%r2 | 31 | lgr %r1,%r2 |
36 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) | 32 | mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13) |
37 | stsch .Lschib-.Lpg0(%r13) | 33 | stsch .Lschib-.Lpg0(%r13) |
@@ -56,8 +52,7 @@ do_reipl_asm: basr %r13,0 | |||
56 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 | 52 | .L002: tm .Liplirb+8-.Lpg0(%r13),0xf3 |
57 | jz .L003 | 53 | jz .L003 |
58 | bas %r14,.Ldisab-.Lpg0(%r13) | 54 | bas %r14,.Ldisab-.Lpg0(%r13) |
59 | .L003: spx .Lnull-.Lpg0(%r13) | 55 | .L003: st %r1,__LC_SUBCHANNEL_ID |
60 | st %r1,__LC_SUBCHANNEL_ID | ||
61 | lhi %r1,0 # mode 0 = esa | 56 | lhi %r1,0 # mode 0 = esa |
62 | slr %r0,%r0 # set cpuid to zero | 57 | slr %r0,%r0 # set cpuid to zero |
63 | sigp %r1,%r0,0x12 # switch to esa mode | 58 | sigp %r1,%r0,0x12 # switch to esa mode |
@@ -70,7 +65,6 @@ do_reipl_asm: basr %r13,0 | |||
70 | .Lclkcmp: .quad 0x0000000000000000 | 65 | .Lclkcmp: .quad 0x0000000000000000 |
71 | .Lall: .quad 0x00000000ff000000 | 66 | .Lall: .quad 0x00000000ff000000 |
72 | .Lregsave: .quad 0x0000000000000000 | 67 | .Lregsave: .quad 0x0000000000000000 |
73 | .Lnull: .long 0x0000000000000000 | ||
74 | .align 16 | 68 | .align 16 |
75 | /* | 69 | /* |
76 | * These addresses have to be 31 bit otherwise | 70 | * These addresses have to be 31 bit otherwise |
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S index f9899ff2e5b0..3b456b80bcee 100644 --- a/arch/s390/kernel/relocate_kernel.S +++ b/arch/s390/kernel/relocate_kernel.S | |||
@@ -26,8 +26,7 @@ | |||
26 | relocate_kernel: | 26 | relocate_kernel: |
27 | basr %r13,0 # base address | 27 | basr %r13,0 # base address |
28 | .base: | 28 | .base: |
29 | stnsm sys_msk-.base(%r13),0xf8 # disable DAT and IRQ (external) | 29 | stnsm sys_msk-.base(%r13),0xfb # disable DAT |
30 | spx zero64-.base(%r13) # absolute addressing mode | ||
31 | stctl %c0,%c15,ctlregs-.base(%r13) | 30 | stctl %c0,%c15,ctlregs-.base(%r13) |
32 | stm %r0,%r15,gprregs-.base(%r13) | 31 | stm %r0,%r15,gprregs-.base(%r13) |
33 | la %r1,load_psw-.base(%r13) | 32 | la %r1,load_psw-.base(%r13) |
@@ -97,8 +96,6 @@ | |||
97 | lpsw 0 # hopefully start new kernel... | 96 | lpsw 0 # hopefully start new kernel... |
98 | 97 | ||
99 | .align 8 | 98 | .align 8 |
100 | zero64: | ||
101 | .quad 0 | ||
102 | load_psw: | 99 | load_psw: |
103 | .long 0x00080000,0x80000000 | 100 | .long 0x00080000,0x80000000 |
104 | sys_msk: | 101 | sys_msk: |
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S index 4fb443042d9c..1f9ea2067b59 100644 --- a/arch/s390/kernel/relocate_kernel64.S +++ b/arch/s390/kernel/relocate_kernel64.S | |||
@@ -27,8 +27,7 @@ | |||
27 | relocate_kernel: | 27 | relocate_kernel: |
28 | basr %r13,0 # base address | 28 | basr %r13,0 # base address |
29 | .base: | 29 | .base: |
30 | stnsm sys_msk-.base(%r13),0xf8 # disable DAT and IRQs | 30 | stnsm sys_msk-.base(%r13),0xfb # disable DAT |
31 | spx zero64-.base(%r13) # absolute addressing mode | ||
32 | stctg %c0,%c15,ctlregs-.base(%r13) | 31 | stctg %c0,%c15,ctlregs-.base(%r13) |
33 | stmg %r0,%r15,gprregs-.base(%r13) | 32 | stmg %r0,%r15,gprregs-.base(%r13) |
34 | lghi %r0,3 | 33 | lghi %r0,3 |
@@ -100,8 +99,6 @@ | |||
100 | lpsw 0 # hopefully start new kernel... | 99 | lpsw 0 # hopefully start new kernel... |
101 | 100 | ||
102 | .align 8 | 101 | .align 8 |
103 | zero64: | ||
104 | .quad 0 | ||
105 | load_psw: | 102 | load_psw: |
106 | .long 0x00080000,0x80000000 | 103 | .long 0x00080000,0x80000000 |
107 | sys_msk: | 104 | sys_msk: |
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S new file mode 100644 index 000000000000..be8688c0665c --- /dev/null +++ b/arch/s390/kernel/reset.S | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * arch/s390/kernel/reset.S | ||
3 | * | ||
4 | * Copyright (C) IBM Corp. 2006 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm/lowcore.h> | ||
10 | |||
11 | #ifdef CONFIG_64BIT | ||
12 | |||
13 | .globl reset_mcck_handler | ||
14 | reset_mcck_handler: | ||
15 | basr %r13,0 | ||
16 | 0: lg %r15,__LC_PANIC_STACK # load panic stack | ||
17 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
18 | lg %r1,s390_reset_mcck_handler-0b(%r13) | ||
19 | ltgr %r1,%r1 | ||
20 | jz 1f | ||
21 | basr %r14,%r1 | ||
22 | 1: la %r1,4095 | ||
23 | lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) | ||
24 | lpswe __LC_MCK_OLD_PSW | ||
25 | |||
26 | .globl s390_reset_mcck_handler | ||
27 | s390_reset_mcck_handler: | ||
28 | .quad 0 | ||
29 | |||
30 | #else /* CONFIG_64BIT */ | ||
31 | |||
32 | .globl reset_mcck_handler | ||
33 | reset_mcck_handler: | ||
34 | basr %r13,0 | ||
35 | 0: l %r15,__LC_PANIC_STACK # load panic stack | ||
36 | ahi %r15,-STACK_FRAME_OVERHEAD | ||
37 | l %r1,s390_reset_mcck_handler-0b(%r13) | ||
38 | ltr %r1,%r1 | ||
39 | jz 1f | ||
40 | basr %r14,%r1 | ||
41 | 1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA | ||
42 | lpsw __LC_MCK_OLD_PSW | ||
43 | |||
44 | .globl s390_reset_mcck_handler | ||
45 | s390_reset_mcck_handler: | ||
46 | .long 0 | ||
47 | |||
48 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 49f2b68e32b1..49ef206ec880 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -62,13 +62,9 @@ EXPORT_SYMBOL_GPL(uaccess); | |||
62 | unsigned int console_mode = 0; | 62 | unsigned int console_mode = 0; |
63 | unsigned int console_devno = -1; | 63 | unsigned int console_devno = -1; |
64 | unsigned int console_irq = -1; | 64 | unsigned int console_irq = -1; |
65 | unsigned long memory_size = 0; | ||
66 | unsigned long machine_flags = 0; | 65 | unsigned long machine_flags = 0; |
67 | struct { | 66 | |
68 | unsigned long addr, size, type; | 67 | struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; |
69 | } memory_chunk[MEMORY_CHUNKS] = { { 0 } }; | ||
70 | #define CHUNK_READ_WRITE 0 | ||
71 | #define CHUNK_READ_ONLY 1 | ||
72 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ | 68 | volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ |
73 | static unsigned long __initdata memory_end; | 69 | static unsigned long __initdata memory_end; |
74 | 70 | ||
@@ -228,11 +224,11 @@ static void __init conmode_default(void) | |||
228 | char *ptr; | 224 | char *ptr; |
229 | 225 | ||
230 | if (MACHINE_IS_VM) { | 226 | if (MACHINE_IS_VM) { |
231 | __cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); | 227 | cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); |
232 | console_devno = simple_strtoul(query_buffer + 5, NULL, 16); | 228 | console_devno = simple_strtoul(query_buffer + 5, NULL, 16); |
233 | ptr = strstr(query_buffer, "SUBCHANNEL ="); | 229 | ptr = strstr(query_buffer, "SUBCHANNEL ="); |
234 | console_irq = simple_strtoul(ptr + 13, NULL, 16); | 230 | console_irq = simple_strtoul(ptr + 13, NULL, 16); |
235 | __cpcmd("QUERY TERM", query_buffer, 1024, NULL); | 231 | cpcmd("QUERY TERM", query_buffer, 1024, NULL); |
236 | ptr = strstr(query_buffer, "CONMODE"); | 232 | ptr = strstr(query_buffer, "CONMODE"); |
237 | /* | 233 | /* |
238 | * Set the conmode to 3215 so that the device recognition | 234 | * Set the conmode to 3215 so that the device recognition |
@@ -241,7 +237,7 @@ static void __init conmode_default(void) | |||
241 | * 3215 and the 3270 driver will try to access the console | 237 | * 3215 and the 3270 driver will try to access the console |
242 | * device (3215 as console and 3270 as normal tty). | 238 | * device (3215 as console and 3270 as normal tty). |
243 | */ | 239 | */ |
244 | __cpcmd("TERM CONMODE 3215", NULL, 0, NULL); | 240 | cpcmd("TERM CONMODE 3215", NULL, 0, NULL); |
245 | if (ptr == NULL) { | 241 | if (ptr == NULL) { |
246 | #if defined(CONFIG_SCLP_CONSOLE) | 242 | #if defined(CONFIG_SCLP_CONSOLE) |
247 | SET_CONSOLE_SCLP; | 243 | SET_CONSOLE_SCLP; |
@@ -298,14 +294,14 @@ static void do_machine_restart_nonsmp(char * __unused) | |||
298 | static void do_machine_halt_nonsmp(void) | 294 | static void do_machine_halt_nonsmp(void) |
299 | { | 295 | { |
300 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | 296 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) |
301 | cpcmd(vmhalt_cmd, NULL, 0, NULL); | 297 | __cpcmd(vmhalt_cmd, NULL, 0, NULL); |
302 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | 298 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); |
303 | } | 299 | } |
304 | 300 | ||
305 | static void do_machine_power_off_nonsmp(void) | 301 | static void do_machine_power_off_nonsmp(void) |
306 | { | 302 | { |
307 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | 303 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) |
308 | cpcmd(vmpoff_cmd, NULL, 0, NULL); | 304 | __cpcmd(vmpoff_cmd, NULL, 0, NULL); |
309 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | 305 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); |
310 | } | 306 | } |
311 | 307 | ||
@@ -434,7 +430,7 @@ setup_lowcore(void) | |||
434 | lc->extended_save_area_addr = (__u32) | 430 | lc->extended_save_area_addr = (__u32) |
435 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); | 431 | __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0); |
436 | /* enable extended save area */ | 432 | /* enable extended save area */ |
437 | ctl_set_bit(14, 29); | 433 | __ctl_set_bit(14, 29); |
438 | } | 434 | } |
439 | #endif | 435 | #endif |
440 | set_prefix((u32)(unsigned long) lc); | 436 | set_prefix((u32)(unsigned long) lc); |
@@ -473,6 +469,37 @@ setup_resources(void) | |||
473 | } | 469 | } |
474 | } | 470 | } |
475 | 471 | ||
472 | static void __init setup_memory_end(void) | ||
473 | { | ||
474 | unsigned long real_size, memory_size; | ||
475 | unsigned long max_mem, max_phys; | ||
476 | int i; | ||
477 | |||
478 | memory_size = real_size = 0; | ||
479 | max_phys = VMALLOC_END - VMALLOC_MIN_SIZE; | ||
480 | memory_end &= PAGE_MASK; | ||
481 | |||
482 | max_mem = memory_end ? min(max_phys, memory_end) : max_phys; | ||
483 | |||
484 | for (i = 0; i < MEMORY_CHUNKS; i++) { | ||
485 | struct mem_chunk *chunk = &memory_chunk[i]; | ||
486 | |||
487 | real_size = max(real_size, chunk->addr + chunk->size); | ||
488 | if (chunk->addr >= max_mem) { | ||
489 | memset(chunk, 0, sizeof(*chunk)); | ||
490 | continue; | ||
491 | } | ||
492 | if (chunk->addr + chunk->size > max_mem) | ||
493 | chunk->size = max_mem - chunk->addr; | ||
494 | memory_size = max(memory_size, chunk->addr + chunk->size); | ||
495 | } | ||
496 | if (!memory_end) | ||
497 | memory_end = memory_size; | ||
498 | if (real_size > memory_end) | ||
499 | printk("More memory detected than supported. Unused: %luk\n", | ||
500 | (real_size - memory_end) >> 10); | ||
501 | } | ||
502 | |||
476 | static void __init | 503 | static void __init |
477 | setup_memory(void) | 504 | setup_memory(void) |
478 | { | 505 | { |
@@ -616,8 +643,6 @@ setup_arch(char **cmdline_p) | |||
616 | init_mm.end_data = (unsigned long) &_edata; | 643 | init_mm.end_data = (unsigned long) &_edata; |
617 | init_mm.brk = (unsigned long) &_end; | 644 | init_mm.brk = (unsigned long) &_end; |
618 | 645 | ||
619 | memory_end = memory_size; | ||
620 | |||
621 | if (MACHINE_HAS_MVCOS) | 646 | if (MACHINE_HAS_MVCOS) |
622 | memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); | 647 | memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess)); |
623 | else | 648 | else |
@@ -625,20 +650,7 @@ setup_arch(char **cmdline_p) | |||
625 | 650 | ||
626 | parse_early_param(); | 651 | parse_early_param(); |
627 | 652 | ||
628 | #ifndef CONFIG_64BIT | 653 | setup_memory_end(); |
629 | memory_end &= ~0x400000UL; | ||
630 | |||
631 | /* | ||
632 | * We need some free virtual space to be able to do vmalloc. | ||
633 | * On a machine with 2GB memory we make sure that we have at | ||
634 | * least 128 MB free space for vmalloc. | ||
635 | */ | ||
636 | if (memory_end > 1920*1024*1024) | ||
637 | memory_end = 1920*1024*1024; | ||
638 | #else /* CONFIG_64BIT */ | ||
639 | memory_end &= ~0x200000UL; | ||
640 | #endif /* CONFIG_64BIT */ | ||
641 | |||
642 | setup_memory(); | 654 | setup_memory(); |
643 | setup_resources(); | 655 | setup_resources(); |
644 | setup_lowcore(); | 656 | setup_lowcore(); |
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 4392a77cbae8..4c8a7954ef48 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
@@ -80,10 +80,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
80 | old_sigset_t mask; | 80 | old_sigset_t mask; |
81 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 81 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
82 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 82 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
83 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | 83 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || |
84 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | ||
85 | __get_user(mask, &act->sa_mask)) | ||
84 | return -EFAULT; | 86 | return -EFAULT; |
85 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
86 | __get_user(mask, &act->sa_mask); | ||
87 | siginitset(&new_ka.sa.sa_mask, mask); | 87 | siginitset(&new_ka.sa.sa_mask, mask); |
88 | } | 88 | } |
89 | 89 | ||
@@ -92,10 +92,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
92 | if (!ret && oact) { | 92 | if (!ret && oact) { |
93 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | 93 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || |
94 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 94 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
95 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | 95 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || |
96 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | ||
97 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | ||
96 | return -EFAULT; | 98 | return -EFAULT; |
97 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
98 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | ||
99 | } | 99 | } |
100 | 100 | ||
101 | return ret; | 101 | return ret; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 62822245f9be..19090f7d4f51 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -230,18 +230,37 @@ static inline void do_store_status(void) | |||
230 | } | 230 | } |
231 | } | 231 | } |
232 | 232 | ||
233 | static inline void do_wait_for_stop(void) | ||
234 | { | ||
235 | int cpu; | ||
236 | |||
237 | /* Wait for all other cpus to enter stopped state */ | ||
238 | for_each_online_cpu(cpu) { | ||
239 | if (cpu == smp_processor_id()) | ||
240 | continue; | ||
241 | while(!smp_cpu_not_running(cpu)) | ||
242 | cpu_relax(); | ||
243 | } | ||
244 | } | ||
245 | |||
233 | /* | 246 | /* |
234 | * this function sends a 'stop' sigp to all other CPUs in the system. | 247 | * this function sends a 'stop' sigp to all other CPUs in the system. |
235 | * it goes straight through. | 248 | * it goes straight through. |
236 | */ | 249 | */ |
237 | void smp_send_stop(void) | 250 | void smp_send_stop(void) |
238 | { | 251 | { |
252 | /* Disable all interrupts/machine checks */ | ||
253 | __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK); | ||
254 | |||
239 | /* write magic number to zero page (absolute 0) */ | 255 | /* write magic number to zero page (absolute 0) */ |
240 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; | 256 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; |
241 | 257 | ||
242 | /* stop other processors. */ | 258 | /* stop other processors. */ |
243 | do_send_stop(); | 259 | do_send_stop(); |
244 | 260 | ||
261 | /* wait until other processors are stopped */ | ||
262 | do_wait_for_stop(); | ||
263 | |||
245 | /* store status of other processors. */ | 264 | /* store status of other processors. */ |
246 | do_store_status(); | 265 | do_store_status(); |
247 | } | 266 | } |
@@ -250,88 +269,28 @@ void smp_send_stop(void) | |||
250 | * Reboot, halt and power_off routines for SMP. | 269 | * Reboot, halt and power_off routines for SMP. |
251 | */ | 270 | */ |
252 | 271 | ||
253 | static void do_machine_restart(void * __unused) | ||
254 | { | ||
255 | int cpu; | ||
256 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
257 | |||
258 | if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1) | ||
259 | signal_processor(smp_processor_id(), sigp_stop); | ||
260 | |||
261 | /* Wait for all other cpus to enter stopped state */ | ||
262 | for_each_online_cpu(cpu) { | ||
263 | if (cpu == smp_processor_id()) | ||
264 | continue; | ||
265 | while(!smp_cpu_not_running(cpu)) | ||
266 | cpu_relax(); | ||
267 | } | ||
268 | |||
269 | /* Store status of other cpus. */ | ||
270 | do_store_status(); | ||
271 | |||
272 | /* | ||
273 | * Finally call reipl. Because we waited for all other | ||
274 | * cpus to enter this function we know that they do | ||
275 | * not hold any s390irq-locks (the cpus have been | ||
276 | * interrupted by an external interrupt and s390irq | ||
277 | * locks are always held disabled). | ||
278 | */ | ||
279 | do_reipl(); | ||
280 | } | ||
281 | |||
282 | void machine_restart_smp(char * __unused) | 272 | void machine_restart_smp(char * __unused) |
283 | { | 273 | { |
284 | on_each_cpu(do_machine_restart, NULL, 0, 0); | 274 | smp_send_stop(); |
285 | } | 275 | do_reipl(); |
286 | |||
287 | static void do_wait_for_stop(void) | ||
288 | { | ||
289 | unsigned long cr[16]; | ||
290 | |||
291 | __ctl_store(cr, 0, 15); | ||
292 | cr[0] &= ~0xffff; | ||
293 | cr[6] = 0; | ||
294 | __ctl_load(cr, 0, 15); | ||
295 | for (;;) | ||
296 | enabled_wait(); | ||
297 | } | ||
298 | |||
299 | static void do_machine_halt(void * __unused) | ||
300 | { | ||
301 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
302 | |||
303 | if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) { | ||
304 | smp_send_stop(); | ||
305 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | ||
306 | cpcmd(vmhalt_cmd, NULL, 0, NULL); | ||
307 | signal_processor(smp_processor_id(), | ||
308 | sigp_stop_and_store_status); | ||
309 | } | ||
310 | do_wait_for_stop(); | ||
311 | } | 276 | } |
312 | 277 | ||
313 | void machine_halt_smp(void) | 278 | void machine_halt_smp(void) |
314 | { | 279 | { |
315 | on_each_cpu(do_machine_halt, NULL, 0, 0); | 280 | smp_send_stop(); |
316 | } | 281 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) |
317 | 282 | __cpcmd(vmhalt_cmd, NULL, 0, NULL); | |
318 | static void do_machine_power_off(void * __unused) | 283 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); |
319 | { | 284 | for (;;); |
320 | static atomic_t cpuid = ATOMIC_INIT(-1); | ||
321 | |||
322 | if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) { | ||
323 | smp_send_stop(); | ||
324 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | ||
325 | cpcmd(vmpoff_cmd, NULL, 0, NULL); | ||
326 | signal_processor(smp_processor_id(), | ||
327 | sigp_stop_and_store_status); | ||
328 | } | ||
329 | do_wait_for_stop(); | ||
330 | } | 285 | } |
331 | 286 | ||
332 | void machine_power_off_smp(void) | 287 | void machine_power_off_smp(void) |
333 | { | 288 | { |
334 | on_each_cpu(do_machine_power_off, NULL, 0, 0); | 289 | smp_send_stop(); |
290 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | ||
291 | __cpcmd(vmpoff_cmd, NULL, 0, NULL); | ||
292 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | ||
293 | for (;;); | ||
335 | } | 294 | } |
336 | 295 | ||
337 | /* | 296 | /* |
@@ -501,8 +460,6 @@ __init smp_count_cpus(void) | |||
501 | */ | 460 | */ |
502 | extern void init_cpu_timer(void); | 461 | extern void init_cpu_timer(void); |
503 | extern void init_cpu_vtimer(void); | 462 | extern void init_cpu_vtimer(void); |
504 | extern int pfault_init(void); | ||
505 | extern void pfault_fini(void); | ||
506 | 463 | ||
507 | int __devinit start_secondary(void *cpuvoid) | 464 | int __devinit start_secondary(void *cpuvoid) |
508 | { | 465 | { |
@@ -514,11 +471,9 @@ int __devinit start_secondary(void *cpuvoid) | |||
514 | #ifdef CONFIG_VIRT_TIMER | 471 | #ifdef CONFIG_VIRT_TIMER |
515 | init_cpu_vtimer(); | 472 | init_cpu_vtimer(); |
516 | #endif | 473 | #endif |
517 | #ifdef CONFIG_PFAULT | ||
518 | /* Enable pfault pseudo page faults on this cpu. */ | 474 | /* Enable pfault pseudo page faults on this cpu. */ |
519 | if (MACHINE_IS_VM) | 475 | pfault_init(); |
520 | pfault_init(); | 476 | |
521 | #endif | ||
522 | /* Mark this cpu as online */ | 477 | /* Mark this cpu as online */ |
523 | cpu_set(smp_processor_id(), cpu_online_map); | 478 | cpu_set(smp_processor_id(), cpu_online_map); |
524 | /* Switch on interrupts */ | 479 | /* Switch on interrupts */ |
@@ -708,11 +663,8 @@ __cpu_disable(void) | |||
708 | } | 663 | } |
709 | cpu_clear(cpu, cpu_online_map); | 664 | cpu_clear(cpu, cpu_online_map); |
710 | 665 | ||
711 | #ifdef CONFIG_PFAULT | ||
712 | /* Disable pfault pseudo page faults on this cpu. */ | 666 | /* Disable pfault pseudo page faults on this cpu. */ |
713 | if (MACHINE_IS_VM) | 667 | pfault_fini(); |
714 | pfault_fini(); | ||
715 | #endif | ||
716 | 668 | ||
717 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); | 669 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); |
718 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); | 670 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); |
@@ -860,4 +812,3 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); | |||
860 | EXPORT_SYMBOL(smp_call_function); | 812 | EXPORT_SYMBOL(smp_call_function); |
861 | EXPORT_SYMBOL(smp_get_cpu); | 813 | EXPORT_SYMBOL(smp_get_cpu); |
862 | EXPORT_SYMBOL(smp_put_cpu); | 814 | EXPORT_SYMBOL(smp_put_cpu); |
863 | |||
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index e59baec56520..a4ceae3dbcf1 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -320,3 +320,4 @@ SYSCALL(sys_tee,sys_tee,sys_tee_wrapper) | |||
320 | SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper) | 320 | SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper) |
321 | NI_SYSCALL /* 310 sys_move_pages */ | 321 | NI_SYSCALL /* 310 sys_move_pages */ |
322 | SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper) | 322 | SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper) |
323 | SYSCALL(sys_epoll_pwait,sys_epoll_pwait,sys_ni_syscall) | ||
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 66375a5e3d12..3cbb0dcf1f1d 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -58,12 +58,6 @@ int sysctl_userprocess_debug = 0; | |||
58 | 58 | ||
59 | extern pgm_check_handler_t do_protection_exception; | 59 | extern pgm_check_handler_t do_protection_exception; |
60 | extern pgm_check_handler_t do_dat_exception; | 60 | extern pgm_check_handler_t do_dat_exception; |
61 | #ifdef CONFIG_PFAULT | ||
62 | extern int pfault_init(void); | ||
63 | extern void pfault_fini(void); | ||
64 | extern void pfault_interrupt(__u16 error_code); | ||
65 | static ext_int_info_t ext_int_pfault; | ||
66 | #endif | ||
67 | extern pgm_check_handler_t do_monitor_call; | 61 | extern pgm_check_handler_t do_monitor_call; |
68 | 62 | ||
69 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) | 63 | #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) |
@@ -135,7 +129,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) | |||
135 | } | 129 | } |
136 | } | 130 | } |
137 | 131 | ||
138 | void show_trace(struct task_struct *task, unsigned long * stack) | 132 | void show_trace(struct task_struct *task, unsigned long *stack) |
139 | { | 133 | { |
140 | register unsigned long __r15 asm ("15"); | 134 | register unsigned long __r15 asm ("15"); |
141 | unsigned long sp; | 135 | unsigned long sp; |
@@ -157,6 +151,9 @@ void show_trace(struct task_struct *task, unsigned long * stack) | |||
157 | __show_trace(sp, S390_lowcore.thread_info, | 151 | __show_trace(sp, S390_lowcore.thread_info, |
158 | S390_lowcore.thread_info + THREAD_SIZE); | 152 | S390_lowcore.thread_info + THREAD_SIZE); |
159 | printk("\n"); | 153 | printk("\n"); |
154 | if (!task) | ||
155 | task = current; | ||
156 | debug_show_held_locks(task); | ||
160 | } | 157 | } |
161 | 158 | ||
162 | void show_stack(struct task_struct *task, unsigned long *sp) | 159 | void show_stack(struct task_struct *task, unsigned long *sp) |
@@ -462,7 +459,8 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | |||
462 | local_irq_enable(); | 459 | local_irq_enable(); |
463 | 460 | ||
464 | if (regs->psw.mask & PSW_MASK_PSTATE) { | 461 | if (regs->psw.mask & PSW_MASK_PSTATE) { |
465 | get_user(*((__u16 *) opcode), (__u16 __user *) location); | 462 | if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) |
463 | return; | ||
466 | if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { | 464 | if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { |
467 | if (current->ptrace & PT_PTRACED) | 465 | if (current->ptrace & PT_PTRACED) |
468 | force_sig(SIGTRAP, current); | 466 | force_sig(SIGTRAP, current); |
@@ -470,20 +468,25 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code) | |||
470 | signal = SIGILL; | 468 | signal = SIGILL; |
471 | #ifdef CONFIG_MATHEMU | 469 | #ifdef CONFIG_MATHEMU |
472 | } else if (opcode[0] == 0xb3) { | 470 | } else if (opcode[0] == 0xb3) { |
473 | get_user(*((__u16 *) (opcode+2)), location+1); | 471 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
472 | return; | ||
474 | signal = math_emu_b3(opcode, regs); | 473 | signal = math_emu_b3(opcode, regs); |
475 | } else if (opcode[0] == 0xed) { | 474 | } else if (opcode[0] == 0xed) { |
476 | get_user(*((__u32 *) (opcode+2)), | 475 | if (get_user(*((__u32 *) (opcode+2)), |
477 | (__u32 __user *)(location+1)); | 476 | (__u32 __user *)(location+1))) |
477 | return; | ||
478 | signal = math_emu_ed(opcode, regs); | 478 | signal = math_emu_ed(opcode, regs); |
479 | } else if (*((__u16 *) opcode) == 0xb299) { | 479 | } else if (*((__u16 *) opcode) == 0xb299) { |
480 | get_user(*((__u16 *) (opcode+2)), location+1); | 480 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
481 | return; | ||
481 | signal = math_emu_srnm(opcode, regs); | 482 | signal = math_emu_srnm(opcode, regs); |
482 | } else if (*((__u16 *) opcode) == 0xb29c) { | 483 | } else if (*((__u16 *) opcode) == 0xb29c) { |
483 | get_user(*((__u16 *) (opcode+2)), location+1); | 484 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
485 | return; | ||
484 | signal = math_emu_stfpc(opcode, regs); | 486 | signal = math_emu_stfpc(opcode, regs); |
485 | } else if (*((__u16 *) opcode) == 0xb29d) { | 487 | } else if (*((__u16 *) opcode) == 0xb29d) { |
486 | get_user(*((__u16 *) (opcode+2)), location+1); | 488 | if (get_user(*((__u16 *) (opcode+2)), location+1)) |
489 | return; | ||
487 | signal = math_emu_lfpc(opcode, regs); | 490 | signal = math_emu_lfpc(opcode, regs); |
488 | #endif | 491 | #endif |
489 | } else | 492 | } else |
@@ -733,22 +736,5 @@ void __init trap_init(void) | |||
733 | pgm_check_table[0x1C] = &space_switch_exception; | 736 | pgm_check_table[0x1C] = &space_switch_exception; |
734 | pgm_check_table[0x1D] = &hfp_sqrt_exception; | 737 | pgm_check_table[0x1D] = &hfp_sqrt_exception; |
735 | pgm_check_table[0x40] = &do_monitor_call; | 738 | pgm_check_table[0x40] = &do_monitor_call; |
736 | 739 | pfault_irq_init(); | |
737 | if (MACHINE_IS_VM) { | ||
738 | #ifdef CONFIG_PFAULT | ||
739 | /* | ||
740 | * Try to get pfault pseudo page faults going. | ||
741 | */ | ||
742 | if (register_early_external_interrupt(0x2603, pfault_interrupt, | ||
743 | &ext_int_pfault) != 0) | ||
744 | panic("Couldn't request external interrupt 0x2603"); | ||
745 | |||
746 | if (pfault_init() == 0) | ||
747 | return; | ||
748 | |||
749 | /* Tough luck, no pfault. */ | ||
750 | unregister_early_external_interrupt(0x2603, pfault_interrupt, | ||
751 | &ext_int_pfault); | ||
752 | #endif | ||
753 | } | ||
754 | } | 740 | } |
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index af9e69a03011..fe0f2e97ba7b 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
@@ -83,13 +83,7 @@ SECTIONS | |||
83 | __setup_end = .; | 83 | __setup_end = .; |
84 | __initcall_start = .; | 84 | __initcall_start = .; |
85 | .initcall.init : { | 85 | .initcall.init : { |
86 | *(.initcall1.init) | 86 | INITCALLS |
87 | *(.initcall2.init) | ||
88 | *(.initcall3.init) | ||
89 | *(.initcall4.init) | ||
90 | *(.initcall5.init) | ||
91 | *(.initcall6.init) | ||
92 | *(.initcall7.init) | ||
93 | } | 87 | } |
94 | __initcall_end = .; | 88 | __initcall_end = .; |
95 | __con_initcall_start = .; | 89 | __con_initcall_start = .; |
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile index b0cfa6c4883d..b5f94cf3bde8 100644 --- a/arch/s390/lib/Makefile +++ b/arch/s390/lib/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | lib-y += delay.o string.o uaccess_std.o | 7 | lib-y += delay.o string.o uaccess_std.o uaccess_pt.o |
8 | lib-$(CONFIG_32BIT) += div64.o | 8 | lib-$(CONFIG_32BIT) += div64.o |
9 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o | 9 | lib-$(CONFIG_64BIT) += uaccess_mvcos.o |
10 | lib-$(CONFIG_SMP) += spinlock.o | 10 | lib-$(CONFIG_SMP) += spinlock.o |
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c index 121b2935a422..f9a23d57eb79 100644 --- a/arch/s390/lib/uaccess_mvcos.c +++ b/arch/s390/lib/uaccess_mvcos.c | |||
@@ -27,6 +27,9 @@ | |||
27 | #define SLR "slgr" | 27 | #define SLR "slgr" |
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | extern size_t copy_from_user_std(size_t, const void __user *, void *); | ||
31 | extern size_t copy_to_user_std(size_t, void __user *, const void *); | ||
32 | |||
30 | size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) | 33 | size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) |
31 | { | 34 | { |
32 | register unsigned long reg0 asm("0") = 0x81UL; | 35 | register unsigned long reg0 asm("0") = 0x81UL; |
@@ -66,6 +69,13 @@ size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x) | |||
66 | return size; | 69 | return size; |
67 | } | 70 | } |
68 | 71 | ||
72 | size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x) | ||
73 | { | ||
74 | if (size <= 256) | ||
75 | return copy_from_user_std(size, ptr, x); | ||
76 | return copy_from_user_mvcos(size, ptr, x); | ||
77 | } | ||
78 | |||
69 | size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) | 79 | size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) |
70 | { | 80 | { |
71 | register unsigned long reg0 asm("0") = 0x810000UL; | 81 | register unsigned long reg0 asm("0") = 0x810000UL; |
@@ -95,6 +105,13 @@ size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x) | |||
95 | return size; | 105 | return size; |
96 | } | 106 | } |
97 | 107 | ||
108 | size_t copy_to_user_mvcos_check(size_t size, void __user *ptr, const void *x) | ||
109 | { | ||
110 | if (size <= 256) | ||
111 | return copy_to_user_std(size, ptr, x); | ||
112 | return copy_to_user_mvcos(size, ptr, x); | ||
113 | } | ||
114 | |||
98 | size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) | 115 | size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from) |
99 | { | 116 | { |
100 | register unsigned long reg0 asm("0") = 0x810081UL; | 117 | register unsigned long reg0 asm("0") = 0x810081UL; |
@@ -145,18 +162,16 @@ size_t clear_user_mvcos(size_t size, void __user *to) | |||
145 | return size; | 162 | return size; |
146 | } | 163 | } |
147 | 164 | ||
148 | extern size_t copy_from_user_std_small(size_t, const void __user *, void *); | ||
149 | extern size_t copy_to_user_std_small(size_t, void __user *, const void *); | ||
150 | extern size_t strnlen_user_std(size_t, const char __user *); | 165 | extern size_t strnlen_user_std(size_t, const char __user *); |
151 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); | 166 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); |
152 | extern int futex_atomic_op(int, int __user *, int, int *); | 167 | extern int futex_atomic_op(int, int __user *, int, int *); |
153 | extern int futex_atomic_cmpxchg(int __user *, int, int); | 168 | extern int futex_atomic_cmpxchg(int __user *, int, int); |
154 | 169 | ||
155 | struct uaccess_ops uaccess_mvcos = { | 170 | struct uaccess_ops uaccess_mvcos = { |
156 | .copy_from_user = copy_from_user_mvcos, | 171 | .copy_from_user = copy_from_user_mvcos_check, |
157 | .copy_from_user_small = copy_from_user_std_small, | 172 | .copy_from_user_small = copy_from_user_std, |
158 | .copy_to_user = copy_to_user_mvcos, | 173 | .copy_to_user = copy_to_user_mvcos_check, |
159 | .copy_to_user_small = copy_to_user_std_small, | 174 | .copy_to_user_small = copy_to_user_std, |
160 | .copy_in_user = copy_in_user_mvcos, | 175 | .copy_in_user = copy_in_user_mvcos, |
161 | .clear_user = clear_user_mvcos, | 176 | .clear_user = clear_user_mvcos, |
162 | .strnlen_user = strnlen_user_std, | 177 | .strnlen_user = strnlen_user_std, |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c new file mode 100644 index 000000000000..633249c3ba91 --- /dev/null +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -0,0 +1,154 @@ | |||
1 | /* | ||
2 | * arch/s390/lib/uaccess_pt.c | ||
3 | * | ||
4 | * User access functions based on page table walks. | ||
5 | * | ||
6 | * Copyright IBM Corp. 2006 | ||
7 | * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <asm/uaccess.h> | ||
13 | #include <asm/futex.h> | ||
14 | |||
15 | static inline int __handle_fault(struct mm_struct *mm, unsigned long address, | ||
16 | int write_access) | ||
17 | { | ||
18 | struct vm_area_struct *vma; | ||
19 | int ret = -EFAULT; | ||
20 | |||
21 | down_read(&mm->mmap_sem); | ||
22 | vma = find_vma(mm, address); | ||
23 | if (unlikely(!vma)) | ||
24 | goto out; | ||
25 | if (unlikely(vma->vm_start > address)) { | ||
26 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
27 | goto out; | ||
28 | if (expand_stack(vma, address)) | ||
29 | goto out; | ||
30 | } | ||
31 | |||
32 | if (!write_access) { | ||
33 | /* page not present, check vm flags */ | ||
34 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) | ||
35 | goto out; | ||
36 | } else { | ||
37 | if (!(vma->vm_flags & VM_WRITE)) | ||
38 | goto out; | ||
39 | } | ||
40 | |||
41 | survive: | ||
42 | switch (handle_mm_fault(mm, vma, address, write_access)) { | ||
43 | case VM_FAULT_MINOR: | ||
44 | current->min_flt++; | ||
45 | break; | ||
46 | case VM_FAULT_MAJOR: | ||
47 | current->maj_flt++; | ||
48 | break; | ||
49 | case VM_FAULT_SIGBUS: | ||
50 | goto out_sigbus; | ||
51 | case VM_FAULT_OOM: | ||
52 | goto out_of_memory; | ||
53 | default: | ||
54 | BUG(); | ||
55 | } | ||
56 | ret = 0; | ||
57 | out: | ||
58 | up_read(&mm->mmap_sem); | ||
59 | return ret; | ||
60 | |||
61 | out_of_memory: | ||
62 | up_read(&mm->mmap_sem); | ||
63 | if (is_init(current)) { | ||
64 | yield(); | ||
65 | down_read(&mm->mmap_sem); | ||
66 | goto survive; | ||
67 | } | ||
68 | printk("VM: killing process %s\n", current->comm); | ||
69 | return ret; | ||
70 | |||
71 | out_sigbus: | ||
72 | up_read(&mm->mmap_sem); | ||
73 | current->thread.prot_addr = address; | ||
74 | current->thread.trap_no = 0x11; | ||
75 | force_sig(SIGBUS, current); | ||
76 | return ret; | ||
77 | } | ||
78 | |||
79 | static inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, | ||
80 | size_t n, int write_user) | ||
81 | { | ||
82 | struct mm_struct *mm = current->mm; | ||
83 | unsigned long offset, pfn, done, size; | ||
84 | pgd_t *pgd; | ||
85 | pmd_t *pmd; | ||
86 | pte_t *pte; | ||
87 | void *from, *to; | ||
88 | |||
89 | done = 0; | ||
90 | retry: | ||
91 | spin_lock(&mm->page_table_lock); | ||
92 | do { | ||
93 | pgd = pgd_offset(mm, uaddr); | ||
94 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
95 | goto fault; | ||
96 | |||
97 | pmd = pmd_offset(pgd, uaddr); | ||
98 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) | ||
99 | goto fault; | ||
100 | |||
101 | pte = pte_offset_map(pmd, uaddr); | ||
102 | if (!pte || !pte_present(*pte) || | ||
103 | (write_user && !pte_write(*pte))) | ||
104 | goto fault; | ||
105 | |||
106 | pfn = pte_pfn(*pte); | ||
107 | if (!pfn_valid(pfn)) | ||
108 | goto out; | ||
109 | |||
110 | offset = uaddr & (PAGE_SIZE - 1); | ||
111 | size = min(n - done, PAGE_SIZE - offset); | ||
112 | if (write_user) { | ||
113 | to = (void *)((pfn << PAGE_SHIFT) + offset); | ||
114 | from = kptr + done; | ||
115 | } else { | ||
116 | from = (void *)((pfn << PAGE_SHIFT) + offset); | ||
117 | to = kptr + done; | ||
118 | } | ||
119 | memcpy(to, from, size); | ||
120 | done += size; | ||
121 | uaddr += size; | ||
122 | } while (done < n); | ||
123 | out: | ||
124 | spin_unlock(&mm->page_table_lock); | ||
125 | return n - done; | ||
126 | fault: | ||
127 | spin_unlock(&mm->page_table_lock); | ||
128 | if (__handle_fault(mm, uaddr, write_user)) | ||
129 | return n - done; | ||
130 | goto retry; | ||
131 | } | ||
132 | |||
133 | size_t copy_from_user_pt(size_t n, const void __user *from, void *to) | ||
134 | { | ||
135 | size_t rc; | ||
136 | |||
137 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
138 | memcpy(to, (void __kernel __force *) from, n); | ||
139 | return 0; | ||
140 | } | ||
141 | rc = __user_copy_pt((unsigned long) from, to, n, 0); | ||
142 | if (unlikely(rc)) | ||
143 | memset(to + n - rc, 0, rc); | ||
144 | return rc; | ||
145 | } | ||
146 | |||
147 | size_t copy_to_user_pt(size_t n, void __user *to, const void *from) | ||
148 | { | ||
149 | if (segment_eq(get_fs(), KERNEL_DS)) { | ||
150 | memcpy((void __kernel __force *) to, from, n); | ||
151 | return 0; | ||
152 | } | ||
153 | return __user_copy_pt((unsigned long) to, (void *) from, n, 1); | ||
154 | } | ||
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c index f44f0078b354..bbaca66fa293 100644 --- a/arch/s390/lib/uaccess_std.c +++ b/arch/s390/lib/uaccess_std.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <asm/uaccess.h> | 14 | #include <linux/uaccess.h> |
15 | #include <asm/futex.h> | 15 | #include <asm/futex.h> |
16 | 16 | ||
17 | #ifndef __s390x__ | 17 | #ifndef __s390x__ |
@@ -28,6 +28,9 @@ | |||
28 | #define SLR "slgr" | 28 | #define SLR "slgr" |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | extern size_t copy_from_user_pt(size_t n, const void __user *from, void *to); | ||
32 | extern size_t copy_to_user_pt(size_t n, void __user *to, const void *from); | ||
33 | |||
31 | size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) | 34 | size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) |
32 | { | 35 | { |
33 | unsigned long tmp1, tmp2; | 36 | unsigned long tmp1, tmp2; |
@@ -69,34 +72,11 @@ size_t copy_from_user_std(size_t size, const void __user *ptr, void *x) | |||
69 | return size; | 72 | return size; |
70 | } | 73 | } |
71 | 74 | ||
72 | size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x) | 75 | size_t copy_from_user_std_check(size_t size, const void __user *ptr, void *x) |
73 | { | 76 | { |
74 | unsigned long tmp1, tmp2; | 77 | if (size <= 1024) |
75 | 78 | return copy_from_user_std(size, ptr, x); | |
76 | tmp1 = 0UL; | 79 | return copy_from_user_pt(size, ptr, x); |
77 | asm volatile( | ||
78 | "0: mvcp 0(%0,%2),0(%1),%3\n" | ||
79 | " "SLR" %0,%0\n" | ||
80 | " j 5f\n" | ||
81 | "1: la %4,255(%1)\n" /* %4 = ptr + 255 */ | ||
82 | " "LHI" %3,-4096\n" | ||
83 | " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ | ||
84 | " "SLR" %4,%1\n" | ||
85 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
86 | " jnh 5f\n" | ||
87 | "2: mvcp 0(%4,%2),0(%1),%3\n" | ||
88 | " "SLR" %0,%4\n" | ||
89 | " "ALR" %2,%4\n" | ||
90 | "3:"LHI" %4,-1\n" | ||
91 | " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */ | ||
92 | " bras %3,4f\n" | ||
93 | " xc 0(1,%2),0(%2)\n" | ||
94 | "4: ex %4,0(%3)\n" | ||
95 | "5:\n" | ||
96 | EX_TABLE(0b,1b) EX_TABLE(2b,3b) | ||
97 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
98 | : : "cc", "memory"); | ||
99 | return size; | ||
100 | } | 80 | } |
101 | 81 | ||
102 | size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) | 82 | size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) |
@@ -130,28 +110,11 @@ size_t copy_to_user_std(size_t size, void __user *ptr, const void *x) | |||
130 | return size; | 110 | return size; |
131 | } | 111 | } |
132 | 112 | ||
133 | size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x) | 113 | size_t copy_to_user_std_check(size_t size, void __user *ptr, const void *x) |
134 | { | 114 | { |
135 | unsigned long tmp1, tmp2; | 115 | if (size <= 1024) |
136 | 116 | return copy_to_user_std(size, ptr, x); | |
137 | tmp1 = 0UL; | 117 | return copy_to_user_pt(size, ptr, x); |
138 | asm volatile( | ||
139 | "0: mvcs 0(%0,%1),0(%2),%3\n" | ||
140 | " "SLR" %0,%0\n" | ||
141 | " j 3f\n" | ||
142 | "1: la %4,255(%1)\n" /* ptr + 255 */ | ||
143 | " "LHI" %3,-4096\n" | ||
144 | " nr %4,%3\n" /* (ptr + 255) & -4096UL */ | ||
145 | " "SLR" %4,%1\n" | ||
146 | " "CLR" %0,%4\n" /* copy crosses next page boundary? */ | ||
147 | " jnh 3f\n" | ||
148 | "2: mvcs 0(%4,%1),0(%2),%3\n" | ||
149 | " "SLR" %0,%4\n" | ||
150 | "3:\n" | ||
151 | EX_TABLE(0b,1b) EX_TABLE(2b,3b) | ||
152 | : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) | ||
153 | : : "cc", "memory"); | ||
154 | return size; | ||
155 | } | 118 | } |
156 | 119 | ||
157 | size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) | 120 | size_t copy_in_user_std(size_t size, void __user *to, const void __user *from) |
@@ -295,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | |||
295 | { | 258 | { |
296 | int oldval = 0, newval, ret; | 259 | int oldval = 0, newval, ret; |
297 | 260 | ||
298 | inc_preempt_count(); | 261 | pagefault_disable(); |
299 | 262 | ||
300 | switch (op) { | 263 | switch (op) { |
301 | case FUTEX_OP_SET: | 264 | case FUTEX_OP_SET: |
@@ -321,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old) | |||
321 | default: | 284 | default: |
322 | ret = -ENOSYS; | 285 | ret = -ENOSYS; |
323 | } | 286 | } |
324 | dec_preempt_count(); | 287 | pagefault_enable(); |
325 | *old = oldval; | 288 | *old = oldval; |
326 | return ret; | 289 | return ret; |
327 | } | 290 | } |
@@ -343,10 +306,10 @@ int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval) | |||
343 | } | 306 | } |
344 | 307 | ||
345 | struct uaccess_ops uaccess_std = { | 308 | struct uaccess_ops uaccess_std = { |
346 | .copy_from_user = copy_from_user_std, | 309 | .copy_from_user = copy_from_user_std_check, |
347 | .copy_from_user_small = copy_from_user_std_small, | 310 | .copy_from_user_small = copy_from_user_std, |
348 | .copy_to_user = copy_to_user_std, | 311 | .copy_to_user = copy_to_user_std_check, |
349 | .copy_to_user_small = copy_to_user_std_small, | 312 | .copy_to_user_small = copy_to_user_std, |
350 | .copy_in_user = copy_in_user_std, | 313 | .copy_in_user = copy_in_user_std, |
351 | .clear_user = clear_user_std, | 314 | .clear_user = clear_user_std, |
352 | .strnlen_user = strnlen_user_std, | 315 | .strnlen_user = strnlen_user_std, |
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index aa9a42b6e62d..8e09db1edbb9 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile | |||
@@ -2,6 +2,6 @@ | |||
2 | # Makefile for the linux s390-specific parts of the memory manager. | 2 | # Makefile for the linux s390-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o ioremap.o extmem.o mmap.o | 5 | obj-y := init.o fault.o ioremap.o extmem.o mmap.o vmem.o |
6 | obj-$(CONFIG_CMM) += cmm.o | 6 | obj-$(CONFIG_CMM) += cmm.o |
7 | 7 | ||
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 226275d5c4f6..775bf19e742b 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -14,12 +14,14 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
17 | #include <linux/ctype.h> | ||
17 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | #include <asm/pgtable.h> | ||
18 | #include <asm/ebcdic.h> | 20 | #include <asm/ebcdic.h> |
19 | #include <asm/errno.h> | 21 | #include <asm/errno.h> |
20 | #include <asm/extmem.h> | 22 | #include <asm/extmem.h> |
21 | #include <asm/cpcmd.h> | 23 | #include <asm/cpcmd.h> |
22 | #include <linux/ctype.h> | 24 | #include <asm/setup.h> |
23 | 25 | ||
24 | #define DCSS_DEBUG /* Debug messages on/off */ | 26 | #define DCSS_DEBUG /* Debug messages on/off */ |
25 | 27 | ||
@@ -77,15 +79,11 @@ struct dcss_segment { | |||
77 | int segcnt; | 79 | int segcnt; |
78 | }; | 80 | }; |
79 | 81 | ||
80 | static DEFINE_SPINLOCK(dcss_lock); | 82 | static DEFINE_MUTEX(dcss_lock); |
81 | static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); | 83 | static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); |
82 | static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", | 84 | static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", |
83 | "EW/EN-MIXED" }; | 85 | "EW/EN-MIXED" }; |
84 | 86 | ||
85 | extern struct { | ||
86 | unsigned long addr, size, type; | ||
87 | } memory_chunk[MEMORY_CHUNKS]; | ||
88 | |||
89 | /* | 87 | /* |
90 | * Create the 8 bytes, ebcdic VM segment name from | 88 | * Create the 8 bytes, ebcdic VM segment name from |
91 | * an ascii name. | 89 | * an ascii name. |
@@ -117,7 +115,7 @@ segment_by_name (char *name) | |||
117 | struct list_head *l; | 115 | struct list_head *l; |
118 | struct dcss_segment *tmp, *retval = NULL; | 116 | struct dcss_segment *tmp, *retval = NULL; |
119 | 117 | ||
120 | assert_spin_locked(&dcss_lock); | 118 | BUG_ON(!mutex_is_locked(&dcss_lock)); |
121 | dcss_mkname (name, dcss_name); | 119 | dcss_mkname (name, dcss_name); |
122 | list_for_each (l, &dcss_list) { | 120 | list_for_each (l, &dcss_list) { |
123 | tmp = list_entry (l, struct dcss_segment, list); | 121 | tmp = list_entry (l, struct dcss_segment, list); |
@@ -241,65 +239,6 @@ query_segment_type (struct dcss_segment *seg) | |||
241 | } | 239 | } |
242 | 240 | ||
243 | /* | 241 | /* |
244 | * check if the given segment collides with guest storage. | ||
245 | * returns 1 if this is the case, 0 if no collision was found | ||
246 | */ | ||
247 | static int | ||
248 | segment_overlaps_storage(struct dcss_segment *seg) | ||
249 | { | ||
250 | int i; | ||
251 | |||
252 | for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
253 | if (memory_chunk[i].type != 0) | ||
254 | continue; | ||
255 | if ((memory_chunk[i].addr >> 20) > (seg->end >> 20)) | ||
256 | continue; | ||
257 | if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20) | ||
258 | < (seg->start_addr >> 20)) | ||
259 | continue; | ||
260 | return 1; | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * check if segment collides with other segments that are currently loaded | ||
267 | * returns 1 if this is the case, 0 if no collision was found | ||
268 | */ | ||
269 | static int | ||
270 | segment_overlaps_others (struct dcss_segment *seg) | ||
271 | { | ||
272 | struct list_head *l; | ||
273 | struct dcss_segment *tmp; | ||
274 | |||
275 | assert_spin_locked(&dcss_lock); | ||
276 | list_for_each(l, &dcss_list) { | ||
277 | tmp = list_entry(l, struct dcss_segment, list); | ||
278 | if ((tmp->start_addr >> 20) > (seg->end >> 20)) | ||
279 | continue; | ||
280 | if ((tmp->end >> 20) < (seg->start_addr >> 20)) | ||
281 | continue; | ||
282 | if (seg == tmp) | ||
283 | continue; | ||
284 | return 1; | ||
285 | } | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * check if segment exceeds the kernel mapping range (detected or set via mem=) | ||
291 | * returns 1 if this is the case, 0 if segment fits into the range | ||
292 | */ | ||
293 | static inline int | ||
294 | segment_exceeds_range (struct dcss_segment *seg) | ||
295 | { | ||
296 | int seg_last_pfn = (seg->end) >> PAGE_SHIFT; | ||
297 | if (seg_last_pfn > max_pfn) | ||
298 | return 1; | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * get info about a segment | 242 | * get info about a segment |
304 | * possible return values: | 243 | * possible return values: |
305 | * -ENOSYS : we are not running on VM | 244 | * -ENOSYS : we are not running on VM |
@@ -344,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
344 | rc = query_segment_type (seg); | 283 | rc = query_segment_type (seg); |
345 | if (rc < 0) | 284 | if (rc < 0) |
346 | goto out_free; | 285 | goto out_free; |
347 | if (segment_exceeds_range(seg)) { | 286 | |
348 | PRINT_WARN ("segment_load: not loading segment %s - exceeds" | 287 | rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
349 | " kernel mapping range\n",name); | 288 | |
350 | rc = -ERANGE; | 289 | switch (rc) { |
290 | case 0: | ||
291 | break; | ||
292 | case -ENOSPC: | ||
293 | PRINT_WARN("segment_load: not loading segment %s - overlaps " | ||
294 | "storage/segment\n", name); | ||
351 | goto out_free; | 295 | goto out_free; |
352 | } | 296 | case -ERANGE: |
353 | if (segment_overlaps_storage(seg)) { | 297 | PRINT_WARN("segment_load: not loading segment %s - exceeds " |
354 | PRINT_WARN ("segment_load: not loading segment %s - overlaps" | 298 | "kernel mapping range\n", name); |
355 | " storage\n",name); | ||
356 | rc = -ENOSPC; | ||
357 | goto out_free; | 299 | goto out_free; |
358 | } | 300 | default: |
359 | if (segment_overlaps_others(seg)) { | 301 | PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n", |
360 | PRINT_WARN ("segment_load: not loading segment %s - overlaps" | 302 | name, rc); |
361 | " other segments\n",name); | ||
362 | rc = -EBUSY; | ||
363 | goto out_free; | 303 | goto out_free; |
364 | } | 304 | } |
305 | |||
365 | if (do_nonshared) | 306 | if (do_nonshared) |
366 | dcss_command = DCSS_LOADNSR; | 307 | dcss_command = DCSS_LOADNSR; |
367 | else | 308 | else |
@@ -375,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
375 | rc = dcss_diag_translate_rc (seg->end); | 316 | rc = dcss_diag_translate_rc (seg->end); |
376 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 317 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, |
377 | &seg->start_addr, &seg->end); | 318 | &seg->start_addr, &seg->end); |
378 | goto out_free; | 319 | goto out_shared; |
379 | } | 320 | } |
380 | seg->do_nonshared = do_nonshared; | 321 | seg->do_nonshared = do_nonshared; |
381 | atomic_set(&seg->ref_count, 1); | 322 | atomic_set(&seg->ref_count, 1); |
@@ -394,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
394 | (void*)seg->start_addr, (void*)seg->end, | 335 | (void*)seg->start_addr, (void*)seg->end, |
395 | segtype_string[seg->vm_segtype]); | 336 | segtype_string[seg->vm_segtype]); |
396 | goto out; | 337 | goto out; |
338 | out_shared: | ||
339 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | ||
397 | out_free: | 340 | out_free: |
398 | kfree(seg); | 341 | kfree(seg); |
399 | out: | 342 | out: |
@@ -429,7 +372,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr, | |||
429 | if (!MACHINE_IS_VM) | 372 | if (!MACHINE_IS_VM) |
430 | return -ENOSYS; | 373 | return -ENOSYS; |
431 | 374 | ||
432 | spin_lock (&dcss_lock); | 375 | mutex_lock(&dcss_lock); |
433 | seg = segment_by_name (name); | 376 | seg = segment_by_name (name); |
434 | if (seg == NULL) | 377 | if (seg == NULL) |
435 | rc = __segment_load (name, do_nonshared, addr, end); | 378 | rc = __segment_load (name, do_nonshared, addr, end); |
@@ -444,7 +387,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr, | |||
444 | rc = -EPERM; | 387 | rc = -EPERM; |
445 | } | 388 | } |
446 | } | 389 | } |
447 | spin_unlock (&dcss_lock); | 390 | mutex_unlock(&dcss_lock); |
448 | return rc; | 391 | return rc; |
449 | } | 392 | } |
450 | 393 | ||
@@ -467,7 +410,7 @@ segment_modify_shared (char *name, int do_nonshared) | |||
467 | unsigned long dummy; | 410 | unsigned long dummy; |
468 | int dcss_command, rc, diag_cc; | 411 | int dcss_command, rc, diag_cc; |
469 | 412 | ||
470 | spin_lock (&dcss_lock); | 413 | mutex_lock(&dcss_lock); |
471 | seg = segment_by_name (name); | 414 | seg = segment_by_name (name); |
472 | if (seg == NULL) { | 415 | if (seg == NULL) { |
473 | rc = -EINVAL; | 416 | rc = -EINVAL; |
@@ -508,7 +451,7 @@ segment_modify_shared (char *name, int do_nonshared) | |||
508 | &dummy, &dummy); | 451 | &dummy, &dummy); |
509 | kfree(seg); | 452 | kfree(seg); |
510 | out_unlock: | 453 | out_unlock: |
511 | spin_unlock(&dcss_lock); | 454 | mutex_unlock(&dcss_lock); |
512 | return rc; | 455 | return rc; |
513 | } | 456 | } |
514 | 457 | ||
@@ -526,21 +469,21 @@ segment_unload(char *name) | |||
526 | if (!MACHINE_IS_VM) | 469 | if (!MACHINE_IS_VM) |
527 | return; | 470 | return; |
528 | 471 | ||
529 | spin_lock(&dcss_lock); | 472 | mutex_lock(&dcss_lock); |
530 | seg = segment_by_name (name); | 473 | seg = segment_by_name (name); |
531 | if (seg == NULL) { | 474 | if (seg == NULL) { |
532 | PRINT_ERR ("could not find segment %s in segment_unload, " | 475 | PRINT_ERR ("could not find segment %s in segment_unload, " |
533 | "please report to linux390@de.ibm.com\n",name); | 476 | "please report to linux390@de.ibm.com\n",name); |
534 | goto out_unlock; | 477 | goto out_unlock; |
535 | } | 478 | } |
536 | if (atomic_dec_return(&seg->ref_count) == 0) { | 479 | if (atomic_dec_return(&seg->ref_count) != 0) |
537 | list_del(&seg->list); | 480 | goto out_unlock; |
538 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 481 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
539 | &dummy, &dummy); | 482 | list_del(&seg->list); |
540 | kfree(seg); | 483 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); |
541 | } | 484 | kfree(seg); |
542 | out_unlock: | 485 | out_unlock: |
543 | spin_unlock(&dcss_lock); | 486 | mutex_unlock(&dcss_lock); |
544 | } | 487 | } |
545 | 488 | ||
546 | /* | 489 | /* |
@@ -559,12 +502,13 @@ segment_save(char *name) | |||
559 | if (!MACHINE_IS_VM) | 502 | if (!MACHINE_IS_VM) |
560 | return; | 503 | return; |
561 | 504 | ||
562 | spin_lock(&dcss_lock); | 505 | mutex_lock(&dcss_lock); |
563 | seg = segment_by_name (name); | 506 | seg = segment_by_name (name); |
564 | 507 | ||
565 | if (seg == NULL) { | 508 | if (seg == NULL) { |
566 | PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name); | 509 | PRINT_ERR("could not find segment %s in segment_save, please " |
567 | return; | 510 | "report to linux390@de.ibm.com\n", name); |
511 | goto out; | ||
568 | } | 512 | } |
569 | 513 | ||
570 | startpfn = seg->start_addr >> PAGE_SHIFT; | 514 | startpfn = seg->start_addr >> PAGE_SHIFT; |
@@ -591,7 +535,7 @@ segment_save(char *name) | |||
591 | goto out; | 535 | goto out; |
592 | } | 536 | } |
593 | out: | 537 | out: |
594 | spin_unlock(&dcss_lock); | 538 | mutex_unlock(&dcss_lock); |
595 | } | 539 | } |
596 | 540 | ||
597 | EXPORT_SYMBOL(segment_load); | 541 | EXPORT_SYMBOL(segment_load); |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1c323bbfda91..cd85e34d8703 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
33 | #include <asm/kdebug.h> | 33 | #include <asm/kdebug.h> |
34 | #include <asm/s390_ext.h> | ||
34 | 35 | ||
35 | #ifndef CONFIG_64BIT | 36 | #ifndef CONFIG_64BIT |
36 | #define __FAIL_ADDR_MASK 0x7ffff000 | 37 | #define __FAIL_ADDR_MASK 0x7ffff000 |
@@ -394,6 +395,7 @@ void do_dat_exception(struct pt_regs *regs, unsigned long error_code) | |||
394 | /* | 395 | /* |
395 | * 'pfault' pseudo page faults routines. | 396 | * 'pfault' pseudo page faults routines. |
396 | */ | 397 | */ |
398 | static ext_int_info_t ext_int_pfault; | ||
397 | static int pfault_disable = 0; | 399 | static int pfault_disable = 0; |
398 | 400 | ||
399 | static int __init nopfault(char *str) | 401 | static int __init nopfault(char *str) |
@@ -422,7 +424,7 @@ int pfault_init(void) | |||
422 | __PF_RES_FIELD }; | 424 | __PF_RES_FIELD }; |
423 | int rc; | 425 | int rc; |
424 | 426 | ||
425 | if (pfault_disable) | 427 | if (!MACHINE_IS_VM || pfault_disable) |
426 | return -1; | 428 | return -1; |
427 | asm volatile( | 429 | asm volatile( |
428 | " diag %1,%0,0x258\n" | 430 | " diag %1,%0,0x258\n" |
@@ -440,7 +442,7 @@ void pfault_fini(void) | |||
440 | pfault_refbk_t refbk = | 442 | pfault_refbk_t refbk = |
441 | { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL }; | 443 | { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL }; |
442 | 444 | ||
443 | if (pfault_disable) | 445 | if (!MACHINE_IS_VM || pfault_disable) |
444 | return; | 446 | return; |
445 | __ctl_clear_bit(0,9); | 447 | __ctl_clear_bit(0,9); |
446 | asm volatile( | 448 | asm volatile( |
@@ -500,5 +502,25 @@ pfault_interrupt(__u16 error_code) | |||
500 | set_tsk_need_resched(tsk); | 502 | set_tsk_need_resched(tsk); |
501 | } | 503 | } |
502 | } | 504 | } |
503 | #endif | ||
504 | 505 | ||
506 | void __init pfault_irq_init(void) | ||
507 | { | ||
508 | if (!MACHINE_IS_VM) | ||
509 | return; | ||
510 | |||
511 | /* | ||
512 | * Try to get pfault pseudo page faults going. | ||
513 | */ | ||
514 | if (register_early_external_interrupt(0x2603, pfault_interrupt, | ||
515 | &ext_int_pfault) != 0) | ||
516 | panic("Couldn't request external interrupt 0x2603"); | ||
517 | |||
518 | if (pfault_init() == 0) | ||
519 | return; | ||
520 | |||
521 | /* Tough luck, no pfault. */ | ||
522 | pfault_disable = 1; | ||
523 | unregister_early_external_interrupt(0x2603, pfault_interrupt, | ||
524 | &ext_int_pfault); | ||
525 | } | ||
526 | #endif | ||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d99891718709..4bb21be3b007 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
27 | #include <linux/poison.h> | ||
27 | 28 | ||
28 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
@@ -69,6 +70,8 @@ void show_mem(void) | |||
69 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | 70 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); |
70 | i = max_mapnr; | 71 | i = max_mapnr; |
71 | while (i-- > 0) { | 72 | while (i-- > 0) { |
73 | if (!pfn_valid(i)) | ||
74 | continue; | ||
72 | page = pfn_to_page(i); | 75 | page = pfn_to_page(i); |
73 | total++; | 76 | total++; |
74 | if (PageReserved(page)) | 77 | if (PageReserved(page)) |
@@ -84,65 +87,52 @@ void show_mem(void) | |||
84 | printk("%d pages swap cached\n",cached); | 87 | printk("%d pages swap cached\n",cached); |
85 | } | 88 | } |
86 | 89 | ||
90 | static void __init setup_ro_region(void) | ||
91 | { | ||
92 | pgd_t *pgd; | ||
93 | pmd_t *pmd; | ||
94 | pte_t *pte; | ||
95 | pte_t new_pte; | ||
96 | unsigned long address, end; | ||
97 | |||
98 | address = ((unsigned long)&__start_rodata) & PAGE_MASK; | ||
99 | end = PFN_ALIGN((unsigned long)&__end_rodata); | ||
100 | |||
101 | for (; address < end; address += PAGE_SIZE) { | ||
102 | pgd = pgd_offset_k(address); | ||
103 | pmd = pmd_offset(pgd, address); | ||
104 | pte = pte_offset_kernel(pmd, address); | ||
105 | new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); | ||
106 | set_pte(pte, new_pte); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | extern void vmem_map_init(void); | ||
111 | |||
87 | /* | 112 | /* |
88 | * paging_init() sets up the page tables | 113 | * paging_init() sets up the page tables |
89 | */ | 114 | */ |
90 | |||
91 | #ifndef CONFIG_64BIT | ||
92 | void __init paging_init(void) | 115 | void __init paging_init(void) |
93 | { | 116 | { |
94 | pgd_t * pg_dir; | 117 | pgd_t *pg_dir; |
95 | pte_t * pg_table; | 118 | int i; |
96 | pte_t pte; | 119 | unsigned long pgdir_k; |
97 | int i; | 120 | static const int ssm_mask = 0x04000000L; |
98 | unsigned long tmp; | ||
99 | unsigned long pfn = 0; | ||
100 | unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; | ||
101 | static const int ssm_mask = 0x04000000L; | ||
102 | unsigned long ro_start_pfn, ro_end_pfn; | ||
103 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 121 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
104 | 122 | ||
105 | ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); | 123 | pg_dir = swapper_pg_dir; |
106 | ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); | ||
107 | |||
108 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
109 | max_zone_pfns[ZONE_DMA] = max_low_pfn; | ||
110 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | ||
111 | free_area_init_nodes(max_zone_pfns); | ||
112 | |||
113 | /* unmap whole virtual address space */ | ||
114 | 124 | ||
115 | pg_dir = swapper_pg_dir; | 125 | #ifdef CONFIG_64BIT |
116 | 126 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; | |
117 | for (i = 0; i < PTRS_PER_PGD; i++) | 127 | for (i = 0; i < PTRS_PER_PGD; i++) |
118 | pmd_clear((pmd_t *) pg_dir++); | 128 | pgd_clear(pg_dir + i); |
119 | 129 | #else | |
120 | /* | 130 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; |
121 | * map whole physical memory to virtual memory (identity mapping) | 131 | for (i = 0; i < PTRS_PER_PGD; i++) |
122 | */ | 132 | pmd_clear((pmd_t *)(pg_dir + i)); |
123 | 133 | #endif | |
124 | pg_dir = swapper_pg_dir; | 134 | vmem_map_init(); |
125 | 135 | setup_ro_region(); | |
126 | while (pfn < max_low_pfn) { | ||
127 | /* | ||
128 | * pg_table is physical at this point | ||
129 | */ | ||
130 | pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); | ||
131 | |||
132 | pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table); | ||
133 | pg_dir++; | ||
134 | |||
135 | for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) { | ||
136 | if (pfn >= ro_start_pfn && pfn < ro_end_pfn) | ||
137 | pte = pfn_pte(pfn, __pgprot(_PAGE_RO)); | ||
138 | else | ||
139 | pte = pfn_pte(pfn, PAGE_KERNEL); | ||
140 | if (pfn >= max_low_pfn) | ||
141 | pte_val(pte) = _PAGE_TYPE_EMPTY; | ||
142 | set_pte(pg_table, pte); | ||
143 | pfn++; | ||
144 | } | ||
145 | } | ||
146 | 136 | ||
147 | S390_lowcore.kernel_asce = pgdir_k; | 137 | S390_lowcore.kernel_asce = pgdir_k; |
148 | 138 | ||
@@ -152,82 +142,11 @@ void __init paging_init(void) | |||
152 | __ctl_load(pgdir_k, 13, 13); | 142 | __ctl_load(pgdir_k, 13, 13); |
153 | __raw_local_irq_ssm(ssm_mask); | 143 | __raw_local_irq_ssm(ssm_mask); |
154 | 144 | ||
155 | local_flush_tlb(); | ||
156 | } | ||
157 | |||
158 | #else /* CONFIG_64BIT */ | ||
159 | |||
160 | void __init paging_init(void) | ||
161 | { | ||
162 | pgd_t * pg_dir; | ||
163 | pmd_t * pm_dir; | ||
164 | pte_t * pt_dir; | ||
165 | pte_t pte; | ||
166 | int i,j,k; | ||
167 | unsigned long pfn = 0; | ||
168 | unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | | ||
169 | _KERN_REGION_TABLE; | ||
170 | static const int ssm_mask = 0x04000000L; | ||
171 | unsigned long ro_start_pfn, ro_end_pfn; | ||
172 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
173 | |||
174 | ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); | ||
175 | ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); | ||
176 | |||
177 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 145 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
178 | max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); | 146 | max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); |
179 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 147 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
180 | free_area_init_nodes(max_zone_pfns); | 148 | free_area_init_nodes(max_zone_pfns); |
181 | |||
182 | /* | ||
183 | * map whole physical memory to virtual memory (identity mapping) | ||
184 | */ | ||
185 | |||
186 | pg_dir = swapper_pg_dir; | ||
187 | |||
188 | for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) { | ||
189 | |||
190 | if (pfn >= max_low_pfn) { | ||
191 | pgd_clear(pg_dir); | ||
192 | continue; | ||
193 | } | ||
194 | |||
195 | pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4); | ||
196 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
197 | |||
198 | for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { | ||
199 | if (pfn >= max_low_pfn) { | ||
200 | pmd_clear(pm_dir); | ||
201 | continue; | ||
202 | } | ||
203 | |||
204 | pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); | ||
205 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
206 | |||
207 | for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { | ||
208 | if (pfn >= ro_start_pfn && pfn < ro_end_pfn) | ||
209 | pte = pfn_pte(pfn, __pgprot(_PAGE_RO)); | ||
210 | else | ||
211 | pte = pfn_pte(pfn, PAGE_KERNEL); | ||
212 | if (pfn >= max_low_pfn) | ||
213 | pte_val(pte) = _PAGE_TYPE_EMPTY; | ||
214 | set_pte(pt_dir, pte); | ||
215 | pfn++; | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | |||
220 | S390_lowcore.kernel_asce = pgdir_k; | ||
221 | |||
222 | /* enable virtual mapping in kernel mode */ | ||
223 | __ctl_load(pgdir_k, 1, 1); | ||
224 | __ctl_load(pgdir_k, 7, 7); | ||
225 | __ctl_load(pgdir_k, 13, 13); | ||
226 | __raw_local_irq_ssm(ssm_mask); | ||
227 | |||
228 | local_flush_tlb(); | ||
229 | } | 149 | } |
230 | #endif /* CONFIG_64BIT */ | ||
231 | 150 | ||
232 | void __init mem_init(void) | 151 | void __init mem_init(void) |
233 | { | 152 | { |
@@ -257,6 +176,8 @@ void __init mem_init(void) | |||
257 | printk("Write protected kernel read-only data: %#lx - %#lx\n", | 176 | printk("Write protected kernel read-only data: %#lx - %#lx\n", |
258 | (unsigned long)&__start_rodata, | 177 | (unsigned long)&__start_rodata, |
259 | PFN_ALIGN((unsigned long)&__end_rodata) - 1); | 178 | PFN_ALIGN((unsigned long)&__end_rodata) - 1); |
179 | printk("Virtual memmap size: %ldk\n", | ||
180 | (max_pfn * sizeof(struct page)) >> 10); | ||
260 | } | 181 | } |
261 | 182 | ||
262 | void free_initmem(void) | 183 | void free_initmem(void) |
@@ -267,6 +188,7 @@ void free_initmem(void) | |||
267 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | 188 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { |
268 | ClearPageReserved(virt_to_page(addr)); | 189 | ClearPageReserved(virt_to_page(addr)); |
269 | init_page_count(virt_to_page(addr)); | 190 | init_page_count(virt_to_page(addr)); |
191 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
270 | free_page(addr); | 192 | free_page(addr); |
271 | totalram_pages++; | 193 | totalram_pages++; |
272 | } | 194 | } |
diff --git a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c index 0f6e9ecbefe2..3d2100a4e209 100644 --- a/arch/s390/mm/ioremap.c +++ b/arch/s390/mm/ioremap.c | |||
@@ -15,87 +15,8 @@ | |||
15 | 15 | ||
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <asm/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/pgalloc.h> | 19 | #include <asm/pgalloc.h> |
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/tlbflush.h> | ||
22 | |||
23 | static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, | ||
24 | unsigned long phys_addr, unsigned long flags) | ||
25 | { | ||
26 | unsigned long end; | ||
27 | unsigned long pfn; | ||
28 | |||
29 | address &= ~PMD_MASK; | ||
30 | end = address + size; | ||
31 | if (end > PMD_SIZE) | ||
32 | end = PMD_SIZE; | ||
33 | if (address >= end) | ||
34 | BUG(); | ||
35 | pfn = phys_addr >> PAGE_SHIFT; | ||
36 | do { | ||
37 | if (!pte_none(*pte)) { | ||
38 | printk("remap_area_pte: page already exists\n"); | ||
39 | BUG(); | ||
40 | } | ||
41 | set_pte(pte, pfn_pte(pfn, __pgprot(flags))); | ||
42 | address += PAGE_SIZE; | ||
43 | pfn++; | ||
44 | pte++; | ||
45 | } while (address && (address < end)); | ||
46 | } | ||
47 | |||
48 | static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, | ||
49 | unsigned long phys_addr, unsigned long flags) | ||
50 | { | ||
51 | unsigned long end; | ||
52 | |||
53 | address &= ~PGDIR_MASK; | ||
54 | end = address + size; | ||
55 | if (end > PGDIR_SIZE) | ||
56 | end = PGDIR_SIZE; | ||
57 | phys_addr -= address; | ||
58 | if (address >= end) | ||
59 | BUG(); | ||
60 | do { | ||
61 | pte_t * pte = pte_alloc_kernel(pmd, address); | ||
62 | if (!pte) | ||
63 | return -ENOMEM; | ||
64 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); | ||
65 | address = (address + PMD_SIZE) & PMD_MASK; | ||
66 | pmd++; | ||
67 | } while (address && (address < end)); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static int remap_area_pages(unsigned long address, unsigned long phys_addr, | ||
72 | unsigned long size, unsigned long flags) | ||
73 | { | ||
74 | int error; | ||
75 | pgd_t * dir; | ||
76 | unsigned long end = address + size; | ||
77 | |||
78 | phys_addr -= address; | ||
79 | dir = pgd_offset(&init_mm, address); | ||
80 | flush_cache_all(); | ||
81 | if (address >= end) | ||
82 | BUG(); | ||
83 | do { | ||
84 | pmd_t *pmd; | ||
85 | pmd = pmd_alloc(&init_mm, dir, address); | ||
86 | error = -ENOMEM; | ||
87 | if (!pmd) | ||
88 | break; | ||
89 | if (remap_area_pmd(pmd, address, end - address, | ||
90 | phys_addr + address, flags)) | ||
91 | break; | ||
92 | error = 0; | ||
93 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | ||
94 | dir++; | ||
95 | } while (address && (address < end)); | ||
96 | flush_tlb_all(); | ||
97 | return 0; | ||
98 | } | ||
99 | 20 | ||
100 | /* | 21 | /* |
101 | * Generic mapping function (not visible outside): | 22 | * Generic mapping function (not visible outside): |
@@ -122,7 +43,8 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag | |||
122 | if (!area) | 43 | if (!area) |
123 | return NULL; | 44 | return NULL; |
124 | addr = area->addr; | 45 | addr = area->addr; |
125 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { | 46 | if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, |
47 | phys_addr, __pgprot(flags))) { | ||
126 | vfree(addr); | 48 | vfree(addr); |
127 | return NULL; | 49 | return NULL; |
128 | } | 50 | } |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c new file mode 100644 index 000000000000..7f2944d3ec2a --- /dev/null +++ b/arch/s390/mm/vmem.c | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * arch/s390/mm/vmem.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/bootmem.h> | ||
9 | #include <linux/pfn.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/list.h> | ||
13 | #include <asm/pgalloc.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | #include <asm/setup.h> | ||
16 | #include <asm/tlbflush.h> | ||
17 | |||
18 | unsigned long vmalloc_end; | ||
19 | EXPORT_SYMBOL(vmalloc_end); | ||
20 | |||
21 | static struct page *vmem_map; | ||
22 | static DEFINE_MUTEX(vmem_mutex); | ||
23 | |||
24 | struct memory_segment { | ||
25 | struct list_head list; | ||
26 | unsigned long start; | ||
27 | unsigned long size; | ||
28 | }; | ||
29 | |||
30 | static LIST_HEAD(mem_segs); | ||
31 | |||
32 | void memmap_init(unsigned long size, int nid, unsigned long zone, | ||
33 | unsigned long start_pfn) | ||
34 | { | ||
35 | struct page *start, *end; | ||
36 | struct page *map_start, *map_end; | ||
37 | int i; | ||
38 | |||
39 | start = pfn_to_page(start_pfn); | ||
40 | end = start + size; | ||
41 | |||
42 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
43 | unsigned long cstart, cend; | ||
44 | |||
45 | cstart = PFN_DOWN(memory_chunk[i].addr); | ||
46 | cend = cstart + PFN_DOWN(memory_chunk[i].size); | ||
47 | |||
48 | map_start = mem_map + cstart; | ||
49 | map_end = mem_map + cend; | ||
50 | |||
51 | if (map_start < start) | ||
52 | map_start = start; | ||
53 | if (map_end > end) | ||
54 | map_end = end; | ||
55 | |||
56 | map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) | ||
57 | / sizeof(struct page); | ||
58 | map_end += ((PFN_ALIGN((unsigned long) map_end) | ||
59 | - (unsigned long) map_end) | ||
60 | / sizeof(struct page)); | ||
61 | |||
62 | if (map_start < map_end) | ||
63 | memmap_init_zone((unsigned long)(map_end - map_start), | ||
64 | nid, zone, page_to_pfn(map_start)); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static inline void *vmem_alloc_pages(unsigned int order) | ||
69 | { | ||
70 | if (slab_is_available()) | ||
71 | return (void *)__get_free_pages(GFP_KERNEL, order); | ||
72 | return alloc_bootmem_pages((1 << order) * PAGE_SIZE); | ||
73 | } | ||
74 | |||
75 | static inline pmd_t *vmem_pmd_alloc(void) | ||
76 | { | ||
77 | pmd_t *pmd; | ||
78 | int i; | ||
79 | |||
80 | pmd = vmem_alloc_pages(PMD_ALLOC_ORDER); | ||
81 | if (!pmd) | ||
82 | return NULL; | ||
83 | for (i = 0; i < PTRS_PER_PMD; i++) | ||
84 | pmd_clear(pmd + i); | ||
85 | return pmd; | ||
86 | } | ||
87 | |||
88 | static inline pte_t *vmem_pte_alloc(void) | ||
89 | { | ||
90 | pte_t *pte; | ||
91 | pte_t empty_pte; | ||
92 | int i; | ||
93 | |||
94 | pte = vmem_alloc_pages(PTE_ALLOC_ORDER); | ||
95 | if (!pte) | ||
96 | return NULL; | ||
97 | pte_val(empty_pte) = _PAGE_TYPE_EMPTY; | ||
98 | for (i = 0; i < PTRS_PER_PTE; i++) | ||
99 | set_pte(pte + i, empty_pte); | ||
100 | return pte; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Add a physical memory range to the 1:1 mapping. | ||
105 | */ | ||
106 | static int vmem_add_range(unsigned long start, unsigned long size) | ||
107 | { | ||
108 | unsigned long address; | ||
109 | pgd_t *pg_dir; | ||
110 | pmd_t *pm_dir; | ||
111 | pte_t *pt_dir; | ||
112 | pte_t pte; | ||
113 | int ret = -ENOMEM; | ||
114 | |||
115 | for (address = start; address < start + size; address += PAGE_SIZE) { | ||
116 | pg_dir = pgd_offset_k(address); | ||
117 | if (pgd_none(*pg_dir)) { | ||
118 | pm_dir = vmem_pmd_alloc(); | ||
119 | if (!pm_dir) | ||
120 | goto out; | ||
121 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
122 | } | ||
123 | |||
124 | pm_dir = pmd_offset(pg_dir, address); | ||
125 | if (pmd_none(*pm_dir)) { | ||
126 | pt_dir = vmem_pte_alloc(); | ||
127 | if (!pt_dir) | ||
128 | goto out; | ||
129 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
130 | } | ||
131 | |||
132 | pt_dir = pte_offset_kernel(pm_dir, address); | ||
133 | pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); | ||
134 | set_pte(pt_dir, pte); | ||
135 | } | ||
136 | ret = 0; | ||
137 | out: | ||
138 | flush_tlb_kernel_range(start, start + size); | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Remove a physical memory range from the 1:1 mapping. | ||
144 | * Currently only invalidates page table entries. | ||
145 | */ | ||
146 | static void vmem_remove_range(unsigned long start, unsigned long size) | ||
147 | { | ||
148 | unsigned long address; | ||
149 | pgd_t *pg_dir; | ||
150 | pmd_t *pm_dir; | ||
151 | pte_t *pt_dir; | ||
152 | pte_t pte; | ||
153 | |||
154 | pte_val(pte) = _PAGE_TYPE_EMPTY; | ||
155 | for (address = start; address < start + size; address += PAGE_SIZE) { | ||
156 | pg_dir = pgd_offset_k(address); | ||
157 | if (pgd_none(*pg_dir)) | ||
158 | continue; | ||
159 | pm_dir = pmd_offset(pg_dir, address); | ||
160 | if (pmd_none(*pm_dir)) | ||
161 | continue; | ||
162 | pt_dir = pte_offset_kernel(pm_dir, address); | ||
163 | set_pte(pt_dir, pte); | ||
164 | } | ||
165 | flush_tlb_kernel_range(start, start + size); | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Add a backed mem_map array to the virtual mem_map array. | ||
170 | */ | ||
171 | static int vmem_add_mem_map(unsigned long start, unsigned long size) | ||
172 | { | ||
173 | unsigned long address, start_addr, end_addr; | ||
174 | struct page *map_start, *map_end; | ||
175 | pgd_t *pg_dir; | ||
176 | pmd_t *pm_dir; | ||
177 | pte_t *pt_dir; | ||
178 | pte_t pte; | ||
179 | int ret = -ENOMEM; | ||
180 | |||
181 | map_start = vmem_map + PFN_DOWN(start); | ||
182 | map_end = vmem_map + PFN_DOWN(start + size); | ||
183 | |||
184 | start_addr = (unsigned long) map_start & PAGE_MASK; | ||
185 | end_addr = PFN_ALIGN((unsigned long) map_end); | ||
186 | |||
187 | for (address = start_addr; address < end_addr; address += PAGE_SIZE) { | ||
188 | pg_dir = pgd_offset_k(address); | ||
189 | if (pgd_none(*pg_dir)) { | ||
190 | pm_dir = vmem_pmd_alloc(); | ||
191 | if (!pm_dir) | ||
192 | goto out; | ||
193 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
194 | } | ||
195 | |||
196 | pm_dir = pmd_offset(pg_dir, address); | ||
197 | if (pmd_none(*pm_dir)) { | ||
198 | pt_dir = vmem_pte_alloc(); | ||
199 | if (!pt_dir) | ||
200 | goto out; | ||
201 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
202 | } | ||
203 | |||
204 | pt_dir = pte_offset_kernel(pm_dir, address); | ||
205 | if (pte_none(*pt_dir)) { | ||
206 | unsigned long new_page; | ||
207 | |||
208 | new_page =__pa(vmem_alloc_pages(0)); | ||
209 | if (!new_page) | ||
210 | goto out; | ||
211 | pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); | ||
212 | set_pte(pt_dir, pte); | ||
213 | } | ||
214 | } | ||
215 | ret = 0; | ||
216 | out: | ||
217 | flush_tlb_kernel_range(start_addr, end_addr); | ||
218 | return ret; | ||
219 | } | ||
220 | |||
/*
 * Establish the 1:1 mapping for a memory range and back its slice of
 * the virtual mem_map array.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret = vmem_add_range(start, size);

	return ret ? ret : vmem_add_mem_map(start, size);
}
230 | |||
231 | /* | ||
232 | * Add memory segment to the segment list if it doesn't overlap with | ||
233 | * an already present segment. | ||
234 | */ | ||
235 | static int insert_memory_segment(struct memory_segment *seg) | ||
236 | { | ||
237 | struct memory_segment *tmp; | ||
238 | |||
239 | if (PFN_DOWN(seg->start + seg->size) > max_pfn || | ||
240 | seg->start + seg->size < seg->start) | ||
241 | return -ERANGE; | ||
242 | |||
243 | list_for_each_entry(tmp, &mem_segs, list) { | ||
244 | if (seg->start >= tmp->start + tmp->size) | ||
245 | continue; | ||
246 | if (seg->start + seg->size <= tmp->start) | ||
247 | continue; | ||
248 | return -ENOSPC; | ||
249 | } | ||
250 | list_add(&seg->list, &mem_segs); | ||
251 | return 0; | ||
252 | } | ||
253 | |||
/*
 * Remove memory segment from the segment list. The caller keeps
 * ownership of @seg and is responsible for freeing it (see
 * remove_shared_memory() below).
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
261 | |||
/*
 * Unlink a segment from the segment list and invalidate its page
 * table entries in the 1:1 mapping. Callers hold vmem_mutex and
 * remain responsible for freeing @seg.
 */
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
267 | |||
268 | int remove_shared_memory(unsigned long start, unsigned long size) | ||
269 | { | ||
270 | struct memory_segment *seg; | ||
271 | int ret; | ||
272 | |||
273 | mutex_lock(&vmem_mutex); | ||
274 | |||
275 | ret = -ENOENT; | ||
276 | list_for_each_entry(seg, &mem_segs, list) { | ||
277 | if (seg->start == start && seg->size == size) | ||
278 | break; | ||
279 | } | ||
280 | |||
281 | if (seg->start != start || seg->size != size) | ||
282 | goto out; | ||
283 | |||
284 | ret = 0; | ||
285 | __remove_shared_memory(seg); | ||
286 | kfree(seg); | ||
287 | out: | ||
288 | mutex_unlock(&vmem_mutex); | ||
289 | return ret; | ||
290 | } | ||
291 | |||
/*
 * Register the memory segment [start, start + size), map it 1:1,
 * back its mem_map slice and initialize the struct pages covering it.
 * Returns 0 on success or a negative error code (-ENOMEM on
 * allocation failure, -ERANGE/-ENOSPC from insert_memory_segment()).
 */
int add_shared_memory(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	struct page *page;
	unsigned long pfn, num_pfn, end_pfn;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	/* Reject out-of-range or overlapping segments before mapping. */
	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;

	pfn = PFN_DOWN(start);
	num_pfn = PFN_DOWN(size);
	end_pfn = pfn + num_pfn;

	/* Zero the freshly backed struct pages before initializing them. */
	page = pfn_to_page(pfn);
	memset(page, 0, num_pfn * sizeof(struct page));

	for (; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);
		init_page_count(page);
		reset_page_mapcount(page);
		/* Mark reserved — presumably to keep these pages out of
		 * the page allocator; confirm against PG_reserved users. */
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}
	goto out;

	/* Unwind in reverse order of the setup steps above. */
out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
339 | |||
/*
 * map whole physical memory to virtual memory (identity mapping)
 *
 * Places the virtual mem_map array at the top of the vmalloc area
 * (shrinking vmalloc_end accordingly) and creates the 1:1 mapping
 * for every detected memory chunk.
 */
void __init vmem_map_init(void)
{
	unsigned long map_size;
	int i;

	/* Size of mem_map, rounded up so zones stay MAX_ORDER aligned. */
	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
	vmem_map = (struct page *) vmalloc_end;
	NODE_DATA(0)->node_mem_map = vmem_map;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
}
356 | |||
357 | /* | ||
358 | * Convert memory chunk array to a memory segment list so there is a single | ||
359 | * list that contains both r/w memory and shared memory segments. | ||
360 | */ | ||
361 | static int __init vmem_convert_memory_chunk(void) | ||
362 | { | ||
363 | struct memory_segment *seg; | ||
364 | int i; | ||
365 | |||
366 | mutex_lock(&vmem_mutex); | ||
367 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
368 | if (!memory_chunk[i].size) | ||
369 | continue; | ||
370 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | ||
371 | if (!seg) | ||
372 | panic("Out of memory...\n"); | ||
373 | seg->start = memory_chunk[i].addr; | ||
374 | seg->size = memory_chunk[i].size; | ||
375 | insert_memory_segment(seg); | ||
376 | } | ||
377 | mutex_unlock(&vmem_mutex); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | core_initcall(vmem_convert_memory_chunk); | ||