path: root/arch/s390/kernel
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile | 4
-rw-r--r--  arch/s390/kernel/bitmap.S | 56
-rw-r--r--  arch/s390/kernel/bitmap.c | 54
-rw-r--r--  arch/s390/kernel/compat_ptrace.h | 3
-rw-r--r--  arch/s390/kernel/compat_wrapper.S | 2
-rw-r--r--  arch/s390/kernel/debug.c | 9
-rw-r--r--  arch/s390/kernel/early.c | 23
-rw-r--r--  arch/s390/kernel/head.S | 2
-rw-r--r--  arch/s390/kernel/head31.S | 1
-rw-r--r--  arch/s390/kernel/head64.S | 1
-rw-r--r--  arch/s390/kernel/ipl.c | 74
-rw-r--r--  arch/s390/kernel/module.c | 19
-rw-r--r--  arch/s390/kernel/nmi.c | 376
-rw-r--r--  arch/s390/kernel/process.c | 73
-rw-r--r--  arch/s390/kernel/processor.c | 73
-rw-r--r--  arch/s390/kernel/reipl64.S | 11
-rw-r--r--  arch/s390/kernel/s390_ksyms.c | 44
-rw-r--r--  arch/s390/kernel/setup.c | 52
-rw-r--r--  arch/s390/kernel/smp.c | 68
-rw-r--r--  arch/s390/kernel/sysinfo.c | 428
-rw-r--r--  arch/s390/kernel/time.c | 71
-rw-r--r--  arch/s390/kernel/topology.c | 2
-rw-r--r--  arch/s390/kernel/traps.c | 4
-rw-r--r--  arch/s390/kernel/vdso.c | 2
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S | 2
25 files changed, 1146 insertions, 308 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 3edc6c6f258b..228e3105ded7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -17,10 +17,12 @@ CFLAGS_smp.o := -Wno-nonnull
17# 17#
18CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' 18CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
19 19
20CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
21
20obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ 22obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \
21 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 23 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
22 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ 24 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
23 vdso.o vtime.o 25 vdso.o vtime.o sysinfo.o nmi.o
24 26
25obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 27obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
26obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 28obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/bitmap.S b/arch/s390/kernel/bitmap.S
deleted file mode 100644
index dfb41f946e23..000000000000
--- a/arch/s390/kernel/bitmap.S
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * arch/s390/kernel/bitmap.S
3 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
4 * See include/asm-s390/{bitops.h|posix_types.h} for details
5 *
6 * S390 version
7 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
8 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
9 */
10
11 .globl _oi_bitmap
12_oi_bitmap:
13 .byte 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
14
15 .globl _ni_bitmap
16_ni_bitmap:
17 .byte 0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F
18
19 .globl _zb_findmap
20_zb_findmap:
21 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
22 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
23 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
24 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
25 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
26 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
27 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
28 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7
29 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
30 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
31 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
32 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
33 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
34 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
35 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
36 .byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
37
38 .globl _sb_findmap
39_sb_findmap:
40 .byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
41 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
42 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
43 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
44 .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
45 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
46 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
47 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
48 .byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
49 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
50 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
51 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
52 .byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
53 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
54 .byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
55 .byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
56
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
new file mode 100644
index 000000000000..3ae4757b006a
--- /dev/null
+++ b/arch/s390/kernel/bitmap.c
@@ -0,0 +1,54 @@
1/*
2 * Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
3 * See include/asm/{bitops.h|posix_types.h} for details
4 *
5 * Copyright IBM Corp. 1999,2009
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 */
8
9#include <linux/bitops.h>
10#include <linux/module.h>
11
12const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
13EXPORT_SYMBOL(_oi_bitmap);
14
15const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
16EXPORT_SYMBOL(_ni_bitmap);
17
18const char _zb_findmap[] = {
19 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
20 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
21 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
22 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
23 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
24 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
25 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
26 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
27 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
28 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
29 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
30 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
31 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
32 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
33 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
34 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
35EXPORT_SYMBOL(_zb_findmap);
36
37const char _sb_findmap[] = {
38 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
39 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
40 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
41 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
42 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
43 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
44 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
45 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
46 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
47 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
48 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
49 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
50 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
51 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
52 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
53 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
54EXPORT_SYMBOL(_sb_findmap);
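
For reference, each _zb_findmap entry is the index of the lowest zero bit of the byte used as index (8 when the byte is 0xff), and _sb_findmap is the analogue for the lowest set bit (8 when the byte is 0x00). A minimal, self-contained C sketch, plain userspace code and not part of the patch, that recomputes what one _zb_findmap entry encodes:

#include <stdio.h>

/* Recompute what a _zb_findmap entry encodes: the index of the lowest
 * zero bit in a byte, or 8 when all eight bits are set. */
static unsigned int lowest_zero_bit(unsigned char b)
{
        unsigned int i;

        for (i = 0; i < 8; i++)
                if (!(b & (1u << i)))
                        return i;
        return 8;
}

int main(void)
{
        /* Matches the table rows above: 0x00 -> 0, 0x07 -> 3, 0xff -> 8. */
        printf("%u %u %u\n", lowest_zero_bit(0x00), lowest_zero_bit(0x07),
               lowest_zero_bit(0xff));
        return 0;
}
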
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index a2be3a978d5c..123dd660d7fb 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -1,10 +1,11 @@
1#ifndef _PTRACE32_H 1#ifndef _PTRACE32_H
2#define _PTRACE32_H 2#define _PTRACE32_H
3 3
4#include <asm/ptrace.h> /* needed for NUM_CR_WORDS */
4#include "compat_linux.h" /* needed for psw_compat_t */ 5#include "compat_linux.h" /* needed for psw_compat_t */
5 6
6typedef struct { 7typedef struct {
7 __u32 cr[3]; 8 __u32 cr[NUM_CR_WORDS];
8} per_cr_words32; 9} per_cr_words32;
9 10
10typedef struct { 11typedef struct {
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 62c706eb0de6..87cf5a79a351 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -252,7 +252,7 @@ sys32_chroot_wrapper:
252sys32_ustat_wrapper: 252sys32_ustat_wrapper:
253 llgfr %r2,%r2 # dev_t 253 llgfr %r2,%r2 # dev_t
254 llgtr %r3,%r3 # struct ustat * 254 llgtr %r3,%r3 # struct ustat *
255 jg sys_ustat 255 jg compat_sys_ustat
256 256
257 .globl sys32_dup2_wrapper 257 .globl sys32_dup2_wrapper
258sys32_dup2_wrapper: 258sys32_dup2_wrapper:
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ba03fc0a3a56..be8bceaf37d9 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -603,7 +603,7 @@ debug_input(struct file *file, const char __user *user_buf, size_t length,
603static int 603static int
604debug_open(struct inode *inode, struct file *file) 604debug_open(struct inode *inode, struct file *file)
605{ 605{
606 int i = 0, rc = 0; 606 int i, rc = 0;
607 file_private_info_t *p_info; 607 file_private_info_t *p_info;
608 debug_info_t *debug_info, *debug_info_snapshot; 608 debug_info_t *debug_info, *debug_info_snapshot;
609 609
@@ -642,8 +642,7 @@ found:
642 p_info = kmalloc(sizeof(file_private_info_t), 642 p_info = kmalloc(sizeof(file_private_info_t),
643 GFP_KERNEL); 643 GFP_KERNEL);
644 if(!p_info){ 644 if(!p_info){
645 if(debug_info_snapshot) 645 debug_info_free(debug_info_snapshot);
646 debug_info_free(debug_info_snapshot);
647 rc = -ENOMEM; 646 rc = -ENOMEM;
648 goto out; 647 goto out;
649 } 648 }
@@ -698,8 +697,7 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
698 if ((uid != 0) || (gid != 0)) 697 if ((uid != 0) || (gid != 0))
699 pr_warning("Root becomes the owner of all s390dbf files " 698 pr_warning("Root becomes the owner of all s390dbf files "
700 "in sysfs\n"); 699 "in sysfs\n");
701 if (!initialized) 700 BUG_ON(!initialized);
702 BUG();
703 mutex_lock(&debug_mutex); 701 mutex_lock(&debug_mutex);
704 702
705 /* create new debug_info */ 703 /* create new debug_info */
@@ -1156,7 +1154,6 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view)
1156 else { 1154 else {
1157 debugfs_remove(id->debugfs_entries[i]); 1155 debugfs_remove(id->debugfs_entries[i]);
1158 id->views[i] = NULL; 1156 id->views[i] = NULL;
1159 rc = 0;
1160 } 1157 }
1161 spin_unlock_irqrestore(&id->lock, flags); 1158 spin_unlock_irqrestore(&id->lock, flags);
1162out: 1159out:
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2a2ca268b1dd..4d221c81c849 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -6,6 +6,7 @@
6 * Heiko Carstens <heiko.carstens@de.ibm.com> 6 * Heiko Carstens <heiko.carstens@de.ibm.com>
7 */ 7 */
8 8
9#include <linux/compiler.h>
9#include <linux/init.h> 10#include <linux/init.h>
10#include <linux/errno.h> 11#include <linux/errno.h>
11#include <linux/string.h> 12#include <linux/string.h>
@@ -20,6 +21,7 @@
20#include <asm/processor.h> 21#include <asm/processor.h>
21#include <asm/sections.h> 22#include <asm/sections.h>
22#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/sysinfo.h>
23#include <asm/cpcmd.h> 25#include <asm/cpcmd.h>
24#include <asm/sclp.h> 26#include <asm/sclp.h>
25#include "entry.h" 27#include "entry.h"
@@ -173,19 +175,21 @@ static noinline __init void init_kernel_storage_key(void)
173 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); 175 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
174} 176}
175 177
178static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
179
176static noinline __init void detect_machine_type(void) 180static noinline __init void detect_machine_type(void)
177{ 181{
178 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data; 182 /* No VM information? Looks like LPAR */
179 183 if (stsi(&vmms, 3, 2, 2) == -ENOSYS)
180 get_cpu_id(&S390_lowcore.cpu_data.cpu_id); 184 return;
181 185 if (!vmms.count)
182 /* Running under z/VM ? */ 186 return;
183 if (cpuinfo->cpu_id.version == 0xff)
184 machine_flags |= MACHINE_FLAG_VM;
185 187
186 /* Running under KVM ? */ 188 /* Running under KVM? If not we assume z/VM */
187 if (cpuinfo->cpu_id.version == 0xfe) 189 if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
188 machine_flags |= MACHINE_FLAG_KVM; 190 machine_flags |= MACHINE_FLAG_KVM;
191 else
192 machine_flags |= MACHINE_FLAG_VM;
189} 193}
190 194
191static __init void early_pgm_check_handler(void) 195static __init void early_pgm_check_handler(void)
@@ -348,7 +352,6 @@ static void __init setup_boot_command_line(void)
348 352
349 /* copy arch command line */ 353 /* copy arch command line */
350 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); 354 strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
351 boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
352 355
353 /* append IPL PARM data to the boot command line */ 356 /* append IPL PARM data to the boot command line */
354 if (MACHINE_IS_VM) { 357 if (MACHINE_IS_VM) {
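
The memcmp() in the new detect_machine_type() compares the first three bytes of the SYSIB 3.2.2 control-program identifier with "KVM" encoded in EBCDIC (K = 0xd2, V = 0xe5, M = 0xd4); when VM information is returned but the identifier is not KVM, z/VM is assumed. A userspace sketch of the same comparison, with a hypothetical cpi buffer standing in for vmms.vm[0].cpi (illustration only, not part of the commit):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Hypothetical control-program identifier as returned by stsi 3.2.2. */
        unsigned char cpi[16] = { 0xd2, 0xe5, 0xd4 };   /* "KVM" in EBCDIC */
        static const unsigned char kvm_ebcdic[3] = { 0xd2, 0xe5, 0xd4 };

        if (!memcmp(cpi, kvm_ebcdic, sizeof(kvm_ebcdic)))
                puts("hypervisor identifies itself as KVM");
        else
                puts("VM information present, assume z/VM");
        return 0;
}
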
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index ec7e35f6055b..1046c2c9f8d1 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -469,6 +469,8 @@ start:
469 .org 0x10000 469 .org 0x10000
470startup:basr %r13,0 # get base 470startup:basr %r13,0 # get base
471.LPG0: 471.LPG0:
472 xc 0x200(256),0x200 # partially clear lowcore
473 xc 0x300(256),0x300
472 474
473#ifndef CONFIG_MARCH_G5 475#ifndef CONFIG_MARCH_G5
474 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10} 476 # check processor version against MARCH_{G5,Z900,Z990,Z9_109,Z10}
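
The two xc instructions added above use the common s390 idiom of XOR-ing a storage operand with itself to clear it; together they zero 512 bytes of the lowcore at 0x200-0x3ff. A rough C equivalent of the effect, for illustration only:

#include <string.h>

/* "xc 0x200(256),0x200" and "xc 0x300(256),0x300" each clear 256 bytes,
 * since any value XOR-ed with itself is zero. */
static void clear_lowcore_range(unsigned char *lowcore)
{
        memset(lowcore + 0x200, 0, 256);
        memset(lowcore + 0x300, 0, 256);
}

int main(void)
{
        unsigned char lc[0x400] = { [0x200] = 0xff, [0x3ff] = 0xff };

        clear_lowcore_range(lc);
        return lc[0x200] | lc[0x3ff];   /* exits 0: both bytes cleared */
}
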
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index db476d114caa..2ced846065b7 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -20,7 +20,6 @@ startup_continue:
20 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 20 lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
21 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 21 l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
22 # move IPL device to lowcore 22 # move IPL device to lowcore
23 mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
24# 23#
25# Setup stack 24# Setup stack
26# 25#
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index f9f70aa15244..65667b2e65ce 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -86,7 +86,6 @@ startup_continue:
86 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers 86 lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
87 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area 87 lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
88 # move IPL device to lowcore 88 # move IPL device to lowcore
89 mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
90 lghi %r0,__LC_PASTE 89 lghi %r0,__LC_PASTE
91 stg %r0,__LC_VDSO_PER_CPU 90 stg %r0,__LC_VDSO_PER_CPU
92# 91#
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 2dcf590faba6..6f3711a0eaaa 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -23,7 +23,7 @@
23#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
24#include <asm/reset.h> 24#include <asm/reset.h>
25#include <asm/sclp.h> 25#include <asm/sclp.h>
26#include <asm/setup.h> 26#include <asm/checksum.h>
27 27
28#define IPL_PARM_BLOCK_VERSION 0 28#define IPL_PARM_BLOCK_VERSION 0
29 29
@@ -56,13 +56,14 @@ struct shutdown_trigger {
56}; 56};
57 57
58/* 58/*
59 * Five shutdown action types are supported: 59 * The following shutdown action types are supported:
60 */ 60 */
61#define SHUTDOWN_ACTION_IPL_STR "ipl" 61#define SHUTDOWN_ACTION_IPL_STR "ipl"
62#define SHUTDOWN_ACTION_REIPL_STR "reipl" 62#define SHUTDOWN_ACTION_REIPL_STR "reipl"
63#define SHUTDOWN_ACTION_DUMP_STR "dump" 63#define SHUTDOWN_ACTION_DUMP_STR "dump"
64#define SHUTDOWN_ACTION_VMCMD_STR "vmcmd" 64#define SHUTDOWN_ACTION_VMCMD_STR "vmcmd"
65#define SHUTDOWN_ACTION_STOP_STR "stop" 65#define SHUTDOWN_ACTION_STOP_STR "stop"
66#define SHUTDOWN_ACTION_DUMP_REIPL_STR "dump_reipl"
66 67
67struct shutdown_action { 68struct shutdown_action {
68 char *name; 69 char *name;
@@ -146,6 +147,7 @@ static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
146static struct ipl_parameter_block *reipl_block_fcp; 147static struct ipl_parameter_block *reipl_block_fcp;
147static struct ipl_parameter_block *reipl_block_ccw; 148static struct ipl_parameter_block *reipl_block_ccw;
148static struct ipl_parameter_block *reipl_block_nss; 149static struct ipl_parameter_block *reipl_block_nss;
150static struct ipl_parameter_block *reipl_block_actual;
149 151
150static int dump_capabilities = DUMP_TYPE_NONE; 152static int dump_capabilities = DUMP_TYPE_NONE;
151static enum dump_type dump_type = DUMP_TYPE_NONE; 153static enum dump_type dump_type = DUMP_TYPE_NONE;
@@ -835,6 +837,7 @@ static int reipl_set_type(enum ipl_type type)
835 reipl_method = REIPL_METHOD_CCW_VM; 837 reipl_method = REIPL_METHOD_CCW_VM;
836 else 838 else
837 reipl_method = REIPL_METHOD_CCW_CIO; 839 reipl_method = REIPL_METHOD_CCW_CIO;
840 reipl_block_actual = reipl_block_ccw;
838 break; 841 break;
839 case IPL_TYPE_FCP: 842 case IPL_TYPE_FCP:
840 if (diag308_set_works) 843 if (diag308_set_works)
@@ -843,6 +846,7 @@ static int reipl_set_type(enum ipl_type type)
843 reipl_method = REIPL_METHOD_FCP_RO_VM; 846 reipl_method = REIPL_METHOD_FCP_RO_VM;
844 else 847 else
845 reipl_method = REIPL_METHOD_FCP_RO_DIAG; 848 reipl_method = REIPL_METHOD_FCP_RO_DIAG;
849 reipl_block_actual = reipl_block_fcp;
846 break; 850 break;
847 case IPL_TYPE_FCP_DUMP: 851 case IPL_TYPE_FCP_DUMP:
848 reipl_method = REIPL_METHOD_FCP_DUMP; 852 reipl_method = REIPL_METHOD_FCP_DUMP;
@@ -852,6 +856,7 @@ static int reipl_set_type(enum ipl_type type)
852 reipl_method = REIPL_METHOD_NSS_DIAG; 856 reipl_method = REIPL_METHOD_NSS_DIAG;
853 else 857 else
854 reipl_method = REIPL_METHOD_NSS; 858 reipl_method = REIPL_METHOD_NSS;
859 reipl_block_actual = reipl_block_nss;
855 break; 860 break;
856 case IPL_TYPE_UNKNOWN: 861 case IPL_TYPE_UNKNOWN:
857 reipl_method = REIPL_METHOD_DEFAULT; 862 reipl_method = REIPL_METHOD_DEFAULT;
@@ -960,7 +965,6 @@ static void reipl_run(struct shutdown_trigger *trigger)
960 diag308(DIAG308_IPL, NULL); 965 diag308(DIAG308_IPL, NULL);
961 break; 966 break;
962 case REIPL_METHOD_FCP_DUMP: 967 case REIPL_METHOD_FCP_DUMP:
963 default:
964 break; 968 break;
965 } 969 }
966 disabled_wait((unsigned long) __builtin_return_address(0)); 970 disabled_wait((unsigned long) __builtin_return_address(0));
@@ -1069,10 +1073,12 @@ static int __init reipl_fcp_init(void)
1069{ 1073{
1070 int rc; 1074 int rc;
1071 1075
1072 if ((!diag308_set_works) && (ipl_info.type != IPL_TYPE_FCP)) 1076 if (!diag308_set_works) {
1073 return 0; 1077 if (ipl_info.type == IPL_TYPE_FCP)
1074 if ((!diag308_set_works) && (ipl_info.type == IPL_TYPE_FCP)) 1078 make_attrs_ro(reipl_fcp_attrs);
1075 make_attrs_ro(reipl_fcp_attrs); 1079 else
1080 return 0;
1081 }
1076 1082
1077 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); 1083 reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
1078 if (!reipl_block_fcp) 1084 if (!reipl_block_fcp)
@@ -1253,7 +1259,6 @@ static void dump_run(struct shutdown_trigger *trigger)
1253 diag308(DIAG308_DUMP, NULL); 1259 diag308(DIAG308_DUMP, NULL);
1254 break; 1260 break;
1255 case DUMP_METHOD_NONE: 1261 case DUMP_METHOD_NONE:
1256 default:
1257 return; 1262 return;
1258 } 1263 }
1259 printk(KERN_EMERG "Dump failed!\n"); 1264 printk(KERN_EMERG "Dump failed!\n");
@@ -1332,6 +1337,49 @@ static struct shutdown_action __refdata dump_action = {
1332 .init = dump_init, 1337 .init = dump_init,
1333}; 1338};
1334 1339
1340static void dump_reipl_run(struct shutdown_trigger *trigger)
1341{
1342 preempt_disable();
1343 /*
1344 * Bypass dynamic address translation (DAT) when storing IPL parameter
1345 * information block address and checksum into the prefix area
1346 * (corresponding to absolute addresses 0-8191).
 1347 * When enhanced DAT applies and the STE format control is one,
1348 * the absolute address is formed without prefixing. In this case a
1349 * normal store (stg/st) into the prefix area would no more match to
1350 * absolute addresses 0-8191.
1351 */
1352#ifdef CONFIG_64BIT
1353 asm volatile("sturg %0,%1"
1354 :: "a" ((unsigned long) reipl_block_actual),
1355 "a" (&lowcore_ptr[smp_processor_id()]->ipib));
1356#else
1357 asm volatile("stura %0,%1"
1358 :: "a" ((unsigned long) reipl_block_actual),
1359 "a" (&lowcore_ptr[smp_processor_id()]->ipib));
1360#endif
1361 asm volatile("stura %0,%1"
1362 :: "a" (csum_partial(reipl_block_actual,
1363 reipl_block_actual->hdr.len, 0)),
1364 "a" (&lowcore_ptr[smp_processor_id()]->ipib_checksum));
1365 preempt_enable();
1366 dump_run(trigger);
1367}
1368
1369static int __init dump_reipl_init(void)
1370{
1371 if (!diag308_set_works)
1372 return -EOPNOTSUPP;
1373 else
1374 return 0;
1375}
1376
1377static struct shutdown_action __refdata dump_reipl_action = {
1378 .name = SHUTDOWN_ACTION_DUMP_REIPL_STR,
1379 .fn = dump_reipl_run,
1380 .init = dump_reipl_init,
1381};
1382
1335/* 1383/*
1336 * vmcmd shutdown action: Trigger vm command on shutdown. 1384 * vmcmd shutdown action: Trigger vm command on shutdown.
1337 */ 1385 */
@@ -1421,7 +1469,8 @@ static struct shutdown_action stop_action = {SHUTDOWN_ACTION_STOP_STR,
1421/* action list */ 1469/* action list */
1422 1470
1423static struct shutdown_action *shutdown_actions_list[] = { 1471static struct shutdown_action *shutdown_actions_list[] = {
1424 &ipl_action, &reipl_action, &dump_action, &vmcmd_action, &stop_action}; 1472 &ipl_action, &reipl_action, &dump_reipl_action, &dump_action,
1473 &vmcmd_action, &stop_action};
1425#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *)) 1474#define SHUTDOWN_ACTIONS_COUNT (sizeof(shutdown_actions_list) / sizeof(void *))
1426 1475
1427/* 1476/*
@@ -1434,11 +1483,11 @@ static int set_trigger(const char *buf, struct shutdown_trigger *trigger,
1434 size_t len) 1483 size_t len)
1435{ 1484{
1436 int i; 1485 int i;
1486
1437 for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) { 1487 for (i = 0; i < SHUTDOWN_ACTIONS_COUNT; i++) {
1438 if (!shutdown_actions_list[i]) 1488 if (!shutdown_actions_list[i])
1439 continue; 1489 continue;
1440 if (strncmp(buf, shutdown_actions_list[i]->name, 1490 if (sysfs_streq(buf, shutdown_actions_list[i]->name)) {
1441 strlen(shutdown_actions_list[i]->name)) == 0) {
1442 trigger->action = shutdown_actions_list[i]; 1491 trigger->action = shutdown_actions_list[i];
1443 return len; 1492 return len;
1444 } 1493 }
@@ -1672,7 +1721,7 @@ static int on_panic_notify(struct notifier_block *self,
1672 1721
1673static struct notifier_block on_panic_nb = { 1722static struct notifier_block on_panic_nb = {
1674 .notifier_call = on_panic_notify, 1723 .notifier_call = on_panic_notify,
1675 .priority = 0, 1724 .priority = INT_MIN,
1676}; 1725};
1677 1726
1678void __init setup_ipl(void) 1727void __init setup_ipl(void)
@@ -1696,7 +1745,6 @@ void __init setup_ipl(void)
1696 sizeof(ipl_info.data.nss.name)); 1745 sizeof(ipl_info.data.nss.name));
1697 break; 1746 break;
1698 case IPL_TYPE_UNKNOWN: 1747 case IPL_TYPE_UNKNOWN:
1699 default:
1700 /* We have no info to copy */ 1748 /* We have no info to copy */
1701 break; 1749 break;
1702 } 1750 }
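
With the new dump_reipl action registered, set_trigger() also switches from prefix matching to sysfs_streq(): the old strncmp(buf, name, strlen(name)) test accepted any input that merely begins with a valid action name, which becomes ambiguous once both "dump" and "dump_reipl" exist, while sysfs_streq(), the helper in lib/string.c, requires a full match but tolerates the trailing newline that echo appends. A userspace re-implementation of those semantics, for illustration only:

#include <stdbool.h>
#include <stdio.h>

/* Same comparison rule as sysfs_streq(): equal if the strings match exactly,
 * or match except for a single trailing newline on either side. */
static bool streq_sysfs_style(const char *s1, const char *s2)
{
        while (*s1 && *s1 == *s2) {
                s1++;
                s2++;
        }
        if (*s1 == *s2)
                return true;
        if (!*s1 && *s2 == '\n' && !s2[1])
                return true;
        if (*s1 == '\n' && !s1[1] && !*s2)
                return true;
        return false;
}

int main(void)
{
        printf("%d %d %d\n",
               streq_sysfs_style("dump\n", "dump"),              /* 1 */
               streq_sysfs_style("dump", "dump_reipl"),          /* 0 */
               streq_sysfs_style("dump_reipl\n", "dump_reipl")); /* 1 */
        return 0;
}
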
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 59b4e796680a..eed4a00cb676 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -310,15 +310,20 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
310 info->plt_initialized = 1; 310 info->plt_initialized = 1;
311 } 311 }
312 if (r_type == R_390_PLTOFF16 || 312 if (r_type == R_390_PLTOFF16 ||
313 r_type == R_390_PLTOFF32 313 r_type == R_390_PLTOFF32 ||
314 || r_type == R_390_PLTOFF64 314 r_type == R_390_PLTOFF64)
315 )
316 val = me->arch.plt_offset - me->arch.got_offset + 315 val = me->arch.plt_offset - me->arch.got_offset +
317 info->plt_offset + rela->r_addend; 316 info->plt_offset + rela->r_addend;
318 else 317 else {
319 val = (Elf_Addr) me->module_core + 318 if (!((r_type == R_390_PLT16DBL &&
320 me->arch.plt_offset + info->plt_offset + 319 val - loc + 0xffffUL < 0x1ffffeUL) ||
321 rela->r_addend - loc; 320 (r_type == R_390_PLT32DBL &&
321 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
322 val = (Elf_Addr) me->module_core +
323 me->arch.plt_offset +
324 info->plt_offset;
325 val += rela->r_addend - loc;
326 }
322 if (r_type == R_390_PLT16DBL) 327 if (r_type == R_390_PLT16DBL)
323 *(unsigned short *) loc = val >> 1; 328 *(unsigned short *) loc = val >> 1;
324 else if (r_type == R_390_PLTOFF16) 329 else if (r_type == R_390_PLTOFF16)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
new file mode 100644
index 000000000000..4bfdc421d7e9
--- /dev/null
+++ b/arch/s390/kernel/nmi.c
@@ -0,0 +1,376 @@
1/*
2 * Machine check handler
3 *
4 * Copyright IBM Corp. 2000,2009
5 * Author(s): Ingo Adlung <adlung@de.ibm.com>,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Cornelia Huck <cornelia.huck@de.ibm.com>,
8 * Heiko Carstens <heiko.carstens@de.ibm.com>,
9 */
10
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/time.h>
14#include <linux/module.h>
15#include <asm/lowcore.h>
16#include <asm/smp.h>
17#include <asm/etr.h>
18#include <asm/cpu.h>
19#include <asm/nmi.h>
20#include <asm/crw.h>
21
22struct mcck_struct {
23 int kill_task;
24 int channel_report;
25 int warning;
26 unsigned long long mcck_code;
27};
28
29static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
30
31static NORET_TYPE void s390_handle_damage(char *msg)
32{
33 smp_send_stop();
34 disabled_wait((unsigned long) __builtin_return_address(0));
35 while (1);
36}
37
38/*
39 * Main machine check handler function. Will be called with interrupts enabled
40 * or disabled and machine checks enabled or disabled.
41 */
42void s390_handle_mcck(void)
43{
44 unsigned long flags;
45 struct mcck_struct mcck;
46
47 /*
48 * Disable machine checks and get the current state of accumulated
49 * machine checks. Afterwards delete the old state and enable machine
50 * checks again.
51 */
52 local_irq_save(flags);
53 local_mcck_disable();
54 mcck = __get_cpu_var(cpu_mcck);
55 memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
56 clear_thread_flag(TIF_MCCK_PENDING);
57 local_mcck_enable();
58 local_irq_restore(flags);
59
60 if (mcck.channel_report)
61 crw_handle_channel_report();
62 /*
63 * A warning may remain for a prolonged period on the bare iron.
64 * (actually until the machine is powered off, or the problem is gone)
65 * So we just stop listening for the WARNING MCH and avoid continuously
66 * being interrupted. One caveat is however, that we must do this per
67 * processor and cannot use the smp version of ctl_clear_bit().
 68 * On VM we only get one interrupt per virtually presented machine check.
69 * Though one suffices, we may get one interrupt per (virtual) cpu.
70 */
71 if (mcck.warning) { /* WARNING pending ? */
72 static int mchchk_wng_posted = 0;
73
74 /* Use single cpu clear, as we cannot handle smp here. */
75 __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
76 if (xchg(&mchchk_wng_posted, 1) == 0)
77 kill_cad_pid(SIGPWR, 1);
78 }
79 if (mcck.kill_task) {
80 local_irq_enable();
81 printk(KERN_EMERG "mcck: Terminating task because of machine "
82 "malfunction (code 0x%016llx).\n", mcck.mcck_code);
83 printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
84 current->comm, current->pid);
85 do_exit(SIGSEGV);
86 }
87}
88EXPORT_SYMBOL_GPL(s390_handle_mcck);
89
90/*
91 * returns 0 if all registers could be validated
92 * returns 1 otherwise
93 */
94static int notrace s390_revalidate_registers(struct mci *mci)
95{
96 int kill_task;
97 u64 tmpclock;
98 u64 zero;
99 void *fpt_save_area, *fpt_creg_save_area;
100
101 kill_task = 0;
102 zero = 0;
103
104 if (!mci->gr) {
105 /*
106 * General purpose registers couldn't be restored and have
107 * unknown contents. Process needs to be terminated.
108 */
109 kill_task = 1;
110 }
111 if (!mci->fp) {
112 /*
113 * Floating point registers can't be restored and
114 * therefore the process needs to be terminated.
115 */
116 kill_task = 1;
117 }
118#ifndef CONFIG_64BIT
119 asm volatile(
120 " ld 0,0(%0)\n"
121 " ld 2,8(%0)\n"
122 " ld 4,16(%0)\n"
123 " ld 6,24(%0)"
124 : : "a" (&S390_lowcore.floating_pt_save_area));
125#endif
126
127 if (MACHINE_HAS_IEEE) {
128#ifdef CONFIG_64BIT
129 fpt_save_area = &S390_lowcore.floating_pt_save_area;
130 fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
131#else
132 fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
133 fpt_creg_save_area = fpt_save_area + 128;
134#endif
135 if (!mci->fc) {
136 /*
137 * Floating point control register can't be restored.
138 * Task will be terminated.
139 */
140 asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
141 kill_task = 1;
142
143 } else
144 asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
145
146 asm volatile(
147 " ld 0,0(%0)\n"
148 " ld 1,8(%0)\n"
149 " ld 2,16(%0)\n"
150 " ld 3,24(%0)\n"
151 " ld 4,32(%0)\n"
152 " ld 5,40(%0)\n"
153 " ld 6,48(%0)\n"
154 " ld 7,56(%0)\n"
155 " ld 8,64(%0)\n"
156 " ld 9,72(%0)\n"
157 " ld 10,80(%0)\n"
158 " ld 11,88(%0)\n"
159 " ld 12,96(%0)\n"
160 " ld 13,104(%0)\n"
161 " ld 14,112(%0)\n"
162 " ld 15,120(%0)\n"
163 : : "a" (fpt_save_area));
164 }
165 /* Revalidate access registers */
166 asm volatile(
167 " lam 0,15,0(%0)"
168 : : "a" (&S390_lowcore.access_regs_save_area));
169 if (!mci->ar) {
170 /*
171 * Access registers have unknown contents.
172 * Terminating task.
173 */
174 kill_task = 1;
175 }
176 /* Revalidate control registers */
177 if (!mci->cr) {
178 /*
179 * Control registers have unknown contents.
180 * Can't recover and therefore stopping machine.
181 */
182 s390_handle_damage("invalid control registers.");
183 } else {
184#ifdef CONFIG_64BIT
185 asm volatile(
186 " lctlg 0,15,0(%0)"
187 : : "a" (&S390_lowcore.cregs_save_area));
188#else
189 asm volatile(
190 " lctl 0,15,0(%0)"
191 : : "a" (&S390_lowcore.cregs_save_area));
192#endif
193 }
194 /*
195 * We don't even try to revalidate the TOD register, since we simply
196 * can't write something sensible into that register.
197 */
198#ifdef CONFIG_64BIT
199 /*
200 * See if we can revalidate the TOD programmable register with its
201 * old contents (should be zero) otherwise set it to zero.
202 */
203 if (!mci->pr)
204 asm volatile(
205 " sr 0,0\n"
206 " sckpf"
207 : : : "0", "cc");
208 else
209 asm volatile(
210 " l 0,0(%0)\n"
211 " sckpf"
212 : : "a" (&S390_lowcore.tod_progreg_save_area)
213 : "0", "cc");
214#endif
215 /* Revalidate clock comparator register */
216 asm volatile(
217 " stck 0(%1)\n"
218 " sckc 0(%1)"
219 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");
220
221 /* Check if old PSW is valid */
222 if (!mci->wp)
223 /*
224 * Can't tell if we come from user or kernel mode
225 * -> stopping machine.
226 */
227 s390_handle_damage("old psw invalid.");
228
229 if (!mci->ms || !mci->pm || !mci->ia)
230 kill_task = 1;
231
232 return kill_task;
233}
234
235#define MAX_IPD_COUNT 29
236#define MAX_IPD_TIME (5 * 60 * USEC_PER_SEC) /* 5 minutes */
237
238#define ED_STP_ISLAND 6 /* External damage STP island check */
239#define ED_STP_SYNC 7 /* External damage STP sync check */
240#define ED_ETR_SYNC 12 /* External damage ETR sync check */
241#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
242
243/*
244 * machine check handler.
245 */
246void notrace s390_do_machine_check(struct pt_regs *regs)
247{
248 static int ipd_count;
249 static DEFINE_SPINLOCK(ipd_lock);
250 static unsigned long long last_ipd;
251 struct mcck_struct *mcck;
252 unsigned long long tmp;
253 struct mci *mci;
254 int umode;
255
256 lockdep_off();
257 s390_idle_check();
258
259 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
260 mcck = &__get_cpu_var(cpu_mcck);
261 umode = user_mode(regs);
262
263 if (mci->sd) {
264 /* System damage -> stopping machine */
265 s390_handle_damage("received system damage machine check.");
266 }
267 if (mci->pd) {
268 if (mci->b) {
269 /* Processing backup -> verify if we can survive this */
270 u64 z_mcic, o_mcic, t_mcic;
271#ifdef CONFIG_64BIT
272 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
273 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
274 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
275 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
276 1ULL<<16);
277#else
278 z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
279 1ULL<<29);
280 o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
281 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
282 1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
283#endif
284 t_mcic = *(u64 *)mci;
285
286 if (((t_mcic & z_mcic) != 0) ||
287 ((t_mcic & o_mcic) != o_mcic)) {
288 s390_handle_damage("processing backup machine "
289 "check with damage.");
290 }
291
292 /*
293 * Nullifying exigent condition, therefore we might
294 * retry this instruction.
295 */
296 spin_lock(&ipd_lock);
297 tmp = get_clock();
298 if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
299 ipd_count++;
300 else
301 ipd_count = 1;
302 last_ipd = tmp;
303 if (ipd_count == MAX_IPD_COUNT)
304 s390_handle_damage("too many ipd retries.");
305 spin_unlock(&ipd_lock);
306 } else {
307 /* Processing damage -> stopping machine */
308 s390_handle_damage("received instruction processing "
309 "damage machine check.");
310 }
311 }
312 if (s390_revalidate_registers(mci)) {
313 if (umode) {
314 /*
315 * Couldn't restore all register contents while in
316 * user mode -> mark task for termination.
317 */
318 mcck->kill_task = 1;
319 mcck->mcck_code = *(unsigned long long *) mci;
320 set_thread_flag(TIF_MCCK_PENDING);
321 } else {
322 /*
323 * Couldn't restore all register contents while in
324 * kernel mode -> stopping machine.
325 */
326 s390_handle_damage("unable to revalidate registers.");
327 }
328 }
329 if (mci->cd) {
330 /* Timing facility damage */
331 s390_handle_damage("TOD clock damaged");
332 }
333 if (mci->ed && mci->ec) {
334 /* External damage */
335 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
336 etr_sync_check();
337 if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
338 etr_switch_to_local();
339 if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
340 stp_sync_check();
341 if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
342 stp_island_check();
343 }
344 if (mci->se)
345 /* Storage error uncorrected */
346 s390_handle_damage("received storage error uncorrected "
347 "machine check.");
348 if (mci->ke)
349 /* Storage key-error uncorrected */
350 s390_handle_damage("received storage key-error uncorrected "
351 "machine check.");
352 if (mci->ds && mci->fa)
353 /* Storage degradation */
354 s390_handle_damage("received storage degradation machine "
355 "check.");
356 if (mci->cp) {
357 /* Channel report word pending */
358 mcck->channel_report = 1;
359 set_thread_flag(TIF_MCCK_PENDING);
360 }
361 if (mci->w) {
362 /* Warning pending */
363 mcck->warning = 1;
364 set_thread_flag(TIF_MCCK_PENDING);
365 }
366 lockdep_on();
367}
368
369static int __init machine_check_init(void)
370{
371 ctl_set_bit(14, 25); /* enable external damage MCH */
372 ctl_set_bit(14, 27); /* enable system recovery MCH */
373 ctl_set_bit(14, 24); /* enable warning MCH */
374 return 0;
375}
376arch_initcall(machine_check_init);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 5cd38a90e64d..b48e961a38f6 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -1,18 +1,10 @@
1/* 1/*
2 * arch/s390/kernel/process.c 2 * This file handles the architecture dependent parts of process handling.
3 * 3 *
4 * S390 version 4 * Copyright IBM Corp. 1999,2009
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Hartmut Penner <hp@de.ibm.com>,
7 * Hartmut Penner (hp@de.ibm.com), 7 * Denis Joseph Barrow,
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
9 *
10 * Derived from "arch/i386/kernel/process.c"
11 * Copyright (C) 1995, Linus Torvalds
12 */
13
14/*
15 * This file handles the architecture-dependent parts of process handling..
16 */ 8 */
17 9
18#include <linux/compiler.h> 10#include <linux/compiler.h>
@@ -47,6 +39,7 @@
47#include <asm/processor.h> 39#include <asm/processor.h>
48#include <asm/irq.h> 40#include <asm/irq.h>
49#include <asm/timer.h> 41#include <asm/timer.h>
42#include <asm/nmi.h>
50#include "entry.h" 43#include "entry.h"
51 44
52asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); 45asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -76,7 +69,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
76 return sf->gprs[8]; 69 return sf->gprs[8];
77} 70}
78 71
79extern void s390_handle_mcck(void);
80/* 72/*
81 * The idle loop on a S390... 73 * The idle loop on a S390...
82 */ 74 */
@@ -149,6 +141,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
149 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 141 return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
150 0, &regs, 0, NULL, NULL); 142 0, &regs, 0, NULL, NULL);
151} 143}
144EXPORT_SYMBOL(kernel_thread);
152 145
153/* 146/*
154 * Free current thread data structures etc.. 147 * Free current thread data structures etc..
@@ -168,34 +161,35 @@ void release_thread(struct task_struct *dead_task)
168} 161}
169 162
170int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp, 163int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
171 unsigned long unused, 164 unsigned long unused,
172 struct task_struct * p, struct pt_regs * regs) 165 struct task_struct *p, struct pt_regs *regs)
173{ 166{
174 struct fake_frame 167 struct thread_info *ti;
175 { 168 struct fake_frame
176 struct stack_frame sf; 169 {
177 struct pt_regs childregs; 170 struct stack_frame sf;
178 } *frame; 171 struct pt_regs childregs;
179 172 } *frame;
180 frame = container_of(task_pt_regs(p), struct fake_frame, childregs); 173
181 p->thread.ksp = (unsigned long) frame; 174 frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
175 p->thread.ksp = (unsigned long) frame;
182 /* Store access registers to kernel stack of new process. */ 176 /* Store access registers to kernel stack of new process. */
183 frame->childregs = *regs; 177 frame->childregs = *regs;
184 frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */ 178 frame->childregs.gprs[2] = 0; /* child returns 0 on fork. */
185 frame->childregs.gprs[15] = new_stackp; 179 frame->childregs.gprs[15] = new_stackp;
186 frame->sf.back_chain = 0; 180 frame->sf.back_chain = 0;
187 181
188 /* new return point is ret_from_fork */ 182 /* new return point is ret_from_fork */
189 frame->sf.gprs[8] = (unsigned long) ret_from_fork; 183 frame->sf.gprs[8] = (unsigned long) ret_from_fork;
190 184
191 /* fake return stack for resume(), don't go back to schedule */ 185 /* fake return stack for resume(), don't go back to schedule */
192 frame->sf.gprs[9] = (unsigned long) frame; 186 frame->sf.gprs[9] = (unsigned long) frame;
193 187
194 /* Save access registers to new thread structure. */ 188 /* Save access registers to new thread structure. */
195 save_access_regs(&p->thread.acrs[0]); 189 save_access_regs(&p->thread.acrs[0]);
196 190
197#ifndef CONFIG_64BIT 191#ifndef CONFIG_64BIT
198 /* 192 /*
199 * save fprs to current->thread.fp_regs to merge them with 193 * save fprs to current->thread.fp_regs to merge them with
200 * the emulated registers and then copy the result to the child. 194 * the emulated registers and then copy the result to the child.
201 */ 195 */
@@ -220,10 +214,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
220#endif /* CONFIG_64BIT */ 214#endif /* CONFIG_64BIT */
221 /* start new process with ar4 pointing to the correct address space */ 215 /* start new process with ar4 pointing to the correct address space */
222 p->thread.mm_segment = get_fs(); 216 p->thread.mm_segment = get_fs();
223 /* Don't copy debug registers */ 217 /* Don't copy debug registers */
224 memset(&p->thread.per_info,0,sizeof(p->thread.per_info)); 218 memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
225 219 /* Initialize per thread user and system timer values */
226 return 0; 220 ti = task_thread_info(p);
221 ti->user_timer = 0;
222 ti->system_timer = 0;
223 return 0;
227} 224}
228 225
229SYSCALL_DEFINE0(fork) 226SYSCALL_DEFINE0(fork)
@@ -311,7 +308,7 @@ out:
311int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) 308int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
312{ 309{
313#ifndef CONFIG_64BIT 310#ifndef CONFIG_64BIT
314 /* 311 /*
315 * save fprs to current->thread.fp_regs to merge them with 312 * save fprs to current->thread.fp_regs to merge them with
316 * the emulated registers and then copy the result to the dump. 313 * the emulated registers and then copy the result to the dump.
317 */ 314 */
@@ -322,6 +319,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
322#endif /* CONFIG_64BIT */ 319#endif /* CONFIG_64BIT */
323 return 1; 320 return 1;
324} 321}
322EXPORT_SYMBOL(dump_fpu);
325 323
326unsigned long get_wchan(struct task_struct *p) 324unsigned long get_wchan(struct task_struct *p)
327{ 325{
@@ -346,4 +344,3 @@ unsigned long get_wchan(struct task_struct *p)
346 } 344 }
347 return 0; 345 return 0;
348} 346}
349
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 82c1872cfe80..802c8ab247f3 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,10 +18,11 @@
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
20 20
21void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo) 21void __cpuinit print_cpu_info(void)
22{ 22{
23 pr_info("Processor %d started, address %d, identification %06X\n", 23 pr_info("Processor %d started, address %d, identification %06X\n",
24 cpuinfo->cpu_nr, cpuinfo->cpu_addr, cpuinfo->cpu_id.ident); 24 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr,
25 S390_lowcore.cpu_id.ident);
25} 26}
26 27
27/* 28/*
@@ -30,48 +31,46 @@ void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
30 31
31static int show_cpuinfo(struct seq_file *m, void *v) 32static int show_cpuinfo(struct seq_file *m, void *v)
32{ 33{
33 static const char *hwcap_str[8] = { 34 static const char *hwcap_str[9] = {
34 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 35 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
35 "edat" 36 "edat", "etf3eh"
36 }; 37 };
37 struct cpuinfo_S390 *cpuinfo; 38 struct _lowcore *lc;
38 unsigned long n = (unsigned long) v - 1; 39 unsigned long n = (unsigned long) v - 1;
39 int i; 40 int i;
40 41
41 s390_adjust_jiffies(); 42 s390_adjust_jiffies();
42 preempt_disable(); 43 preempt_disable();
43 if (!n) { 44 if (!n) {
44 seq_printf(m, "vendor_id : IBM/S390\n" 45 seq_printf(m, "vendor_id : IBM/S390\n"
45 "# processors : %i\n" 46 "# processors : %i\n"
46 "bogomips per cpu: %lu.%02lu\n", 47 "bogomips per cpu: %lu.%02lu\n",
47 num_online_cpus(), loops_per_jiffy/(500000/HZ), 48 num_online_cpus(), loops_per_jiffy/(500000/HZ),
48 (loops_per_jiffy/(5000/HZ))%100); 49 (loops_per_jiffy/(5000/HZ))%100);
49 seq_puts(m, "features\t: "); 50 seq_puts(m, "features\t: ");
50 for (i = 0; i < 8; i++) 51 for (i = 0; i < 9; i++)
51 if (hwcap_str[i] && (elf_hwcap & (1UL << i))) 52 if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
52 seq_printf(m, "%s ", hwcap_str[i]); 53 seq_printf(m, "%s ", hwcap_str[i]);
53 seq_puts(m, "\n"); 54 seq_puts(m, "\n");
54 } 55 }
55 56
56 if (cpu_online(n)) { 57 if (cpu_online(n)) {
57#ifdef CONFIG_SMP 58#ifdef CONFIG_SMP
58 if (smp_processor_id() == n) 59 lc = (smp_processor_id() == n) ?
59 cpuinfo = &S390_lowcore.cpu_data; 60 &S390_lowcore : lowcore_ptr[n];
60 else
61 cpuinfo = &lowcore_ptr[n]->cpu_data;
62#else 61#else
63 cpuinfo = &S390_lowcore.cpu_data; 62 lc = &S390_lowcore;
64#endif 63#endif
65 seq_printf(m, "processor %li: " 64 seq_printf(m, "processor %li: "
66 "version = %02X, " 65 "version = %02X, "
67 "identification = %06X, " 66 "identification = %06X, "
68 "machine = %04X\n", 67 "machine = %04X\n",
69 n, cpuinfo->cpu_id.version, 68 n, lc->cpu_id.version,
70 cpuinfo->cpu_id.ident, 69 lc->cpu_id.ident,
71 cpuinfo->cpu_id.machine); 70 lc->cpu_id.machine);
72 } 71 }
73 preempt_enable(); 72 preempt_enable();
74 return 0; 73 return 0;
75} 74}
76 75
77static void *c_start(struct seq_file *m, loff_t *pos) 76static void *c_start(struct seq_file *m, loff_t *pos)
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index c41930499a5f..774147824c3d 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -1,10 +1,7 @@
1/* 1/*
2 * arch/s390/kernel/reipl.S 2 * Copyright IBM Corp 2000,2009
3 * 3 * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
4 * S390 version 4 * Denis Joseph Barrow,
5 * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
8 */ 5 */
9 6
10#include <asm/lowcore.h> 7#include <asm/lowcore.h>
@@ -30,7 +27,7 @@ do_reipl_asm: basr %r13,0
30 mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) 27 mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
31 stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) 28 stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
32 stckc .Lclkcmp-.Lpg0(%r13) 29 stckc .Lclkcmp-.Lpg0(%r13)
33 mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) 30 mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(7,%r1),.Lclkcmp-.Lpg0(%r13)
34 stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1) 31 stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
35 stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1) 32 stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
36 33
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 46b90cb03707..656fcbb9bd83 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,49 +1,5 @@
1/*
2 * arch/s390/kernel/s390_ksyms.c
3 *
4 * S390 version
5 */
6#include <linux/highuid.h>
7#include <linux/module.h> 1#include <linux/module.h>
8#include <linux/mm.h>
9#include <linux/smp.h>
10#include <linux/syscalls.h>
11#include <linux/interrupt.h>
12#include <asm/checksum.h>
13#include <asm/cpcmd.h>
14#include <asm/delay.h>
15#include <asm/pgalloc.h>
16#include <asm/setup.h>
17#include <asm/ftrace.h> 2#include <asm/ftrace.h>
18#ifdef CONFIG_IP_MULTICAST
19#include <net/arp.h>
20#endif
21
22/*
23 * memory management
24 */
25EXPORT_SYMBOL(_oi_bitmap);
26EXPORT_SYMBOL(_ni_bitmap);
27EXPORT_SYMBOL(_zb_findmap);
28EXPORT_SYMBOL(_sb_findmap);
29
30/*
31 * binfmt_elf loader
32 */
33extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
34EXPORT_SYMBOL(dump_fpu);
35EXPORT_SYMBOL(empty_zero_page);
36
37/*
38 * misc.
39 */
40EXPORT_SYMBOL(machine_flags);
41EXPORT_SYMBOL(__udelay);
42EXPORT_SYMBOL(kernel_thread);
43EXPORT_SYMBOL(csum_fold);
44EXPORT_SYMBOL(console_mode);
45EXPORT_SYMBOL(console_devno);
46EXPORT_SYMBOL(console_irq);
47 3
48#ifdef CONFIG_FUNCTION_TRACER 4#ifdef CONFIG_FUNCTION_TRACER
49EXPORT_SYMBOL(_mcount); 5EXPORT_SYMBOL(_mcount);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c5cfb6185eac..06201b93cbbf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -74,9 +74,17 @@ EXPORT_SYMBOL(uaccess);
74 * Machine setup.. 74 * Machine setup..
75 */ 75 */
76unsigned int console_mode = 0; 76unsigned int console_mode = 0;
77EXPORT_SYMBOL(console_mode);
78
77unsigned int console_devno = -1; 79unsigned int console_devno = -1;
80EXPORT_SYMBOL(console_devno);
81
78unsigned int console_irq = -1; 82unsigned int console_irq = -1;
83EXPORT_SYMBOL(console_irq);
84
79unsigned long machine_flags; 85unsigned long machine_flags;
86EXPORT_SYMBOL(machine_flags);
87
80unsigned long elf_hwcap = 0; 88unsigned long elf_hwcap = 0;
81char elf_platform[ELF_PLATFORM_SIZE]; 89char elf_platform[ELF_PLATFORM_SIZE];
82 90
@@ -86,6 +94,10 @@ volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
86int __initdata memory_end_set; 94int __initdata memory_end_set;
87unsigned long __initdata memory_end; 95unsigned long __initdata memory_end;
88 96
97/* An array with a pointer to the lowcore of every CPU. */
98struct _lowcore *lowcore_ptr[NR_CPUS];
99EXPORT_SYMBOL(lowcore_ptr);
100
89/* 101/*
90 * This is set up by the setup-routine at boot-time 102 * This is set up by the setup-routine at boot-time
91 * for S390 need to find out, what we have to setup 103 * for S390 need to find out, what we have to setup
@@ -109,13 +121,10 @@ static struct resource data_resource = {
109 */ 121 */
110void __cpuinit cpu_init(void) 122void __cpuinit cpu_init(void)
111{ 123{
112 int addr = hard_smp_processor_id();
113
114 /* 124 /*
115 * Store processor id in lowcore (used e.g. in timer_interrupt) 125 * Store processor id in lowcore (used e.g. in timer_interrupt)
116 */ 126 */
117 get_cpu_id(&S390_lowcore.cpu_data.cpu_id); 127 get_cpu_id(&S390_lowcore.cpu_id);
118 S390_lowcore.cpu_data.cpu_addr = addr;
119 128
120 /* 129 /*
121 * Force FPU initialization: 130 * Force FPU initialization:
@@ -125,8 +134,7 @@ void __cpuinit cpu_init(void)
125 134
126 atomic_inc(&init_mm.mm_count); 135 atomic_inc(&init_mm.mm_count);
127 current->active_mm = &init_mm; 136 current->active_mm = &init_mm;
128 if (current->mm) 137 BUG_ON(current->mm);
129 BUG();
130 enter_lazy_tlb(&init_mm, current); 138 enter_lazy_tlb(&init_mm, current);
131} 139}
132 140
@@ -217,7 +225,7 @@ static void __init conmode_default(void)
217 } 225 }
218} 226}
219 227
220#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 228#ifdef CONFIG_ZFCPDUMP
221static void __init setup_zfcpdump(unsigned int console_devno) 229static void __init setup_zfcpdump(unsigned int console_devno)
222{ 230{
223 static char str[41]; 231 static char str[41];
@@ -289,11 +297,7 @@ static int __init early_parse_mem(char *p)
289early_param("mem", early_parse_mem); 297early_param("mem", early_parse_mem);
290 298
291#ifdef CONFIG_S390_SWITCH_AMODE 299#ifdef CONFIG_S390_SWITCH_AMODE
292#ifdef CONFIG_PGSTE
293unsigned int switch_amode = 1;
294#else
295unsigned int switch_amode = 0; 300unsigned int switch_amode = 0;
296#endif
297EXPORT_SYMBOL_GPL(switch_amode); 301EXPORT_SYMBOL_GPL(switch_amode);
298 302
299static int set_amode_and_uaccess(unsigned long user_amode, 303static int set_amode_and_uaccess(unsigned long user_amode,
@@ -414,7 +418,6 @@ setup_lowcore(void)
414 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; 418 PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
415 lc->io_new_psw.mask = psw_kernel_bits; 419 lc->io_new_psw.mask = psw_kernel_bits;
416 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 420 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
417 lc->ipl_device = S390_lowcore.ipl_device;
418 lc->clock_comparator = -1ULL; 421 lc->clock_comparator = -1ULL;
419 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 422 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
420 lc->async_stack = (unsigned long) 423 lc->async_stack = (unsigned long)
@@ -434,6 +437,7 @@ setup_lowcore(void)
434 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; 437 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
435#endif 438#endif
436 set_prefix((u32)(unsigned long) lc); 439 set_prefix((u32)(unsigned long) lc);
440 lowcore_ptr[0] = lc;
437} 441}
438 442
439static void __init 443static void __init
@@ -510,7 +514,7 @@ static void __init setup_memory_end(void)
510 unsigned long max_mem; 514 unsigned long max_mem;
511 int i; 515 int i;
512 516
513#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 517#ifdef CONFIG_ZFCPDUMP
514 if (ipl_info.type == IPL_TYPE_FCP_DUMP) { 518 if (ipl_info.type == IPL_TYPE_FCP_DUMP) {
515 memory_end = ZFCPDUMP_HSA_SIZE; 519 memory_end = ZFCPDUMP_HSA_SIZE;
516 memory_end_set = 1; 520 memory_end_set = 1;
@@ -677,7 +681,6 @@ setup_memory(void)
677static void __init setup_hwcaps(void) 681static void __init setup_hwcaps(void)
678{ 682{
679 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; 683 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
680 struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
681 unsigned long long facility_list_extended; 684 unsigned long long facility_list_extended;
682 unsigned int facility_list; 685 unsigned int facility_list;
683 int i; 686 int i;
@@ -693,15 +696,22 @@ static void __init setup_hwcaps(void)
693 * Bit 17: the message-security assist is installed 696 * Bit 17: the message-security assist is installed
694 * Bit 19: the long-displacement facility is installed 697 * Bit 19: the long-displacement facility is installed
695 * Bit 21: the extended-immediate facility is installed 698 * Bit 21: the extended-immediate facility is installed
699 * Bit 22: extended-translation facility 3 is installed
700 * Bit 30: extended-translation facility 3 enhancement facility
696 * These get translated to: 701 * These get translated to:
697 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1, 702 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
698 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3, 703 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
699 * HWCAP_S390_LDISP bit 4, and HWCAP_S390_EIMM bit 5. 704 * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
705 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
700 */ 706 */
701 for (i = 0; i < 6; i++) 707 for (i = 0; i < 6; i++)
702 if (facility_list & (1UL << (31 - stfl_bits[i]))) 708 if (facility_list & (1UL << (31 - stfl_bits[i])))
703 elf_hwcap |= 1UL << i; 709 elf_hwcap |= 1UL << i;
704 710
711 if ((facility_list & (1UL << (31 - 22)))
712 && (facility_list & (1UL << (31 - 30))))
713 elf_hwcap |= 1UL << 8;
714
705 /* 715 /*
706 * Check for additional facilities with store-facility-list-extended. 716 * Check for additional facilities with store-facility-list-extended.
707 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0 717 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
@@ -710,20 +720,22 @@ static void __init setup_hwcaps(void)
710 * How many facility words are stored depends on the number of 720 * How many facility words are stored depends on the number of
 711 * doublewords passed to the instruction. The additional facilities 721
712 * are: 722 * are:
713 * Bit 43: decimal floating point facility is installed 723 * Bit 42: decimal floating point facility is installed
724 * Bit 44: perform floating point operation facility is installed
714 * translated to: 725 * translated to:
715 * HWCAP_S390_DFP bit 6. 726 * HWCAP_S390_DFP bit 6 (42 && 44).
716 */ 727 */
717 if ((elf_hwcap & (1UL << 2)) && 728 if ((elf_hwcap & (1UL << 2)) &&
718 __stfle(&facility_list_extended, 1) > 0) { 729 __stfle(&facility_list_extended, 1) > 0) {
719 if (facility_list_extended & (1ULL << (64 - 43))) 730 if ((facility_list_extended & (1ULL << (63 - 42)))
731 && (facility_list_extended & (1ULL << (63 - 44))))
720 elf_hwcap |= 1UL << 6; 732 elf_hwcap |= 1UL << 6;
721 } 733 }
722 734
723 if (MACHINE_HAS_HPAGE) 735 if (MACHINE_HAS_HPAGE)
724 elf_hwcap |= 1UL << 7; 736 elf_hwcap |= 1UL << 7;
725 737
726 switch (cpuinfo->cpu_id.machine) { 738 switch (S390_lowcore.cpu_id.machine) {
727 case 0x9672: 739 case 0x9672:
728#if !defined(CONFIG_64BIT) 740#if !defined(CONFIG_64BIT)
729 default: /* Use "g5" as default for 31 bit kernels. */ 741 default: /* Use "g5" as default for 31 bit kernels. */
@@ -816,7 +828,7 @@ setup_arch(char **cmdline_p)
816 setup_lowcore(); 828 setup_lowcore();
817 829
818 cpu_init(); 830 cpu_init();
819 __cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr; 831 __cpu_logical_map[0] = stap();
820 s390_init_cpu_topology(); 832 s390_init_cpu_topology();
821 833
822 /* 834 /*
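
A note on the hwcap hunk above: the facility lists store facility bit 0 in the most significant bit, so bit n of the 32-bit stfl word is tested with 1UL << (31 - n) and bit n of an stfle doubleword with 1ULL << (63 - n). The masks 1ULL << (64 - 43) and 1ULL << (63 - 42) are numerically identical; the substantive change is that HWCAP_S390_DFP now additionally requires facility bit 44 (perform floating point operation). A small illustrative helper, not from the patch, showing the convention:

#include <stdio.h>

/* Facility bit n of an stfle doubleword, counted from the MSB. */
static int test_facility_dword(unsigned long long list, unsigned int nr)
{
        return (list & (1ULL << (63 - nr))) != 0;
}

int main(void)
{
        /* A doubleword with only facility bit 42 (decimal floating point) set. */
        unsigned long long list = 1ULL << (63 - 42);

        printf("%d %d\n", test_facility_dword(list, 42),         /* 1 */
               test_facility_dword(list, 43));                   /* 0 */
        return 0;
}
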
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2d337cbb9329..006ed5016eb4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/cache.h> 33#include <linux/cache.h>
34#include <linux/interrupt.h> 34#include <linux/interrupt.h>
35#include <linux/irqflags.h>
35#include <linux/cpu.h> 36#include <linux/cpu.h>
36#include <linux/timex.h> 37#include <linux/timex.h>
37#include <linux/bootmem.h> 38#include <linux/bootmem.h>
@@ -50,12 +51,6 @@
50#include <asm/vdso.h> 51#include <asm/vdso.h>
51#include "entry.h" 52#include "entry.h"
52 53
53/*
54 * An array with a pointer the lowcore of every CPU.
55 */
56struct _lowcore *lowcore_ptr[NR_CPUS];
57EXPORT_SYMBOL(lowcore_ptr);
58
59static struct task_struct *current_set[NR_CPUS]; 54static struct task_struct *current_set[NR_CPUS];
60 55
61static u8 smp_cpu_type; 56static u8 smp_cpu_type;
@@ -81,9 +76,7 @@ void smp_send_stop(void)
81 76
82 /* Disable all interrupts/machine checks */ 77 /* Disable all interrupts/machine checks */
83 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); 78 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
84 79 trace_hardirqs_off();
85 /* write magic number to zero page (absolute 0) */
86 lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
87 80
88 /* stop all processors */ 81 /* stop all processors */
89 for_each_online_cpu(cpu) { 82 for_each_online_cpu(cpu) {
@@ -233,7 +226,7 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
233 */ 226 */
234#define CPU_INIT_NO 1 227#define CPU_INIT_NO 1
235 228
236#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 229#ifdef CONFIG_ZFCPDUMP
237 230
238/* 231/*
239 * zfcpdump_prefix_array holds prefix registers for the following scenario: 232 * zfcpdump_prefix_array holds prefix registers for the following scenario:
@@ -274,7 +267,7 @@ EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
274 267
275static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } 268static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
276 269
277#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ 270#endif /* CONFIG_ZFCPDUMP */
278 271
279static int cpu_stopped(int cpu) 272static int cpu_stopped(int cpu)
280{ 273{
@@ -304,8 +297,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
304{ 297{
305 int cpu_id, logical_cpu; 298 int cpu_id, logical_cpu;
306 299
307 logical_cpu = first_cpu(avail); 300 logical_cpu = cpumask_first(&avail);
308 if (logical_cpu == NR_CPUS) 301 if (logical_cpu >= nr_cpu_ids)
309 return 0; 302 return 0;
310 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { 303 for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
311 if (cpu_known(cpu_id)) 304 if (cpu_known(cpu_id))
@@ -316,8 +309,8 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
316 continue; 309 continue;
317 cpu_set(logical_cpu, cpu_present_map); 310 cpu_set(logical_cpu, cpu_present_map);
318 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 311 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
319 logical_cpu = next_cpu(logical_cpu, avail); 312 logical_cpu = cpumask_next(logical_cpu, &avail);
320 if (logical_cpu == NR_CPUS) 313 if (logical_cpu >= nr_cpu_ids)
321 break; 314 break;
322 } 315 }
323 return 0; 316 return 0;
@@ -329,8 +322,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
329 int cpu_id, logical_cpu, cpu; 322 int cpu_id, logical_cpu, cpu;
330 int rc; 323 int rc;
331 324
332 logical_cpu = first_cpu(avail); 325 logical_cpu = cpumask_first(&avail);
333 if (logical_cpu == NR_CPUS) 326 if (logical_cpu >= nr_cpu_ids)
334 return 0; 327 return 0;
335 info = kmalloc(sizeof(*info), GFP_KERNEL); 328 info = kmalloc(sizeof(*info), GFP_KERNEL);
336 if (!info) 329 if (!info)
@@ -351,8 +344,8 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
351 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; 344 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
352 else 345 else
353 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED; 346 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
354 logical_cpu = next_cpu(logical_cpu, avail); 347 logical_cpu = cpumask_next(logical_cpu, &avail);
355 if (logical_cpu == NR_CPUS) 348 if (logical_cpu >= nr_cpu_ids)
356 break; 349 break;
357 } 350 }
358out: 351out:
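
Both rescan helpers move from the old first_cpu()/next_cpu() calls bounded by NR_CPUS to
cpumask_first()/cpumask_next() bounded by nr_cpu_ids. The new iteration idiom, sketched
with a made-up walk_avail() helper:

	static void walk_avail(const cpumask_t *avail)
	{
		int cpu;

		/* cpumask_next() returns a value >= nr_cpu_ids once the mask is exhausted. */
		for (cpu = cpumask_first(avail); cpu < nr_cpu_ids;
		     cpu = cpumask_next(cpu, avail))
			printk(KERN_INFO "available cpu %d\n", cpu);
	}
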
@@ -379,7 +372,7 @@ static void __init smp_detect_cpus(void)
379 372
380 c_cpus = 1; 373 c_cpus = 1;
381 s_cpus = 0; 374 s_cpus = 0;
382 boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; 375 boot_cpu_addr = __cpu_logical_map[0];
383 info = kmalloc(sizeof(*info), GFP_KERNEL); 376 info = kmalloc(sizeof(*info), GFP_KERNEL);
384 if (!info) 377 if (!info)
385 panic("smp_detect_cpus failed to allocate memory\n"); 378 panic("smp_detect_cpus failed to allocate memory\n");
@@ -453,7 +446,7 @@ int __cpuinit start_secondary(void *cpuvoid)
453 /* Switch on interrupts */ 446 /* Switch on interrupts */
454 local_irq_enable(); 447 local_irq_enable();
455 /* Print info about this processor */ 448 /* Print info about this processor */
456 print_cpu_info(&S390_lowcore.cpu_data); 449 print_cpu_info();
457 /* cpu_idle will call schedule for us */ 450 /* cpu_idle will call schedule for us */
458 cpu_idle(); 451 cpu_idle();
459 return 0; 452 return 0;
@@ -515,7 +508,6 @@ out:
515 return -ENOMEM; 508 return -ENOMEM;
516} 509}
517 510
518#ifdef CONFIG_HOTPLUG_CPU
519static void smp_free_lowcore(int cpu) 511static void smp_free_lowcore(int cpu)
520{ 512{
521 struct _lowcore *lowcore; 513 struct _lowcore *lowcore;
@@ -534,7 +526,6 @@ static void smp_free_lowcore(int cpu)
534 free_pages((unsigned long) lowcore, lc_order); 526 free_pages((unsigned long) lowcore, lc_order);
535 lowcore_ptr[cpu] = NULL; 527 lowcore_ptr[cpu] = NULL;
536} 528}
537#endif /* CONFIG_HOTPLUG_CPU */
538 529
539/* Upping and downing of CPUs */ 530/* Upping and downing of CPUs */
540int __cpuinit __cpu_up(unsigned int cpu) 531int __cpuinit __cpu_up(unsigned int cpu)
@@ -543,16 +534,23 @@ int __cpuinit __cpu_up(unsigned int cpu)
543 struct _lowcore *cpu_lowcore; 534 struct _lowcore *cpu_lowcore;
544 struct stack_frame *sf; 535 struct stack_frame *sf;
545 sigp_ccode ccode; 536 sigp_ccode ccode;
537 u32 lowcore;
546 538
547 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED) 539 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
548 return -EIO; 540 return -EIO;
549 if (smp_alloc_lowcore(cpu)) 541 if (smp_alloc_lowcore(cpu))
550 return -ENOMEM; 542 return -ENOMEM;
551 543 do {
552 ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), 544 ccode = signal_processor(cpu, sigp_initial_cpu_reset);
553 cpu, sigp_set_prefix); 545 if (ccode == sigp_busy)
554 if (ccode) 546 udelay(10);
555 return -EIO; 547 if (ccode == sigp_not_operational)
548 goto err_out;
549 } while (ccode == sigp_busy);
550
551 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
552 while (signal_processor_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
553 udelay(10);
556 554
557 idle = current_set[cpu]; 555 idle = current_set[cpu];
558 cpu_lowcore = lowcore_ptr[cpu]; 556 cpu_lowcore = lowcore_ptr[cpu];
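
__cpu_up() now performs an initial CPU reset before setting the prefix register, retrying
while the target CPU reports busy and giving up (freeing the lowcore again) if it is not
operational. The retry pattern could be factored out roughly as follows; sigp_retry() is
hypothetical and not part of the patch:

	static sigp_ccode sigp_retry(int cpu, int order)
	{
		sigp_ccode ccode;

		do {
			ccode = signal_processor(cpu, order);
			if (ccode == sigp_busy)
				udelay(10);
		} while (ccode == sigp_busy);
		return ccode;
	}

With it, the reset step would read:
if (sigp_retry(cpu, sigp_initial_cpu_reset) == sigp_not_operational)
	goto err_out;
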
@@ -571,9 +569,8 @@ int __cpuinit __cpu_up(unsigned int cpu)
571 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); 569 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
572 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; 570 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
573 cpu_lowcore->current_task = (unsigned long) idle; 571 cpu_lowcore->current_task = (unsigned long) idle;
574 cpu_lowcore->cpu_data.cpu_nr = cpu; 572 cpu_lowcore->cpu_nr = cpu;
575 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce; 573 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
576 cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
577 eieio(); 574 eieio();
578 575
579 while (signal_processor(cpu, sigp_restart) == sigp_busy) 576 while (signal_processor(cpu, sigp_restart) == sigp_busy)
@@ -582,6 +579,10 @@ int __cpuinit __cpu_up(unsigned int cpu)
582 while (!cpu_online(cpu)) 579 while (!cpu_online(cpu))
583 cpu_relax(); 580 cpu_relax();
584 return 0; 581 return 0;
582
583err_out:
584 smp_free_lowcore(cpu);
585 return -EIO;
585} 586}
586 587
587static int __init setup_possible_cpus(char *s) 588static int __init setup_possible_cpus(char *s)
@@ -589,9 +590,8 @@ static int __init setup_possible_cpus(char *s)
589 int pcpus, cpu; 590 int pcpus, cpu;
590 591
591 pcpus = simple_strtoul(s, NULL, 0); 592 pcpus = simple_strtoul(s, NULL, 0);
592 cpu_possible_map = cpumask_of_cpu(0); 593 for (cpu = 0; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
593 for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++) 594 set_cpu_possible(cpu, true);
594 cpu_set(cpu, cpu_possible_map);
595 return 0; 595 return 0;
596} 596}
597early_param("possible_cpus", setup_possible_cpus); 597early_param("possible_cpus", setup_possible_cpus);
@@ -663,7 +663,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
663 /* request the 0x1201 emergency signal external interrupt */ 663 /* request the 0x1201 emergency signal external interrupt */
664 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) 664 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
665 panic("Couldn't request external interrupt 0x1201"); 665 panic("Couldn't request external interrupt 0x1201");
666 print_cpu_info(&S390_lowcore.cpu_data); 666 print_cpu_info();
667 667
668 /* Reallocate current lowcore, but keep its contents. */ 668 /* Reallocate current lowcore, but keep its contents. */
669 lc_order = sizeof(long) == 8 ? 1 : 0; 669 lc_order = sizeof(long) == 8 ? 1 : 0;
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
new file mode 100644
index 000000000000..b5e75e1061c8
--- /dev/null
+++ b/arch/s390/kernel/sysinfo.c
@@ -0,0 +1,428 @@
1/*
2 * Copyright IBM Corp. 2001, 2009
3 * Author(s): Ulrich Weigand <Ulrich.Weigand@de.ibm.com>,
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
5 */
6
7#include <linux/kernel.h>
8#include <linux/mm.h>
9#include <linux/proc_fs.h>
10#include <linux/seq_file.h>
11#include <linux/init.h>
12#include <linux/delay.h>
13#include <linux/module.h>
14#include <asm/ebcdic.h>
15#include <asm/sysinfo.h>
16#include <asm/cpcmd.h>
17
18/* Sigh, math-emu. Don't ask. */
19#include <asm/sfp-util.h>
20#include <math-emu/soft-fp.h>
21#include <math-emu/single.h>
22
23static inline int stsi_0(void)
24{
25 int rc = stsi(NULL, 0, 0, 0);
26 return rc == -ENOSYS ? rc : (((unsigned int) rc) >> 28);
27}
28
29static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
30{
31 if (stsi(info, 1, 1, 1) == -ENOSYS)
32 return len;
33
34 EBCASC(info->manufacturer, sizeof(info->manufacturer));
35 EBCASC(info->type, sizeof(info->type));
36 EBCASC(info->model, sizeof(info->model));
37 EBCASC(info->sequence, sizeof(info->sequence));
38 EBCASC(info->plant, sizeof(info->plant));
39 EBCASC(info->model_capacity, sizeof(info->model_capacity));
40 EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
41 EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
42 len += sprintf(page + len, "Manufacturer: %-16.16s\n",
43 info->manufacturer);
44 len += sprintf(page + len, "Type: %-4.4s\n",
45 info->type);
46 if (info->model[0] != '\0')
47 /*
48 * Sigh: the model field has been renamed with System z9
49 * to model_capacity and a new model field has been added
 50 * after the plant field. To avoid confusing older programs,
 51 * the "Model:" line prints "model_capacity model" or just
 52 * "model_capacity" if the model string is empty.
53 */
54 len += sprintf(page + len,
55 "Model: %-16.16s %-16.16s\n",
56 info->model_capacity, info->model);
57 else
58 len += sprintf(page + len, "Model: %-16.16s\n",
59 info->model_capacity);
60 len += sprintf(page + len, "Sequence Code: %-16.16s\n",
61 info->sequence);
62 len += sprintf(page + len, "Plant: %-4.4s\n",
63 info->plant);
64 len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
65 info->model_capacity, *(u32 *) info->model_cap_rating);
66 if (info->model_perm_cap[0] != '\0')
67 len += sprintf(page + len,
68 "Model Perm. Capacity: %-16.16s %08u\n",
69 info->model_perm_cap,
70 *(u32 *) info->model_perm_cap_rating);
71 if (info->model_temp_cap[0] != '\0')
72 len += sprintf(page + len,
73 "Model Temp. Capacity: %-16.16s %08u\n",
74 info->model_temp_cap,
75 *(u32 *) info->model_temp_cap_rating);
76 return len;
77}
78
79static int stsi_1_2_2(struct sysinfo_1_2_2 *info, char *page, int len)
80{
81 struct sysinfo_1_2_2_extension *ext;
82 int i;
83
84 if (stsi(info, 1, 2, 2) == -ENOSYS)
85 return len;
86 ext = (struct sysinfo_1_2_2_extension *)
87 ((unsigned long) info + info->acc_offset);
88
89 len += sprintf(page + len, "\n");
90 len += sprintf(page + len, "CPUs Total: %d\n",
91 info->cpus_total);
92 len += sprintf(page + len, "CPUs Configured: %d\n",
93 info->cpus_configured);
94 len += sprintf(page + len, "CPUs Standby: %d\n",
95 info->cpus_standby);
96 len += sprintf(page + len, "CPUs Reserved: %d\n",
97 info->cpus_reserved);
98
99 if (info->format == 1) {
100 /*
101 * Sigh 2. According to the specification the alternate
102 * capability field is a 32 bit floating point number
103 * if the higher order 8 bits are not zero. Printing
 104 * a floating point number in the kernel is a no-no,
 105 * so always print the number as a 32 bit unsigned integer.
 106 * User space needs to know about the strange
 107 * encoding of the alternate cpu capability.
108 */
109 len += sprintf(page + len, "Capability: %u %u\n",
110 info->capability, ext->alt_capability);
111 for (i = 2; i <= info->cpus_total; i++)
112 len += sprintf(page + len,
113 "Adjustment %02d-way: %u %u\n",
114 i, info->adjustment[i-2],
115 ext->alt_adjustment[i-2]);
116
117 } else {
118 len += sprintf(page + len, "Capability: %u\n",
119 info->capability);
120 for (i = 2; i <= info->cpus_total; i++)
121 len += sprintf(page + len,
122 "Adjustment %02d-way: %u\n",
123 i, info->adjustment[i-2]);
124 }
125
126 if (info->secondary_capability != 0)
127 len += sprintf(page + len, "Secondary Capability: %d\n",
128 info->secondary_capability);
129 return len;
130}
131
132static int stsi_2_2_2(struct sysinfo_2_2_2 *info, char *page, int len)
133{
134 if (stsi(info, 2, 2, 2) == -ENOSYS)
135 return len;
136
137 EBCASC(info->name, sizeof(info->name));
138
139 len += sprintf(page + len, "\n");
140 len += sprintf(page + len, "LPAR Number: %d\n",
141 info->lpar_number);
142
143 len += sprintf(page + len, "LPAR Characteristics: ");
144 if (info->characteristics & LPAR_CHAR_DEDICATED)
145 len += sprintf(page + len, "Dedicated ");
146 if (info->characteristics & LPAR_CHAR_SHARED)
147 len += sprintf(page + len, "Shared ");
148 if (info->characteristics & LPAR_CHAR_LIMITED)
149 len += sprintf(page + len, "Limited ");
150 len += sprintf(page + len, "\n");
151
152 len += sprintf(page + len, "LPAR Name: %-8.8s\n",
153 info->name);
154
155 len += sprintf(page + len, "LPAR Adjustment: %d\n",
156 info->caf);
157
158 len += sprintf(page + len, "LPAR CPUs Total: %d\n",
159 info->cpus_total);
160 len += sprintf(page + len, "LPAR CPUs Configured: %d\n",
161 info->cpus_configured);
162 len += sprintf(page + len, "LPAR CPUs Standby: %d\n",
163 info->cpus_standby);
164 len += sprintf(page + len, "LPAR CPUs Reserved: %d\n",
165 info->cpus_reserved);
166 len += sprintf(page + len, "LPAR CPUs Dedicated: %d\n",
167 info->cpus_dedicated);
168 len += sprintf(page + len, "LPAR CPUs Shared: %d\n",
169 info->cpus_shared);
170 return len;
171}
172
173static int stsi_3_2_2(struct sysinfo_3_2_2 *info, char *page, int len)
174{
175 int i;
176
177 if (stsi(info, 3, 2, 2) == -ENOSYS)
178 return len;
179 for (i = 0; i < info->count; i++) {
180 EBCASC(info->vm[i].name, sizeof(info->vm[i].name));
181 EBCASC(info->vm[i].cpi, sizeof(info->vm[i].cpi));
182 len += sprintf(page + len, "\n");
183 len += sprintf(page + len, "VM%02d Name: %-8.8s\n",
184 i, info->vm[i].name);
185 len += sprintf(page + len, "VM%02d Control Program: %-16.16s\n",
186 i, info->vm[i].cpi);
187
188 len += sprintf(page + len, "VM%02d Adjustment: %d\n",
189 i, info->vm[i].caf);
190
191 len += sprintf(page + len, "VM%02d CPUs Total: %d\n",
192 i, info->vm[i].cpus_total);
193 len += sprintf(page + len, "VM%02d CPUs Configured: %d\n",
194 i, info->vm[i].cpus_configured);
195 len += sprintf(page + len, "VM%02d CPUs Standby: %d\n",
196 i, info->vm[i].cpus_standby);
197 len += sprintf(page + len, "VM%02d CPUs Reserved: %d\n",
198 i, info->vm[i].cpus_reserved);
199 }
200 return len;
201}
202
203static int proc_read_sysinfo(char *page, char **start,
204 off_t off, int count,
205 int *eof, void *data)
206{
207 unsigned long info = get_zeroed_page(GFP_KERNEL);
208 int level, len;
209
210 if (!info)
211 return 0;
212
213 len = 0;
214 level = stsi_0();
215 if (level >= 1)
216 len = stsi_1_1_1((struct sysinfo_1_1_1 *) info, page, len);
217
218 if (level >= 1)
219 len = stsi_1_2_2((struct sysinfo_1_2_2 *) info, page, len);
220
221 if (level >= 2)
222 len = stsi_2_2_2((struct sysinfo_2_2_2 *) info, page, len);
223
224 if (level >= 3)
225 len = stsi_3_2_2((struct sysinfo_3_2_2 *) info, page, len);
226
227 free_page(info);
228 return len;
229}
230
231static __init int create_proc_sysinfo(void)
232{
233 create_proc_read_entry("sysinfo", 0444, NULL,
234 proc_read_sysinfo, NULL);
235 return 0;
236}
237device_initcall(create_proc_sysinfo);
238
239/*
240 * Service levels interface.
241 */
242
243static DECLARE_RWSEM(service_level_sem);
244static LIST_HEAD(service_level_list);
245
246int register_service_level(struct service_level *slr)
247{
248 struct service_level *ptr;
249
250 down_write(&service_level_sem);
251 list_for_each_entry(ptr, &service_level_list, list)
252 if (ptr == slr) {
253 up_write(&service_level_sem);
254 return -EEXIST;
255 }
256 list_add_tail(&slr->list, &service_level_list);
257 up_write(&service_level_sem);
258 return 0;
259}
260EXPORT_SYMBOL(register_service_level);
261
262int unregister_service_level(struct service_level *slr)
263{
264 struct service_level *ptr, *next;
265 int rc = -ENOENT;
266
267 down_write(&service_level_sem);
268 list_for_each_entry_safe(ptr, next, &service_level_list, list) {
269 if (ptr != slr)
270 continue;
271 list_del(&ptr->list);
272 rc = 0;
273 break;
274 }
275 up_write(&service_level_sem);
276 return rc;
277}
278EXPORT_SYMBOL(unregister_service_level);
279
280static void *service_level_start(struct seq_file *m, loff_t *pos)
281{
282 down_read(&service_level_sem);
283 return seq_list_start(&service_level_list, *pos);
284}
285
286static void *service_level_next(struct seq_file *m, void *p, loff_t *pos)
287{
288 return seq_list_next(p, &service_level_list, pos);
289}
290
291static void service_level_stop(struct seq_file *m, void *p)
292{
293 up_read(&service_level_sem);
294}
295
296static int service_level_show(struct seq_file *m, void *p)
297{
298 struct service_level *slr;
299
300 slr = list_entry(p, struct service_level, list);
301 slr->seq_print(m, slr);
302 return 0;
303}
304
305static const struct seq_operations service_level_seq_ops = {
306 .start = service_level_start,
307 .next = service_level_next,
308 .stop = service_level_stop,
309 .show = service_level_show
310};
311
312static int service_level_open(struct inode *inode, struct file *file)
313{
314 return seq_open(file, &service_level_seq_ops);
315}
316
317static const struct file_operations service_level_ops = {
318 .open = service_level_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = seq_release
322};
323
324static void service_level_vm_print(struct seq_file *m,
325 struct service_level *slr)
326{
327 char *query_buffer, *str;
328
329 query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA);
330 if (!query_buffer)
331 return;
332 cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
333 str = strchr(query_buffer, '\n');
334 if (str)
335 *str = 0;
336 seq_printf(m, "VM: %s\n", query_buffer);
337 kfree(query_buffer);
338}
339
340static struct service_level service_level_vm = {
341 .seq_print = service_level_vm_print
342};
343
344static __init int create_proc_service_level(void)
345{
346 proc_create("service_levels", 0, NULL, &service_level_ops);
347 if (MACHINE_IS_VM)
348 register_service_level(&service_level_vm);
349 return 0;
350}
351subsys_initcall(create_proc_service_level);
352
353/*
354 * Bogomips calculation based on cpu capability.
355 */
356int get_cpu_capability(unsigned int *capability)
357{
358 struct sysinfo_1_2_2 *info;
359 int rc;
360
361 info = (void *) get_zeroed_page(GFP_KERNEL);
362 if (!info)
363 return -ENOMEM;
364 rc = stsi(info, 1, 2, 2);
365 if (rc == -ENOSYS)
366 goto out;
367 rc = 0;
368 *capability = info->capability;
369out:
370 free_page((unsigned long) info);
371 return rc;
372}
373
374/*
375 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
376 */
377void s390_adjust_jiffies(void)
378{
379 struct sysinfo_1_2_2 *info;
380 const unsigned int fmil = 0x4b189680; /* 1e7 as 32-bit float. */
381 FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
382 FP_DECL_EX;
383 unsigned int capability;
384
385 info = (void *) get_zeroed_page(GFP_KERNEL);
386 if (!info)
387 return;
388
389 if (stsi(info, 1, 2, 2) != -ENOSYS) {
390 /*
391 * Major sigh. The cpu capability encoding is "special".
392 * If the first 9 bits of info->capability are 0 then it
393 * is a 32 bit unsigned integer in the range 0 .. 2^23.
394 * If the first 9 bits are != 0 then it is a 32 bit float.
395 * In addition a lower value indicates a proportionally
396 * higher cpu capacity. Bogomips are the other way round.
397 * To get to a halfway suitable number we divide 1e7
398 * by the cpu capability number. Yes, that means a floating
399 * point division .. math-emu here we come :-)
400 */
401 FP_UNPACK_SP(SA, &fmil);
402 if ((info->capability >> 23) == 0)
403 FP_FROM_INT_S(SB, info->capability, 32, int);
404 else
405 FP_UNPACK_SP(SB, &info->capability);
406 FP_DIV_S(SR, SA, SB);
407 FP_TO_INT_S(capability, SR, 32, 0);
408 } else
409 /*
410 * Really old machine without stsi block for basic
411 * cpu information. Report 42.0 bogomips.
412 */
413 capability = 42;
414 loops_per_jiffy = capability * (500000/HZ);
415 free_page((unsigned long) info);
416}
417
418/*
419 * calibrate the delay loop
420 */
421void __cpuinit calibrate_delay(void)
422{
423 s390_adjust_jiffies();
424 /* Print the good old Bogomips line .. */
425 printk(KERN_DEBUG "Calibrating delay loop (skipped)... "
426 "%lu.%02lu BogoMIPS preset\n", loops_per_jiffy/(500000/HZ),
427 (loops_per_jiffy/(5000/HZ)) % 100);
428}
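
Two notes on the new file. For the BogoMIPS path at the end: a machine reporting a raw
capability of 2000 (top 9 bits zero, so it is treated as an integer) yields 10^7 / 2000 =
5000, loops_per_jiffy becomes 5000 * (500000/HZ), and calibrate_delay() prints
"5000.00 BogoMIPS preset". And the service level interface lets any component contribute
one line to /proc/service_levels by registering a struct service_level with a seq_print()
callback, as service_level_vm does for z/VM. A minimal sketch of a hypothetical user (all
names below are made up):

	static void example_seq_print(struct seq_file *m, struct service_level *slr)
	{
		seq_printf(m, "example: firmware level 1.2.3\n");
	}

	static struct service_level example_service_level = {
		.seq_print = example_seq_print,
	};

	static int __init example_init(void)
	{
		/* Fails with -EEXIST if the same entry is registered twice. */
		return register_service_level(&example_service_level);
	}

	static void __exit example_exit(void)
	{
		unregister_service_level(&example_service_level);
	}
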
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index fc468cae4460..f72d41068dc2 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -331,6 +331,7 @@ static unsigned long long adjust_time(unsigned long long old,
331} 331}
332 332
333static DEFINE_PER_CPU(atomic_t, clock_sync_word); 333static DEFINE_PER_CPU(atomic_t, clock_sync_word);
334static DEFINE_MUTEX(clock_sync_mutex);
334static unsigned long clock_sync_flags; 335static unsigned long clock_sync_flags;
335 336
336#define CLOCK_SYNC_HAS_ETR 0 337#define CLOCK_SYNC_HAS_ETR 0
@@ -394,6 +395,20 @@ static void enable_sync_clock(void)
394 atomic_set_mask(0x80000000, sw_ptr); 395 atomic_set_mask(0x80000000, sw_ptr);
395} 396}
396 397
398/*
399 * Function to check if the clock is in sync.
400 */
401static inline int check_sync_clock(void)
402{
403 atomic_t *sw_ptr;
404 int rc;
405
406 sw_ptr = &get_cpu_var(clock_sync_word);
407 rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
 408 put_cpu_var(clock_sync_word);
409 return rc;
410}
411
397/* Single threaded workqueue used for etr and stp sync events */ 412/* Single threaded workqueue used for etr and stp sync events */
398static struct workqueue_struct *time_sync_wq; 413static struct workqueue_struct *time_sync_wq;
399 414
@@ -485,6 +500,8 @@ static void etr_reset(void)
485 if (etr_setr(&etr_eacr) == 0) { 500 if (etr_setr(&etr_eacr) == 0) {
486 etr_tolec = get_clock(); 501 etr_tolec = get_clock();
487 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); 502 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
503 if (etr_port0_online && etr_port1_online)
504 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
488 } else if (etr_port0_online || etr_port1_online) { 505 } else if (etr_port0_online || etr_port1_online) {
489 pr_warning("The real or virtual hardware system does " 506 pr_warning("The real or virtual hardware system does "
490 "not provide an ETR interface\n"); 507 "not provide an ETR interface\n");
@@ -533,8 +550,7 @@ void etr_switch_to_local(void)
533{ 550{
534 if (!etr_eacr.sl) 551 if (!etr_eacr.sl)
535 return; 552 return;
536 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) 553 disable_sync_clock(NULL);
537 disable_sync_clock(NULL);
538 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 554 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
539 queue_work(time_sync_wq, &etr_work); 555 queue_work(time_sync_wq, &etr_work);
540} 556}
@@ -549,8 +565,7 @@ void etr_sync_check(void)
549{ 565{
550 if (!etr_eacr.es) 566 if (!etr_eacr.es)
551 return; 567 return;
552 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) 568 disable_sync_clock(NULL);
553 disable_sync_clock(NULL);
554 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 569 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
555 queue_work(time_sync_wq, &etr_work); 570 queue_work(time_sync_wq, &etr_work);
556} 571}
@@ -914,7 +929,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
914 * Do not try to get the alternate port aib if the clock 929 * Do not try to get the alternate port aib if the clock
915 * is not in sync yet. 930 * is not in sync yet.
916 */ 931 */
917 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es) 932 if (!check_sync_clock())
918 return eacr; 933 return eacr;
919 934
920 /* 935 /*
@@ -997,7 +1012,6 @@ static void etr_work_fn(struct work_struct *work)
997 on_each_cpu(disable_sync_clock, NULL, 1); 1012 on_each_cpu(disable_sync_clock, NULL, 1);
998 del_timer_sync(&etr_timer); 1013 del_timer_sync(&etr_timer);
999 etr_update_eacr(eacr); 1014 etr_update_eacr(eacr);
1000 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1001 goto out_unlock; 1015 goto out_unlock;
1002 } 1016 }
1003 1017
@@ -1071,18 +1085,13 @@ static void etr_work_fn(struct work_struct *work)
1071 /* Both ports not usable. */ 1085 /* Both ports not usable. */
1072 eacr.es = eacr.sl = 0; 1086 eacr.es = eacr.sl = 0;
1073 sync_port = -1; 1087 sync_port = -1;
1074 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1075 } 1088 }
1076 1089
1077 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1078 eacr.es = 0;
1079
1080 /* 1090 /*
1081 * If the clock is in sync just update the eacr and return. 1091 * If the clock is in sync just update the eacr and return.
1082 * If there is no valid sync port wait for a port update. 1092 * If there is no valid sync port wait for a port update.
1083 */ 1093 */
1084 if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) || 1094 if (check_sync_clock() || sync_port < 0) {
1085 eacr.es || sync_port < 0) {
1086 etr_update_eacr(eacr); 1095 etr_update_eacr(eacr);
1087 etr_set_tolec_timeout(now); 1096 etr_set_tolec_timeout(now);
1088 goto out_unlock; 1097 goto out_unlock;
@@ -1103,13 +1112,11 @@ static void etr_work_fn(struct work_struct *work)
1103 * and set up a timer to try again after 0.5 seconds 1112 * and set up a timer to try again after 0.5 seconds
1104 */ 1113 */
1105 etr_update_eacr(eacr); 1114 etr_update_eacr(eacr);
1106 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1107 if (now < etr_tolec + (1600000 << 12) || 1115 if (now < etr_tolec + (1600000 << 12) ||
1108 etr_sync_clock_stop(&aib, sync_port) != 0) { 1116 etr_sync_clock_stop(&aib, sync_port) != 0) {
1109 /* Sync failed. Try again in 1/2 second. */ 1117 /* Sync failed. Try again in 1/2 second. */
1110 eacr.es = 0; 1118 eacr.es = 0;
1111 etr_update_eacr(eacr); 1119 etr_update_eacr(eacr);
1112 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1113 etr_set_sync_timeout(); 1120 etr_set_sync_timeout();
1114 } else 1121 } else
1115 etr_set_tolec_timeout(now); 1122 etr_set_tolec_timeout(now);
@@ -1191,19 +1198,30 @@ static ssize_t etr_online_store(struct sys_device *dev,
1191 return -EINVAL; 1198 return -EINVAL;
1192 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) 1199 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
1193 return -EOPNOTSUPP; 1200 return -EOPNOTSUPP;
1201 mutex_lock(&clock_sync_mutex);
1194 if (dev == &etr_port0_dev) { 1202 if (dev == &etr_port0_dev) {
1195 if (etr_port0_online == value) 1203 if (etr_port0_online == value)
1196 return count; /* Nothing to do. */ 1204 goto out; /* Nothing to do. */
1197 etr_port0_online = value; 1205 etr_port0_online = value;
1206 if (etr_port0_online && etr_port1_online)
1207 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1208 else
1209 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1198 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 1210 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1199 queue_work(time_sync_wq, &etr_work); 1211 queue_work(time_sync_wq, &etr_work);
1200 } else { 1212 } else {
1201 if (etr_port1_online == value) 1213 if (etr_port1_online == value)
1202 return count; /* Nothing to do. */ 1214 goto out; /* Nothing to do. */
1203 etr_port1_online = value; 1215 etr_port1_online = value;
1216 if (etr_port0_online && etr_port1_online)
1217 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1218 else
1219 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1204 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events); 1220 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1205 queue_work(time_sync_wq, &etr_work); 1221 queue_work(time_sync_wq, &etr_work);
1206 } 1222 }
1223out:
1224 mutex_unlock(&clock_sync_mutex);
1207 return count; 1225 return count;
1208} 1226}
1209 1227
@@ -1471,8 +1489,6 @@ static void stp_timing_alert(struct stp_irq_parm *intparm)
1471 */ 1489 */
1472void stp_sync_check(void) 1490void stp_sync_check(void)
1473{ 1491{
1474 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1475 return;
1476 disable_sync_clock(NULL); 1492 disable_sync_clock(NULL);
1477 queue_work(time_sync_wq, &stp_work); 1493 queue_work(time_sync_wq, &stp_work);
1478} 1494}
@@ -1485,8 +1501,6 @@ void stp_sync_check(void)
1485 */ 1501 */
1486void stp_island_check(void) 1502void stp_island_check(void)
1487{ 1503{
1488 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1489 return;
1490 disable_sync_clock(NULL); 1504 disable_sync_clock(NULL);
1491 queue_work(time_sync_wq, &stp_work); 1505 queue_work(time_sync_wq, &stp_work);
1492} 1506}
@@ -1513,10 +1527,6 @@ static int stp_sync_clock(void *data)
1513 1527
1514 enable_sync_clock(); 1528 enable_sync_clock();
1515 1529
1516 set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1517 if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1518 queue_work(time_sync_wq, &etr_work);
1519
1520 rc = 0; 1530 rc = 0;
1521 if (stp_info.todoff[0] || stp_info.todoff[1] || 1531 if (stp_info.todoff[0] || stp_info.todoff[1] ||
1522 stp_info.todoff[2] || stp_info.todoff[3] || 1532 stp_info.todoff[2] || stp_info.todoff[3] ||
@@ -1535,9 +1545,6 @@ static int stp_sync_clock(void *data)
1535 if (rc) { 1545 if (rc) {
1536 disable_sync_clock(NULL); 1546 disable_sync_clock(NULL);
1537 stp_sync->in_sync = -EAGAIN; 1547 stp_sync->in_sync = -EAGAIN;
1538 clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1539 if (etr_port0_online || etr_port1_online)
1540 queue_work(time_sync_wq, &etr_work);
1541 } else 1548 } else
1542 stp_sync->in_sync = 1; 1549 stp_sync->in_sync = 1;
1543 xchg(&first, 0); 1550 xchg(&first, 0);
@@ -1569,6 +1576,10 @@ static void stp_work_fn(struct work_struct *work)
1569 if (rc || stp_info.c == 0) 1576 if (rc || stp_info.c == 0)
1570 goto out_unlock; 1577 goto out_unlock;
1571 1578
1579 /* Skip synchronization if the clock is already in sync. */
1580 if (check_sync_clock())
1581 goto out_unlock;
1582
1572 memset(&stp_sync, 0, sizeof(stp_sync)); 1583 memset(&stp_sync, 0, sizeof(stp_sync));
1573 get_online_cpus(); 1584 get_online_cpus();
1574 atomic_set(&stp_sync.cpus, num_online_cpus() - 1); 1585 atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
@@ -1684,8 +1695,14 @@ static ssize_t stp_online_store(struct sysdev_class *class,
1684 return -EINVAL; 1695 return -EINVAL;
1685 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) 1696 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1686 return -EOPNOTSUPP; 1697 return -EOPNOTSUPP;
1698 mutex_lock(&clock_sync_mutex);
1687 stp_online = value; 1699 stp_online = value;
1700 if (stp_online)
1701 set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1702 else
1703 clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1688 queue_work(time_sync_wq, &stp_work); 1704 queue_work(time_sync_wq, &stp_work);
1705 mutex_unlock(&clock_sync_mutex);
1689 return count; 1706 return count;
1690} 1707}
1691 1708
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index cc362c9ea8f1..3c72c9cf22b6 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -74,7 +74,7 @@ static DEFINE_SPINLOCK(topology_lock);
74 74
75cpumask_t cpu_core_map[NR_CPUS]; 75cpumask_t cpu_core_map[NR_CPUS];
76 76
77cpumask_t cpu_coregroup_map(unsigned int cpu) 77static cpumask_t cpu_coregroup_map(unsigned int cpu)
78{ 78{
79 struct core_info *core = &core_info; 79 struct core_info *core = &core_info;
80 unsigned long flags; 80 unsigned long flags;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4584d81984c0..c2e42cc65ce7 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -61,9 +61,11 @@ extern pgm_check_handler_t do_asce_exception;
61#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 61#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
62 62
63#ifndef CONFIG_64BIT 63#ifndef CONFIG_64BIT
64#define LONG "%08lx "
64#define FOURLONG "%08lx %08lx %08lx %08lx\n" 65#define FOURLONG "%08lx %08lx %08lx %08lx\n"
65static int kstack_depth_to_print = 12; 66static int kstack_depth_to_print = 12;
66#else /* CONFIG_64BIT */ 67#else /* CONFIG_64BIT */
68#define LONG "%016lx "
67#define FOURLONG "%016lx %016lx %016lx %016lx\n" 69#define FOURLONG "%016lx %016lx %016lx %016lx\n"
68static int kstack_depth_to_print = 20; 70static int kstack_depth_to_print = 20;
69#endif /* CONFIG_64BIT */ 71#endif /* CONFIG_64BIT */
@@ -155,7 +157,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
155 break; 157 break;
156 if (i && ((i * sizeof (long) % 32) == 0)) 158 if (i && ((i * sizeof (long) % 32) == 0))
157 printk("\n "); 159 printk("\n ");
158 printk("%p ", (void *)*stack++); 160 printk(LONG, *stack++);
159 } 161 }
160 printk("\n"); 162 printk("\n");
161 show_trace(task, sp); 163 show_trace(task, sp);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 690e17819686..89b2e7f1b7a9 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -144,7 +144,6 @@ out:
144 return -ENOMEM; 144 return -ENOMEM;
145} 145}
146 146
147#ifdef CONFIG_HOTPLUG_CPU
148void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore) 147void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
149{ 148{
150 unsigned long segment_table, page_table, page_frame; 149 unsigned long segment_table, page_table, page_frame;
@@ -163,7 +162,6 @@ void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
163 free_page(page_table); 162 free_page(page_table);
164 free_pages(segment_table, SEGMENT_ORDER); 163 free_pages(segment_table, SEGMENT_ORDER);
165} 164}
166#endif /* CONFIG_HOTPLUG_CPU */
167 165
168static void __vdso_init_cr5(void *dummy) 166static void __vdso_init_cr5(void *dummy)
169{ 167{
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index d796d05c9c01..7a2063eb88f0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -108,6 +108,8 @@ SECTIONS
108 EXIT_TEXT 108 EXIT_TEXT
109 } 109 }
110 110
111 /* early.c uses stsi, which requires page aligned data. */
112 . = ALIGN(PAGE_SIZE);
111 .init.data : { 113 .init.data : {
112 INIT_DATA 114 INIT_DATA
113 } 115 }