Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile        |   9
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c  | 214
-rw-r--r--  arch/s390/kernel/compat_ptrace.h |   4
-rw-r--r--  arch/s390/kernel/debug.c         |   9
-rw-r--r--  arch/s390/kernel/early.c         | 211
-rw-r--r--  arch/s390/kernel/ipl.c           | 462
-rw-r--r--  arch/s390/kernel/kprobes.c       |   4
-rw-r--r--  arch/s390/kernel/machine_kexec.c |   1
-rw-r--r--  arch/s390/kernel/mem_detect.c    | 100
-rw-r--r--  arch/s390/kernel/process.c       |  32
-rw-r--r--  arch/s390/kernel/ptrace.c        | 363
-rw-r--r--  arch/s390/kernel/setup.c         |  51
-rw-r--r--  arch/s390/kernel/time.c          | 634
-rw-r--r--  arch/s390/kernel/topology.c      |   2
-rw-r--r--  arch/s390/kernel/vtime.c         |  81
15 files changed, 1410 insertions(+), 767 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 6302f5082588..50f657e77344 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -7,9 +7,14 @@
 #
 CFLAGS_smp.o	:= -Wno-nonnull
 
+#
+# Pass UTS_MACHINE for user_regset definition
+#
+CFLAGS_ptrace.o	+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
 obj-y	:=  bitmap.o traps.o time.o process.o base.o early.o \
 	    setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o
+	    s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
 
 obj-y	+= $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y	+= $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o
 compat-obj-$(CONFIG_AUDIT) += compat_audit.o
 obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
 			compat_wrapper.o compat_exec_domain.o \
-			binfmt_elf32.o $(compat-obj-y)
+			$(compat-obj-y)
 
 obj-$(CONFIG_VIRT_TIMER) += vtime.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
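
Note: the new CFLAGS_ptrace.o line exists because ptrace.c now builds a user_regset view, and the generic regset core identifies each architecture view by a name string. A minimal sketch of how UTS_MACHINE is typically consumed by such a view (the regset table and the s390_regs_get/s390_regs_set helper names are assumptions for illustration, not code shown in this diff):

	/* sketch only: plumbing UTS_MACHINE into <linux/regset.h> */
	static const struct user_regset s390_regsets[] = {
		[REGSET_GENERAL] = {
			.core_note_type = NT_PRSTATUS,
			.n = sizeof(s390_regs) / sizeof(long),
			.size = sizeof(long),
			.align = sizeof(long),
			.get = s390_regs_get,	/* assumed helper */
			.set = s390_regs_set,	/* assumed helper */
		},
	};

	static const struct user_regset_view user_s390_view = {
		.name = UTS_MACHINE,	/* "s390"/"s390x", from CFLAGS_ptrace.o */
		.e_machine = EM_S390,
		.regsets = s390_regsets,
		.n = ARRAY_SIZE(s390_regsets),
	};
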
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
deleted file mode 100644
index 3e1c315b736d..000000000000
--- a/arch/s390/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Support for 32-bit Linux for S390 ELF binaries.
- *
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Gerhard Tonn (ton@de.ibm.com)
- *
- * Heavily inspired by the 32-bit Sparc compat code which is
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#define __ASMS390_ELF_H
-
-#include <linux/time.h>
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS	ELFCLASS32
-#define ELF_DATA	ELFDATA2MSB
-#define ELF_ARCH	EM_S390
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
-	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
-
-/* ELF register definitions */
-#define NUM_GPRS	16
-#define NUM_FPRS	16
-#define NUM_ACRS	16
-
-/* For SVR4/S390 the function pointer to be registered with `atexit` is
-   passed in R14. */
-#define ELF_PLAT_INIT(_r, load_addr) \
-	do { \
-		_r->gprs[14] = 0; \
-	} while(0)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE	4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
-
-/* Wow, the "main" arch needs arch dependent functions too.. :) */
-
-/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
-   now struct_user_regs, they are different) */
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
-
-#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this CPU supports. */
-
-#define ELF_HWCAP (0)
-
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
-
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
-
-#define ELF_PLATFORM (NULL)
-
-#define SET_PERSONALITY(ex, ibcs2)			\
-do {							\
-	if (ibcs2)					\
-		set_personality(PER_SVR4);		\
-	else if (current->personality != PER_LINUX32)	\
-		set_personality(PER_LINUX);		\
-	set_thread_flag(TIF_31BIT);			\
-} while (0)
-
-#include "compat_linux.h"
-
-typedef _s390_fp_regs32 elf_fpregset_t;
-
-typedef struct
-{
-
-	_psw_t32	psw;
-	__u32		gprs[__NUM_GPRS];
-	__u32		acrs[__NUM_ACRS];
-	__u32		orig_gpr2;
-} s390_regs32;
-typedef s390_regs32 elf_gregset_t;
-
-static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
-{
-	int i;
-
-	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
-	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
-	for (i = 0; i < NUM_GPRS; i++)
-		regs->gprs[i] = ptregs->gprs[i];
-	save_access_regs(regs->acrs);
-	regs->orig_gpr2 = ptregs->orig_gpr2;
-	return 1;
-}
-
-static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
-{
-	struct pt_regs *ptregs = task_pt_regs(tsk);
-	int i;
-
-	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
-	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
-	for (i = 0; i < NUM_GPRS; i++)
-		regs->gprs[i] = ptregs->gprs[i];
-	memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
-	regs->orig_gpr2 = ptregs->orig_gpr2;
-	return 1;
-}
-
-static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-	if (tsk == current)
-		save_fp_regs((s390_fp_regs *) fpregs);
-	else
-		memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
-	return 1;
-}
-
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <linux/module.h>
-#include <linux/elfcore.h>
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-
-#define elf_prstatus elf_prstatus32
-struct elf_prstatus32
-{
-	struct elf_siginfo pr_info;	/* Info associated with signal */
-	short	pr_cursig;		/* Current signal */
-	u32	pr_sigpend;		/* Set of pending signals */
-	u32	pr_sighold;		/* Set of held signals */
-	pid_t	pr_pid;
-	pid_t	pr_ppid;
-	pid_t	pr_pgrp;
-	pid_t	pr_sid;
-	struct compat_timeval pr_utime;	/* User time */
-	struct compat_timeval pr_stime;	/* System time */
-	struct compat_timeval pr_cutime;	/* Cumulative user time */
-	struct compat_timeval pr_cstime;	/* Cumulative system time */
-	elf_gregset_t pr_reg;	/* GP registers */
-	int pr_fpvalid;		/* True if math co-processor being used. */
-};
-
-#define elf_prpsinfo elf_prpsinfo32
-struct elf_prpsinfo32
-{
-	char	pr_state;	/* numeric process state */
-	char	pr_sname;	/* char for pr_state */
-	char	pr_zomb;	/* zombie */
-	char	pr_nice;	/* nice val */
-	u32	pr_flag;	/* flags */
-	u16	pr_uid;
-	u16	pr_gid;
-	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
-	/* Lots missing */
-	char	pr_fname[16];	/* filename of executable */
-	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
-};
-
-#include <linux/highuid.h>
-
-/*
-#define init_elf_binfmt init_elf32_binfmt
-*/
-
-#undef start_thread
-#define start_thread start_thread31
-
-static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw,
-				  unsigned long new_stackp)
-{
-	set_fs(USER_DS);
-	regs->psw.mask = psw_user32_bits;
-	regs->psw.addr = new_psw;
-	regs->gprs[15] = new_stackp;
-	crst_table_downgrade(current->mm, 1UL << 31);
-}
-
-MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
-		   " Copyright 2000 IBM Corporation");
-MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static inline void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-	value->tv_usec = cputime % 1000000;
-	value->tv_sec = cputime / 1000000;
-}
-
-#include "../../../fs/binfmt_elf.c"
-
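
With binfmt_elf32.c removed from the build above, the 31-bit ELF loader presumably comes from the kernel's generic compat ELF support rather than an arch-private wrapper around fs/binfmt_elf.c, with the arch supplying only small overrides in <asm/elf.h>. A sketch of that shape (an assumption for context, not part of this diff):

	/* <asm/elf.h> sketch: pick the 31-bit personality per ELF class */
	#define SET_PERSONALITY(ex, ibcs2)				\
	do {								\
		if (ibcs2)						\
			set_personality(PER_SVR4);			\
		else if (current->personality != PER_LINUX32)		\
			set_personality(PER_LINUX);			\
		if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
			set_thread_flag(TIF_31BIT);			\
		else							\
			clear_thread_flag(TIF_31BIT);			\
	} while (0)
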
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 419aef913ee1..cde81fa64f89 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -1,7 +1,7 @@
 #ifndef _PTRACE32_H
 #define _PTRACE32_H
 
-#include "compat_linux.h"	/* needed for _psw_t32 */
+#include "compat_linux.h"	/* needed for psw_compat_t */
 
 typedef struct {
 	__u32 cr[3];
@@ -38,7 +38,7 @@ typedef struct {
 
 struct user_regs_struct32
 {
-	_psw_t32 psw;
+	psw_compat_t psw;
 	u32 gprs[NUM_GPRS];
 	u32 acrs[NUM_ACRS];
 	u32 orig_gpr2;
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c93d1296cc0a..d80fcd4a7fe1 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1079,7 +1079,6 @@ __init debug_init(void)
 	s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
 	mutex_lock(&debug_mutex);
 	debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
-	printk(KERN_INFO "debug: Initialization complete\n");
 	initialized = 1;
 	mutex_unlock(&debug_mutex);
 
@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf)
 	for(; isspace(*buf); buf++);
 	rc = simple_strtoul(buf, &buf, 10);
 	if(*buf){
-		printk("debug: no integer specified!\n");
 		rc = -EINVAL;
 	}
 	return rc;
@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area)
 				memset(id->areas[i][j], 0, PAGE_SIZE);
 			}
 		}
-		printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
 	} else if(area >= 0 && area < id->nr_areas) {
 		id->active_entries[area] = 0;
 		id->active_pages[area] = 0;
 		for(i = 0; i < id->pages_per_area; i++) {
 			memset(id->areas[area][i],0,PAGE_SIZE);
 		}
-		printk(KERN_INFO "debug: %s: area %i has been flushed\n",
-			id->name, area);
-	} else {
-		printk(KERN_INFO
-		       "debug: %s: area %i cannot be flushed (range: %i - %i)\n",
-		       id->name, area, 0, id->nr_areas-1);
 	}
 	spin_unlock_irqrestore(&id->lock,flags);
 }
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d0e09684b9ce..2a2ca268b1dd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/pfn.h>
 #include <linux/uaccess.h>
+#include <asm/ebcdic.h>
 #include <asm/ipl.h>
 #include <asm/lowcore.h>
 #include <asm/processor.h>
@@ -26,12 +27,40 @@
 /*
  * Create a Kernel NSS if the SAVESYS= parameter is defined
  */
-#define DEFSYS_CMD_SIZE		96
+#define DEFSYS_CMD_SIZE		128
 #define SAVESYS_CMD_SIZE	32
 
 char kernel_nss_name[NSS_NAME_SIZE + 1];
 
+static void __init setup_boot_command_line(void);
+
+
 #ifdef CONFIG_SHARED_KERNEL
+int __init savesys_ipl_nss(char *cmd, const int cmdlen);
+
+asm(
+	"	.section .init.text,\"ax\",@progbits\n"
+	"	.align	4\n"
+	"	.type	savesys_ipl_nss, @function\n"
+	"savesys_ipl_nss:\n"
+#ifdef CONFIG_64BIT
+	"	stmg	6,15,48(15)\n"
+	"	lgr	14,3\n"
+	"	sam31\n"
+	"	diag	2,14,0x8\n"
+	"	sam64\n"
+	"	lgr	2,14\n"
+	"	lmg	6,15,48(15)\n"
+#else
+	"	stm	6,15,24(15)\n"
+	"	lr	14,3\n"
+	"	diag	2,14,0x8\n"
+	"	lr	2,14\n"
+	"	lm	6,15,24(15)\n"
+#endif
+	"	br	14\n"
+	"	.size	savesys_ipl_nss, .-savesys_ipl_nss\n");
+
 static noinline __init void create_kernel_nss(void)
 {
 	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void)
 	unsigned int sinitrd_pfn, einitrd_pfn;
 #endif
 	int response;
+	size_t len;
 	char *savesys_ptr;
 	char upper_command_line[COMMAND_LINE_SIZE];
 	char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void)
 		return;
 
 	/* Convert COMMAND_LINE to upper case */
-	for (i = 0; i < strlen(COMMAND_LINE); i++)
-		upper_command_line[i] = toupper(COMMAND_LINE[i]);
+	for (i = 0; i < strlen(boot_command_line); i++)
+		upper_command_line[i] = toupper(boot_command_line[i]);
 
 	savesys_ptr = strstr(upper_command_line, "SAVESYS=");
 
@@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void)
 	}
 #endif
 
-	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
+	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
+		defsys_cmd, min_size);
 	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
 		kernel_nss_name, kernel_nss_name);
 
@@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void)
 		return;
 	}
 
-	__cpcmd(savesys_cmd, NULL, 0, &response);
+	len = strlen(savesys_cmd);
+	ASCEBC(savesys_cmd, len);
+	response = savesys_ipl_nss(savesys_cmd, len);
 
-	if (response != strlen(savesys_cmd)) {
+	/* On success: response is equal to the command size,
+	 *	       max SAVESYS_CMD_SIZE
+	 * On error: response contains the numeric portion of cp error message.
+	 *	     for SAVESYS it will be >= 263
+	 */
+	if (response > SAVESYS_CMD_SIZE) {
 		kernel_nss_name[0] = '\0';
 		return;
 	}
 
+	/* re-setup boot command line with new ipl vm parms */
+	ipl_update_parameters();
+	setup_boot_command_line();
+
 	ipl_flags = IPL_NSS_VALID;
 }
 
@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void)
 	if (cpuinfo->cpu_id.version == 0xff)
 		machine_flags |= MACHINE_FLAG_VM;
 
-	/* Running on a P/390 ? */
-	if (cpuinfo->cpu_id.machine == 0x7490)
-		machine_flags |= MACHINE_FLAG_P390;
-
 	/* Running under KVM ? */
 	if (cpuinfo->cpu_id.version == 0xfe)
 		machine_flags |= MACHINE_FLAG_KVM;
 }
 
-#ifdef CONFIG_64BIT
-static noinline __init int memory_fast_detect(void)
-{
-	unsigned long val0 = 0;
-	unsigned long val1 = 0xc;
-	int ret = -ENOSYS;
-
-	if (ipl_flags & IPL_NSS_VALID)
-		return -ENOSYS;
-
-	asm volatile(
-		"	diag	%1,%2,0x260\n"
-		"0:	lhi	%0,0\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
-
-	if (ret || val0 != val1)
-		return -ENOSYS;
-
-	memory_chunk[0].size = val0 + 1;
-	return 0;
-}
-#else
-static inline int memory_fast_detect(void)
-{
-	return -ENOSYS;
-}
-#endif
-
-static inline __init unsigned long __tprot(unsigned long addr)
-{
-	int cc = -1;
-
-	asm volatile(
-		"	tprot	0(%1),0\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (cc) : "a" (addr) : "cc");
-	return (unsigned long)cc;
-}
-
-/* Checking memory in 128KB increments. */
-#define CHUNK_INCR	(1UL << 17)
-#define ADDR2G		(1UL << 31)
-
-static noinline __init void find_memory_chunks(unsigned long memsize)
-{
-	unsigned long addr = 0, old_addr = 0;
-	unsigned long old_cc = CHUNK_READ_WRITE;
-	unsigned long cc;
-	int chunk = 0;
-
-	while (chunk < MEMORY_CHUNKS) {
-		cc = __tprot(addr);
-		while (cc == old_cc) {
-			addr += CHUNK_INCR;
-			if (memsize && addr >= memsize)
-				break;
-#ifndef CONFIG_64BIT
-			if (addr == ADDR2G)
-				break;
-#endif
-			cc = __tprot(addr);
-		}
-
-		if (old_addr != addr &&
-		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
-			memory_chunk[chunk].addr = old_addr;
-			memory_chunk[chunk].size = addr - old_addr;
-			memory_chunk[chunk].type = old_cc;
-			chunk++;
-		}
-
-		old_addr = addr;
-		old_cc = cc;
-
-#ifndef CONFIG_64BIT
-		if (addr == ADDR2G)
-			break;
-#endif
-		/*
-		 * Finish memory detection at the first hole
-		 * if storage size is unknown.
-		 */
-		if (cc == -1UL && !memsize)
-			break;
-		if (memsize && addr >= memsize)
-			break;
-	}
-}
-
 static __init void early_pgm_check_handler(void)
 {
 	unsigned long addr;
@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void)
 #endif
 }
 
+static __init void rescue_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * Move the initrd right behind the bss section in case it starts
+	 * within the bss section. So we don't overwrite it when the bss
+	 * section gets cleared.
+	 */
+	if (!INITRD_START || !INITRD_SIZE)
+		return;
+	if (INITRD_START >= (unsigned long) __bss_stop)
+		return;
+	memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
+	INITRD_START = (unsigned long) __bss_stop;
+#endif
+}
+
+/* Set up boot command line */
+static void __init setup_boot_command_line(void)
+{
+	char *parm = NULL;
+
+	/* copy arch command line */
+	strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+	boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+
+	/* append IPL PARM data to the boot command line */
+	if (MACHINE_IS_VM) {
+		parm = boot_command_line + strlen(boot_command_line);
+		*parm++ = ' ';
+		get_ipl_vmparm(parm);
+		if (parm[0] == '=')
+			memmove(boot_command_line, parm + 1, strlen(parm));
+	}
+}
+
+
 /*
  * Save ipl parameters, clear bss memory, initialize storage keys
  * and create a kernel NSS at startup if the SAVESYS= parm is defined
  */
 void __init startup_init(void)
 {
-	unsigned long long memsize;
-
 	ipl_save_parameters();
+	rescue_initrd();
 	clear_bss_section();
 	init_kernel_storage_key();
 	lockdep_init();
 	lockdep_off();
-	detect_machine_type();
-	create_kernel_nss();
 	sort_main_extable();
 	setup_lowcore_early();
+	detect_machine_type();
+	ipl_update_parameters();
+	setup_boot_command_line();
+	create_kernel_nss();
 	detect_mvpg();
 	detect_ieee();
 	detect_csp();
@@ -404,18 +386,7 @@ void __init startup_init(void)
 	detect_diag44();
 	detect_machine_facilities();
 	setup_hpage();
-	sclp_read_info_early();
 	sclp_facilities_detect();
-	memsize = sclp_memory_detect();
-#ifndef CONFIG_64BIT
-	/*
-	 * Can't deal with more than 2G in 31 bit addressing mode, so
-	 * limit the value in order to avoid strange side effects.
-	 */
-	if (memsize > ADDR2G)
-		memsize = ADDR2G;
-#endif
-	if (memory_fast_detect() < 0)
-		find_memory_chunks((unsigned long) memsize);
+	detect_memory_layout(memory_chunk);
 	lockdep_on();
 }
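
A standalone sketch of the append-or-replace rule that setup_boot_command_line() implements for the IPL VM parm (userspace test harness only; the strcpy stands in for get_ipl_vmparm(), which the kernel calls instead):

	#include <stdio.h>
	#include <string.h>

	/* An IPL VM parm starting with '=' replaces the built-in command
	 * line; anything else is appended after a separating blank. */
	static void apply_vmparm(char *cmdline, const char *vmparm)
	{
		char *parm = cmdline + strlen(cmdline);

		*parm++ = ' ';
		strcpy(parm, vmparm);	/* stands in for get_ipl_vmparm() */
		if (parm[0] == '=')
			memmove(cmdline, parm + 1, strlen(parm));
	}

	int main(void)
	{
		char a[128] = "root=/dev/dasda1";
		char b[128] = "root=/dev/dasda1";

		apply_vmparm(a, "novx");	/* -> "root=/dev/dasda1 novx" */
		apply_vmparm(b, "=dasd=0150");	/* -> "dasd=0150" */
		printf("'%s'\n'%s'\n", a, b);
		return 0;
	}
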
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 532542447d66..54b2779b5e2f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -14,6 +14,7 @@
 #include <linux/delay.h>
 #include <linux/reboot.h>
 #include <linux/ctype.h>
+#include <linux/fs.h>
 #include <asm/ipl.h>
 #include <asm/smp.h>
 #include <asm/setup.h>
@@ -22,6 +23,7 @@
 #include <asm/ebcdic.h>
 #include <asm/reset.h>
 #include <asm/sclp.h>
+#include <asm/setup.h>
 
 #define IPL_PARM_BLOCK_VERSION 0
 
@@ -121,6 +123,7 @@ enum ipl_method {
 	REIPL_METHOD_FCP_RO_VM,
 	REIPL_METHOD_FCP_DUMP,
 	REIPL_METHOD_NSS,
+	REIPL_METHOD_NSS_DIAG,
 	REIPL_METHOD_DEFAULT,
 };
 
@@ -134,14 +137,15 @@ enum dump_method {
 
 static int diag308_set_works = 0;
 
+static struct ipl_parameter_block ipl_block;
+
 static int reipl_capabilities = IPL_TYPE_UNKNOWN;
 
 static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
 static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
 static struct ipl_parameter_block *reipl_block_fcp;
 static struct ipl_parameter_block *reipl_block_ccw;
-
-static char reipl_nss_name[NSS_NAME_SIZE + 1];
+static struct ipl_parameter_block *reipl_block_nss;
 
 static int dump_capabilities = DUMP_TYPE_NONE;
 static enum dump_type dump_type = DUMP_TYPE_NONE;
@@ -263,6 +267,56 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
 
 static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
 
+/* VM IPL PARM routines */
+static void reipl_get_ascii_vmparm(char *dest,
+				   const struct ipl_parameter_block *ipb)
+{
+	int i;
+	int len = 0;
+	char has_lowercase = 0;
+
+	if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
+	    (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+
+		len = ipb->ipl_info.ccw.vm_parm_len;
+		memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+		/* If at least one character is lowercase, we assume mixed
+		 * case; otherwise we convert everything to lowercase.
+		 */
+		for (i = 0; i < len; i++)
+			if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
+			    (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
+			    (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
+				has_lowercase = 1;
+				break;
+			}
+		if (!has_lowercase)
+			EBC_TOLOWER(dest, len);
+		EBCASC(dest, len);
+	}
+	dest[len] = 0;
+}
+
+void get_ipl_vmparm(char *dest)
+{
+	if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+		reipl_get_ascii_vmparm(dest, &ipl_block);
+	else
+		dest[0] = 0;
+}
+
+static ssize_t ipl_vm_parm_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	char parm[DIAG308_VMPARM_SIZE + 1] = {};
+
+	get_ipl_vmparm(parm);
+	return sprintf(page, "%s\n", parm);
+}
+
+static struct kobj_attribute sys_ipl_vm_parm_attr =
+	__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
+
 static ssize_t sys_ipl_device_show(struct kobject *kobj,
 				   struct kobj_attribute *attr, char *page)
 {
@@ -285,14 +339,8 @@ static struct kobj_attribute sys_ipl_device_attr =
 static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
 				  char *buf, loff_t off, size_t count)
 {
-	unsigned int size = IPL_PARMBLOCK_SIZE;
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
+					IPL_PARMBLOCK_SIZE);
 }
 
 static struct bin_attribute ipl_parameter_attr = {
@@ -310,12 +358,7 @@ static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *att
 	unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
 	void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
 
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, scp_data + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off, scp_data, size);
 }
 
 static struct bin_attribute ipl_scp_data_attr = {
@@ -370,15 +413,27 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
 static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
 	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
 
-static struct attribute *ipl_ccw_attrs[] = {
+static struct attribute *ipl_ccw_attrs_vm[] = {
 	&sys_ipl_type_attr.attr,
 	&sys_ipl_device_attr.attr,
 	&sys_ipl_ccw_loadparm_attr.attr,
+	&sys_ipl_vm_parm_attr.attr,
 	NULL,
 };
 
-static struct attribute_group ipl_ccw_attr_group = {
-	.attrs = ipl_ccw_attrs,
+static struct attribute *ipl_ccw_attrs_lpar[] = {
+	&sys_ipl_type_attr.attr,
+	&sys_ipl_device_attr.attr,
+	&sys_ipl_ccw_loadparm_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ipl_ccw_attr_group_vm = {
+	.attrs = ipl_ccw_attrs_vm,
+};
+
+static struct attribute_group ipl_ccw_attr_group_lpar = {
+	.attrs = ipl_ccw_attrs_lpar
 };
 
 /* NSS ipl device attributes */
@@ -388,6 +443,8 @@ DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
 static struct attribute *ipl_nss_attrs[] = {
 	&sys_ipl_type_attr.attr,
 	&sys_ipl_nss_name_attr.attr,
+	&sys_ipl_ccw_loadparm_attr.attr,
+	&sys_ipl_vm_parm_attr.attr,
 	NULL,
 };
 
@@ -450,7 +507,12 @@ static int __init ipl_init(void)
 	}
 	switch (ipl_info.type) {
 	case IPL_TYPE_CCW:
-		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
+		if (MACHINE_IS_VM)
+			rc = sysfs_create_group(&ipl_kset->kobj,
+						&ipl_ccw_attr_group_vm);
+		else
+			rc = sysfs_create_group(&ipl_kset->kobj,
+						&ipl_ccw_attr_group_lpar);
 		break;
 	case IPL_TYPE_FCP:
 	case IPL_TYPE_FCP_DUMP:
@@ -481,6 +543,83 @@ static struct shutdown_action __refdata ipl_action = {
  * reipl shutdown action: Reboot Linux on shutdown.
  */
 
+/* VM IPL PARM attributes */
+static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
+					 char *page)
+{
+	char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
+
+	reipl_get_ascii_vmparm(vmparm, ipb);
+	return sprintf(page, "%s\n", vmparm);
+}
+
+static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
+					  size_t vmparm_max,
+					  const char *buf, size_t len)
+{
+	int i, ip_len;
+
+	/* ignore trailing newline */
+	ip_len = len;
+	if ((len > 0) && (buf[len - 1] == '\n'))
+		ip_len--;
+
+	if (ip_len > vmparm_max)
+		return -EINVAL;
+
+	/* parm is used to store kernel options, check for common chars */
+	for (i = 0; i < ip_len; i++)
+		if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
+			return -EINVAL;
+
+	memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
+	ipb->ipl_info.ccw.vm_parm_len = ip_len;
+	if (ip_len > 0) {
+		ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
+		memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
+		ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
+	} else {
+		ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
+	}
+
+	return len;
+}
+
+/* NSS wrapper */
+static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *page)
+{
+	return reipl_generic_vmparm_show(reipl_block_nss, page);
+}
+
+static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t len)
+{
+	return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
+}
+
+/* CCW wrapper */
+static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
+				     struct kobj_attribute *attr, char *page)
+{
+	return reipl_generic_vmparm_show(reipl_block_ccw, page);
+}
+
+static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
+				      struct kobj_attribute *attr,
+				      const char *buf, size_t len)
+{
+	return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
+}
+
+static struct kobj_attribute sys_reipl_nss_vmparm_attr =
+	__ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show,
+	       reipl_nss_vmparm_store);
+static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
+	__ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show,
+	       reipl_ccw_vmparm_store);
+
 /* FCP reipl device attributes */
 
 DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
@@ -513,27 +652,26 @@ static struct attribute_group reipl_fcp_attr_group = {
 DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
 	reipl_block_ccw->ipl_info.ccw.devno);
 
-static void reipl_get_ascii_loadparm(char *loadparm)
+static void reipl_get_ascii_loadparm(char *loadparm,
+				     struct ipl_parameter_block *ibp)
 {
-	memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param,
-	       LOADPARM_LEN);
+	memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
 	EBCASC(loadparm, LOADPARM_LEN);
 	loadparm[LOADPARM_LEN] = 0;
 	strstrip(loadparm);
 }
 
-static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
-				       struct kobj_attribute *attr, char *page)
+static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
+					   char *page)
 {
 	char buf[LOADPARM_LEN + 1];
 
-	reipl_get_ascii_loadparm(buf);
+	reipl_get_ascii_loadparm(buf, ipb);
 	return sprintf(page, "%s\n", buf);
 }
 
-static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
-					struct kobj_attribute *attr,
-					const char *buf, size_t len)
+static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
+					    const char *buf, size_t len)
 {
 	int i, lp_len;
 
@@ -552,35 +690,128 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
 			return -EINVAL;
 	}
 	/* initialize loadparm with blanks */
-	memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN);
+	memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
 	/* copy and convert to ebcdic */
-	memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len);
-	ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN);
+	memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
+	ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
 	return len;
 }
 
+/* NSS wrapper */
+static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *page)
+{
+	return reipl_generic_loadparm_show(reipl_block_nss, page);
+}
+
+static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t len)
+{
+	return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
+}
+
+/* CCW wrapper */
+static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *page)
+{
+	return reipl_generic_loadparm_show(reipl_block_ccw, page);
+}
+
+static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					const char *buf, size_t len)
+{
+	return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
+}
+
 static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
-	__ATTR(loadparm, 0644, reipl_ccw_loadparm_show,
-	       reipl_ccw_loadparm_store);
+	__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
+	       reipl_ccw_loadparm_store);
 
-static struct attribute *reipl_ccw_attrs[] = {
+static struct attribute *reipl_ccw_attrs_vm[] = {
 	&sys_reipl_ccw_device_attr.attr,
 	&sys_reipl_ccw_loadparm_attr.attr,
+	&sys_reipl_ccw_vmparm_attr.attr,
 	NULL,
 };
 
-static struct attribute_group reipl_ccw_attr_group = {
+static struct attribute *reipl_ccw_attrs_lpar[] = {
+	&sys_reipl_ccw_device_attr.attr,
+	&sys_reipl_ccw_loadparm_attr.attr,
+	NULL,
+};
+
+static struct attribute_group reipl_ccw_attr_group_vm = {
+	.name = IPL_CCW_STR,
+	.attrs = reipl_ccw_attrs_vm,
+};
+
+static struct attribute_group reipl_ccw_attr_group_lpar = {
 	.name = IPL_CCW_STR,
-	.attrs = reipl_ccw_attrs,
+	.attrs = reipl_ccw_attrs_lpar,
 };
 
 
 /* NSS reipl device attributes */
+static void reipl_get_ascii_nss_name(char *dst,
+				     struct ipl_parameter_block *ipb)
+{
+	memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
+	EBCASC(dst, NSS_NAME_SIZE);
+	dst[NSS_NAME_SIZE] = 0;
+}
+
+static ssize_t reipl_nss_name_show(struct kobject *kobj,
+				   struct kobj_attribute *attr, char *page)
+{
+	char nss_name[NSS_NAME_SIZE + 1] = {};
 
-DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
+	reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
+	return sprintf(page, "%s\n", nss_name);
+}
+
+static ssize_t reipl_nss_name_store(struct kobject *kobj,
+				    struct kobj_attribute *attr,
+				    const char *buf, size_t len)
+{
+	int nss_len;
+
+	/* ignore trailing newline */
+	nss_len = len;
+	if ((len > 0) && (buf[len - 1] == '\n'))
+		nss_len--;
+
+	if (nss_len > NSS_NAME_SIZE)
+		return -EINVAL;
+
+	memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
+	if (nss_len > 0) {
+		reipl_block_nss->ipl_info.ccw.vm_flags |=
+			DIAG308_VM_FLAGS_NSS_VALID;
+		memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
+		ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
+		EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
+	} else {
+		reipl_block_nss->ipl_info.ccw.vm_flags &=
+			~DIAG308_VM_FLAGS_NSS_VALID;
+	}
+
+	return len;
+}
+
+static struct kobj_attribute sys_reipl_nss_name_attr =
+	__ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show,
+	       reipl_nss_name_store);
+
+static struct kobj_attribute sys_reipl_nss_loadparm_attr =
+	__ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show,
+	       reipl_nss_loadparm_store);
 
 static struct attribute *reipl_nss_attrs[] = {
 	&sys_reipl_nss_name_attr.attr,
+	&sys_reipl_nss_loadparm_attr.attr,
+	&sys_reipl_nss_vmparm_attr.attr,
 	NULL,
 };
 
@@ -617,7 +848,10 @@ static int reipl_set_type(enum ipl_type type)
 		reipl_method = REIPL_METHOD_FCP_DUMP;
 		break;
 	case IPL_TYPE_NSS:
-		reipl_method = REIPL_METHOD_NSS;
+		if (diag308_set_works)
+			reipl_method = REIPL_METHOD_NSS_DIAG;
+		else
+			reipl_method = REIPL_METHOD_NSS;
 		break;
 	case IPL_TYPE_UNKNOWN:
 		reipl_method = REIPL_METHOD_DEFAULT;
@@ -655,11 +889,38 @@ static struct kobj_attribute reipl_type_attr =
 
 static struct kset *reipl_kset;
 
+static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
+			   const enum ipl_method m)
+{
+	char loadparm[LOADPARM_LEN + 1] = {};
+	char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
+	char nss_name[NSS_NAME_SIZE + 1] = {};
+	size_t pos = 0;
+
+	reipl_get_ascii_loadparm(loadparm, ipb);
+	reipl_get_ascii_nss_name(nss_name, ipb);
+	reipl_get_ascii_vmparm(vmparm, ipb);
+
+	switch (m) {
+	case REIPL_METHOD_CCW_VM:
+		pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
+		break;
+	case REIPL_METHOD_NSS:
+		pos = sprintf(dst, "IPL %s", nss_name);
+		break;
+	default:
+		break;
+	}
+	if (strlen(loadparm) > 0)
+		pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
+	if (strlen(vmparm) > 0)
+		sprintf(dst + pos, " PARM %s", vmparm);
+}
+
 static void reipl_run(struct shutdown_trigger *trigger)
 {
 	struct ccw_dev_id devid;
-	static char buf[100];
-	char loadparm[LOADPARM_LEN + 1];
+	static char buf[128];
 
 	switch (reipl_method) {
 	case REIPL_METHOD_CCW_CIO:
@@ -668,13 +929,7 @@ static void reipl_run(struct shutdown_trigger *trigger)
 		reipl_ccw_dev(&devid);
 		break;
 	case REIPL_METHOD_CCW_VM:
-		reipl_get_ascii_loadparm(loadparm);
-		if (strlen(loadparm) == 0)
-			sprintf(buf, "IPL %X CLEAR",
-				reipl_block_ccw->ipl_info.ccw.devno);
-		else
-			sprintf(buf, "IPL %X CLEAR LOADPARM '%s'",
-				reipl_block_ccw->ipl_info.ccw.devno, loadparm);
+		get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
 		__cpcmd(buf, NULL, 0, NULL);
 		break;
 	case REIPL_METHOD_CCW_DIAG:
@@ -691,8 +946,12 @@ static void reipl_run(struct shutdown_trigger *trigger)
 	case REIPL_METHOD_FCP_RO_VM:
 		__cpcmd("IPL", NULL, 0, NULL);
 		break;
+	case REIPL_METHOD_NSS_DIAG:
+		diag308(DIAG308_SET, reipl_block_nss);
+		diag308(DIAG308_IPL, NULL);
+		break;
 	case REIPL_METHOD_NSS:
-		sprintf(buf, "IPL %s", reipl_nss_name);
+		get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
 		__cpcmd(buf, NULL, 0, NULL);
 		break;
 	case REIPL_METHOD_DEFAULT:
@@ -707,16 +966,36 @@ static void reipl_run(struct shutdown_trigger *trigger)
 	disabled_wait((unsigned long) __builtin_return_address(0));
 }
 
-static void __init reipl_probe(void)
+static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
 {
-	void *buffer;
+	ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
+	ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
+	ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
+	ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+}
 
-	buffer = (void *) get_zeroed_page(GFP_KERNEL);
-	if (!buffer)
-		return;
-	if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK)
-		diag308_set_works = 1;
-	free_page((unsigned long)buffer);
+static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
+{
+	/* LOADPARM */
+	/* check if read scp info worked and set loadparm */
+	if (sclp_ipl_info.is_valid)
+		memcpy(ipb->ipl_info.ccw.load_parm,
+				&sclp_ipl_info.loadparm, LOADPARM_LEN);
+	else
+		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
+		memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
+	ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
+
+	/* VM PARM */
+	if (MACHINE_IS_VM && diag308_set_works &&
+	    (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
+
+		ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
+		ipb->ipl_info.ccw.vm_parm_len =
+					ipl_block.ipl_info.ccw.vm_parm_len;
+		memcpy(ipb->ipl_info.ccw.vm_parm,
+		       ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
+	}
 }
 
 static int __init reipl_nss_init(void)
@@ -725,10 +1004,31 @@ static int __init reipl_nss_init(void)
 
 	if (!MACHINE_IS_VM)
 		return 0;
+
+	reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
+	if (!reipl_block_nss)
+		return -ENOMEM;
+
+	if (!diag308_set_works)
+		sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
+
 	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
 	if (rc)
 		return rc;
-	strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
+
+	reipl_block_ccw_init(reipl_block_nss);
+	if (ipl_info.type == IPL_TYPE_NSS) {
+		memset(reipl_block_nss->ipl_info.ccw.nss_name,
+			' ', NSS_NAME_SIZE);
+		memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
+			kernel_nss_name, strlen(kernel_nss_name));
+		ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
+		reipl_block_nss->ipl_info.ccw.vm_flags |=
+			DIAG308_VM_FLAGS_NSS_VALID;
+
+		reipl_block_ccw_fill_parms(reipl_block_nss);
+	}
+
 	reipl_capabilities |= IPL_TYPE_NSS;
 	return 0;
 }
@@ -740,28 +1040,27 @@ static int __init reipl_ccw_init(void)
 	reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!reipl_block_ccw)
 		return -ENOMEM;
-	rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group);
-	if (rc) {
-		free_page((unsigned long)reipl_block_ccw);
-		return rc;
+
+	if (MACHINE_IS_VM) {
+		if (!diag308_set_works)
+			sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
+		rc = sysfs_create_group(&reipl_kset->kobj,
+					&reipl_ccw_attr_group_vm);
+	} else {
+		if(!diag308_set_works)
+			sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
+		rc = sysfs_create_group(&reipl_kset->kobj,
+					&reipl_ccw_attr_group_lpar);
 	}
-	reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
-	reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
-	reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
-	reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
-	reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID;
-	/* check if read scp info worked and set loadparm */
-	if (sclp_ipl_info.is_valid)
-		memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
-		       &sclp_ipl_info.loadparm, LOADPARM_LEN);
-	else
-		/* read scp info failed: set empty loadparm (EBCDIC blanks) */
-		memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
-		       LOADPARM_LEN);
-	if (!MACHINE_IS_VM && !diag308_set_works)
-		sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
-	if (ipl_info.type == IPL_TYPE_CCW)
+	if (rc)
+		return rc;
+
+	reipl_block_ccw_init(reipl_block_ccw);
+	if (ipl_info.type == IPL_TYPE_CCW) {
 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
+		reipl_block_ccw_fill_parms(reipl_block_ccw);
+	}
+
 	reipl_capabilities |= IPL_TYPE_CCW;
 	return 0;
 }
@@ -1298,7 +1597,6 @@ static void __init shutdown_actions_init(void)
 
 static int __init s390_ipl_init(void)
 {
-	reipl_probe();
 	sclp_get_ipl_info(&sclp_ipl_info);
 	shutdown_actions_init();
 	shutdown_triggers_init();
@@ -1405,6 +1703,12 @@ void __init setup_ipl(void)
 	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
 }
 
+void __init ipl_update_parameters(void)
+{
+	if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK)
+		diag308_set_works = 1;
+}
+
 void __init ipl_save_parameters(void)
 {
 	struct cio_iplinfo iplinfo;
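
The new vmparm attributes are plain sysfs files; a userspace sketch exercising them (the /sys/firmware/reipl/ccw/parm path is inferred from the "reipl" kset and the IPL_CCW_STR group name used above, so treat it as an assumption):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *parm = "root=/dev/dasda1 selinux=0\n";
		char buf[64];
		ssize_t n;
		int fd;

		/* set the PARM string used on the next re-IPL under z/VM */
		fd = open("/sys/firmware/reipl/ccw/parm", O_WRONLY);
		if (fd < 0 || write(fd, parm, strlen(parm)) < 0)
			perror("write parm");
		if (fd >= 0)
			close(fd);

		/* read it back (the show handler appends a newline) */
		fd = open("/sys/firmware/reipl/ccw/parm", O_RDONLY);
		if (fd >= 0 && (n = read(fd, buf, sizeof(buf) - 1)) > 0) {
			buf[n] = 0;
			printf("next re-IPL PARM: %s", buf);
			close(fd);
		}
		return 0;
	}
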
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index ed04d1372d5d..288ad490a6dd 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
 		return -EINVAL;
 
-	if ((unsigned long)p->addr & 0x01) {
-		printk("Attempt to register kprobe at an unaligned address\n");
+	if ((unsigned long)p->addr & 0x01)
 		return -EINVAL;
-	}
 
 	/* Use the get_insn_slot() facility for correctness */
 	if (!(p->ainsn.insn = get_insn_slot()))
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3c77dd36994c..131d7ee8b416 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image)
 
 void machine_shutdown(void)
 {
-	printk(KERN_INFO "kexec: machine_shutdown called\n");
 }
 
 void machine_kexec(struct kimage *image)
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
new file mode 100644
index 000000000000..18ed7abe16c5
--- /dev/null
+++ b/arch/s390/kernel/mem_detect.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright IBM Corp. 2008
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/ipl.h>
+#include <asm/sclp.h>
+#include <asm/setup.h>
+
+static int memory_fast_detect(struct mem_chunk *chunk)
+{
+	unsigned long val0 = 0;
+	unsigned long val1 = 0xc;
+	int rc = -EOPNOTSUPP;
+
+	if (ipl_flags & IPL_NSS_VALID)
+		return -EOPNOTSUPP;
+	asm volatile(
+		"	diag	%1,%2,0x260\n"
+		"0:	lhi	%0,0\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");
+
+	if (rc || val0 != val1)
+		return -EOPNOTSUPP;
+	chunk->size = val0 + 1;
+	return 0;
+}
+
+static inline int tprot(unsigned long addr)
+{
+	int rc = -EFAULT;
+
+	asm volatile(
+		"	tprot	0(%1),0\n"
+		"0:	ipm	%0\n"
+		"	srl	%0,28\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "+d" (rc) : "a" (addr) : "cc");
+	return rc;
+}
+
+#define ADDR2G (1ULL << 31)
+
+static void find_memory_chunks(struct mem_chunk chunk[])
+{
+	unsigned long long memsize, rnmax, rzm;
+	unsigned long addr = 0, size;
+	int i = 0, type;
+
+	rzm = sclp_get_rzm();
+	rnmax = sclp_get_rnmax();
+	memsize = rzm * rnmax;
+	if (!rzm)
+		rzm = 1ULL << 17;
+	if (sizeof(long) == 4) {
+		rzm = min(ADDR2G, rzm);
+		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
+	}
+	do {
+		size = 0;
+		type = tprot(addr);
+		do {
+			size += rzm;
+			if (memsize && addr + size >= memsize)
+				break;
+		} while (type == tprot(addr + size));
+		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
+			chunk[i].addr = addr;
+			chunk[i].size = size;
+			chunk[i].type = type;
+			i++;
+		}
+		addr += size;
+	} while (addr < memsize && i < MEMORY_CHUNKS);
+}
+
+void detect_memory_layout(struct mem_chunk chunk[])
+{
+	unsigned long flags, cr0;
+
+	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
+	if (memory_fast_detect(&chunk[0]) == 0)
+		return;
+	/* Disable IRQs, DAT and low address protection so tprot does the
+	 * right thing and we don't get scheduled away with low address
+	 * protection disabled.
+	 */
+	flags = __raw_local_irq_stnsm(0xf8);
+	__ctl_store(cr0, 0, 0);
+	__ctl_clear_bit(0, 28);
+	find_memory_chunks(chunk);
+	__ctl_load(cr0, 0, 0);
+	__raw_local_irq_ssm(flags);
+}
+EXPORT_SYMBOL(detect_memory_layout);
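
A sketch of a caller, assuming the mem_chunk layout used above (addr/size/type and the MEMORY_CHUNKS bound from <asm/setup.h>); entries past the last detected chunk stay zero because detect_memory_layout() clears the table first:

	static struct mem_chunk memory_chunk[MEMORY_CHUNKS];

	static void __init show_memory_layout(void)
	{
		int i;

		detect_memory_layout(memory_chunk);
		for (i = 0; i < MEMORY_CHUNKS; i++) {
			if (!memory_chunk[i].size)
				continue;	/* zero-filled past last chunk */
			printk(KERN_INFO "chunk %d: %lx-%lx (%s)\n", i,
			       memory_chunk[i].addr,
			       memory_chunk[i].addr + memory_chunk[i].size - 1,
			       memory_chunk[i].type == CHUNK_READ_WRITE ?
						"read-write" : "read-only");
		}
	}
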
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7920861109d2..85defd01d293 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return sf->gprs[8];
 }
 
-/*
- * Need to know about CPUs going idle?
- */
-static ATOMIC_NOTIFIER_HEAD(idle_chain);
 DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
-int register_idle_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_register(&idle_chain, nb);
-}
-EXPORT_SYMBOL(register_idle_notifier);
-
-int unregister_idle_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&idle_chain, nb);
-}
-EXPORT_SYMBOL(unregister_idle_notifier);
-
 static int s390_idle_enter(void)
 {
 	struct s390_idle_data *idle;
-	int nr_calls = 0;
-	void *hcpu;
-	int rc;
 
-	hcpu = (void *)(long)smp_processor_id();
-	rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
-					  &nr_calls);
-	if (rc == NOTIFY_BAD) {
-		nr_calls--;
-		__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-					     hcpu, nr_calls, NULL);
-		return rc;
-	}
 	idle = &__get_cpu_var(s390_idle);
 	spin_lock(&idle->lock);
 	idle->idle_count++;
 	idle->in_idle = 1;
 	idle->idle_enter = get_clock();
 	spin_unlock(&idle->lock);
+	vtime_stop_cpu_timer();
 	return NOTIFY_OK;
 }
 
@@ -122,13 +95,12 @@ void s390_idle_leave(void)
 {
 	struct s390_idle_data *idle;
 
+	vtime_start_cpu_timer();
 	idle = &__get_cpu_var(s390_idle);
 	spin_lock(&idle->lock);
 	idle->idle_time += get_clock() - idle->idle_enter;
 	idle->in_idle = 0;
 	spin_unlock(&idle->lock);
-	atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
-				   (void *)(long) smp_processor_id());
 }
 
 extern void s390_handle_mcck(void);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 35827b9bd4d1..2815bfe348a6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -33,6 +33,8 @@
33#include <linux/security.h> 33#include <linux/security.h>
34#include <linux/audit.h> 34#include <linux/audit.h>
35#include <linux/signal.h> 35#include <linux/signal.h>
36#include <linux/elf.h>
37#include <linux/regset.h>
36 38
37#include <asm/segment.h> 39#include <asm/segment.h>
38#include <asm/page.h> 40#include <asm/page.h>
@@ -47,6 +49,11 @@
47#include "compat_ptrace.h" 49#include "compat_ptrace.h"
48#endif 50#endif
49 51
52enum s390_regset {
53 REGSET_GENERAL,
54 REGSET_FP,
55};
56
50static void 57static void
51FixPerRegisters(struct task_struct *task) 58FixPerRegisters(struct task_struct *task)
52{ 59{
@@ -126,24 +133,10 @@ ptrace_disable(struct task_struct *child)
126 * struct user contain pad bytes that should be read as zeroes. 133 * struct user contain pad bytes that should be read as zeroes.
127 * Lovely... 134 * Lovely...
128 */ 135 */
129static int 136static unsigned long __peek_user(struct task_struct *child, addr_t addr)
130peek_user(struct task_struct *child, addr_t addr, addr_t data)
131{ 137{
132 struct user *dummy = NULL; 138 struct user *dummy = NULL;
133 addr_t offset, tmp, mask; 139 addr_t offset, tmp;
134
135 /*
136 * Stupid gdb peeks/pokes the access registers in 64 bit with
137 * an alignment of 4. Programmers from hell...
138 */
139 mask = __ADDR_MASK;
140#ifdef CONFIG_64BIT
141 if (addr >= (addr_t) &dummy->regs.acrs &&
142 addr < (addr_t) &dummy->regs.orig_gpr2)
143 mask = 3;
144#endif
145 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
146 return -EIO;
147 140
148 if (addr < (addr_t) &dummy->regs.acrs) { 141 if (addr < (addr_t) &dummy->regs.acrs) {
149 /* 142 /*
@@ -197,24 +190,18 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
197 } else 190 } else
198 tmp = 0; 191 tmp = 0;
199 192
200 return put_user(tmp, (addr_t __user *) data); 193 return tmp;
201} 194}
202 195
203/*
204 * Write a word to the user area of a process at location addr. This
205 * operation does have an additional problem compared to peek_user.
206 * Stores to the program status word and on the floating point
207 * control register needs to get checked for validity.
208 */
209static int 196static int
210poke_user(struct task_struct *child, addr_t addr, addr_t data) 197peek_user(struct task_struct *child, addr_t addr, addr_t data)
211{ 198{
212 struct user *dummy = NULL; 199 struct user *dummy = NULL;
213 addr_t offset, mask; 200 addr_t tmp, mask;
214 201
215 /* 202 /*
216 * Stupid gdb peeks/pokes the access registers in 64 bit with 203 * Stupid gdb peeks/pokes the access registers in 64 bit with
217 * an alignment of 4. Programmers from hell indeed... 204 * an alignment of 4. Programmers from hell...
218 */ 205 */
219 mask = __ADDR_MASK; 206 mask = __ADDR_MASK;
220#ifdef CONFIG_64BIT 207#ifdef CONFIG_64BIT
@@ -225,6 +212,21 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
225 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 212 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
226 return -EIO; 213 return -EIO;
227 214
215 tmp = __peek_user(child, addr);
216 return put_user(tmp, (addr_t __user *) data);
217}
218
219/*
220 * Write a word to the user area of a process at location addr. This
221 * operation does have an additional problem compared to peek_user.
222 * Stores to the program status word and on the floating point
223 * control register needs to get checked for validity.
224 */
225static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
226{
227 struct user *dummy = NULL;
228 addr_t offset;
229
228 if (addr < (addr_t) &dummy->regs.acrs) { 230 if (addr < (addr_t) &dummy->regs.acrs) {
229 /* 231 /*
230 * psw and gprs are stored on the stack 232 * psw and gprs are stored on the stack
@@ -292,6 +294,28 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
292 return 0; 294 return 0;
293} 295}
294 296
297static int
298poke_user(struct task_struct *child, addr_t addr, addr_t data)
299{
300 struct user *dummy = NULL;
301 addr_t mask;
302
303 /*
304 * Stupid gdb peeks/pokes the access registers in 64 bit with
305 * an alignment of 4. Programmers from hell indeed...
306 */
307 mask = __ADDR_MASK;
308#ifdef CONFIG_64BIT
309 if (addr >= (addr_t) &dummy->regs.acrs &&
310 addr < (addr_t) &dummy->regs.orig_gpr2)
311 mask = 3;
312#endif
313 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
314 return -EIO;
315
316 return __poke_user(child, addr, data);
317}
318
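
The hunks above split each accessor into a validating wrapper and a raw worker: peek_user()/poke_user() keep the ptrace alignment and bounds checks, while __peek_user()/__poke_user() assume a validated offset, so the regset callbacks added further down can reuse them without repeating the checks. A minimal user-space sketch of the same split, with hypothetical names:

	#include <errno.h>
	#include <stdio.h>

	static unsigned long area[16];

	/* raw worker: the caller guarantees an aligned, in-bounds offset */
	static unsigned long __peek(unsigned long off)
	{
		return area[off / sizeof(unsigned long)];
	}

	/* validating wrapper: the only place that checks alignment and bounds */
	static int peek(unsigned long off, unsigned long *out)
	{
		if ((off & (sizeof(unsigned long) - 1)) || off >= sizeof(area))
			return -EIO;
		*out = __peek(off);
		return 0;
	}

	int main(void)
	{
		unsigned long v;

		area[1] = 42;
		if (peek(sizeof(unsigned long), &v) == 0)
			printf("%lu\n", v);	/* 42 */
		return 0;
	}
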
295long arch_ptrace(struct task_struct *child, long request, long addr, long data) 319long arch_ptrace(struct task_struct *child, long request, long addr, long data)
296{ 320{
297 ptrace_area parea; 321 ptrace_area parea;
@@ -367,18 +391,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
367/* 391/*
368 * Same as peek_user but for a 31 bit program. 392 * Same as peek_user but for a 31 bit program.
369 */ 393 */
370static int 394static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
371peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
372{ 395{
373 struct user32 *dummy32 = NULL; 396 struct user32 *dummy32 = NULL;
374 per_struct32 *dummy_per32 = NULL; 397 per_struct32 *dummy_per32 = NULL;
375 addr_t offset; 398 addr_t offset;
376 __u32 tmp; 399 __u32 tmp;
377 400
378 if (!test_thread_flag(TIF_31BIT) ||
379 (addr & 3) || addr > sizeof(struct user) - 3)
380 return -EIO;
381
382 if (addr < (addr_t) &dummy32->regs.acrs) { 401 if (addr < (addr_t) &dummy32->regs.acrs) {
383 /* 402 /*
384 * psw and gprs are stored on the stack 403 * psw and gprs are stored on the stack
@@ -435,25 +454,32 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
435 } else 454 } else
436 tmp = 0; 455 tmp = 0;
437 456
457 return tmp;
458}
459
460static int peek_user_compat(struct task_struct *child,
461 addr_t addr, addr_t data)
462{
463 __u32 tmp;
464
465 if (!test_thread_flag(TIF_31BIT) ||
466 (addr & 3) || addr > sizeof(struct user) - 3)
467 return -EIO;
468
469 tmp = __peek_user_compat(child, addr);
438 return put_user(tmp, (__u32 __user *) data); 470 return put_user(tmp, (__u32 __user *) data);
439} 471}
440 472
441/* 473/*
442 * Same as poke_user but for a 31 bit program. 474 * Same as poke_user but for a 31 bit program.
443 */ 475 */
444static int 476static int __poke_user_compat(struct task_struct *child,
445poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) 477 addr_t addr, addr_t data)
446{ 478{
447 struct user32 *dummy32 = NULL; 479 struct user32 *dummy32 = NULL;
448 per_struct32 *dummy_per32 = NULL; 480 per_struct32 *dummy_per32 = NULL;
481 __u32 tmp = (__u32) data;
449 addr_t offset; 482 addr_t offset;
450 __u32 tmp;
451
452 if (!test_thread_flag(TIF_31BIT) ||
453 (addr & 3) || addr > sizeof(struct user32) - 3)
454 return -EIO;
455
456 tmp = (__u32) data;
457 483
458 if (addr < (addr_t) &dummy32->regs.acrs) { 484 if (addr < (addr_t) &dummy32->regs.acrs) {
459 /* 485 /*
@@ -528,6 +554,16 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
528 return 0; 554 return 0;
529} 555}
530 556
557static int poke_user_compat(struct task_struct *child,
558 addr_t addr, addr_t data)
559{
560 if (!test_thread_flag(TIF_31BIT) ||
561 (addr & 3) || addr > sizeof(struct user32) - 3)
562 return -EIO;
563
564 return __poke_user_compat(child, addr, data);
565}
566
531long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 567long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
532 compat_ulong_t caddr, compat_ulong_t cdata) 568 compat_ulong_t caddr, compat_ulong_t cdata)
533{ 569{
@@ -539,11 +575,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
539 switch (request) { 575 switch (request) {
540 case PTRACE_PEEKUSR: 576 case PTRACE_PEEKUSR:
541 /* read the word at location addr in the USER area. */ 577 /* read the word at location addr in the USER area. */
542 return peek_user_emu31(child, addr, data); 578 return peek_user_compat(child, addr, data);
543 579
544 case PTRACE_POKEUSR: 580 case PTRACE_POKEUSR:
545 /* write the word at location addr in the USER area */ 581 /* write the word at location addr in the USER area */
546 return poke_user_emu31(child, addr, data); 582 return poke_user_compat(child, addr, data);
547 583
548 case PTRACE_PEEKUSR_AREA: 584 case PTRACE_PEEKUSR_AREA:
549 case PTRACE_POKEUSR_AREA: 585 case PTRACE_POKEUSR_AREA:
@@ -555,13 +591,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
555 copied = 0; 591 copied = 0;
556 while (copied < parea.len) { 592 while (copied < parea.len) {
557 if (request == PTRACE_PEEKUSR_AREA) 593 if (request == PTRACE_PEEKUSR_AREA)
558 ret = peek_user_emu31(child, addr, data); 594 ret = peek_user_compat(child, addr, data);
559 else { 595 else {
560 __u32 utmp; 596 __u32 utmp;
561 if (get_user(utmp, 597 if (get_user(utmp,
562 (__u32 __force __user *) data)) 598 (__u32 __force __user *) data))
563 return -EFAULT; 599 return -EFAULT;
564 ret = poke_user_emu31(child, addr, utmp); 600 ret = poke_user_compat(child, addr, utmp);
565 } 601 }
566 if (ret) 602 if (ret)
567 return ret; 603 return ret;
@@ -610,3 +646,240 @@ syscall_trace(struct pt_regs *regs, int entryexit)
610 regs->gprs[2], regs->orig_gpr2, regs->gprs[3], 646 regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
611 regs->gprs[4], regs->gprs[5]); 647 regs->gprs[4], regs->gprs[5]);
612} 648}
649
650/*
651 * user_regset definitions.
652 */
653
654static int s390_regs_get(struct task_struct *target,
655 const struct user_regset *regset,
656 unsigned int pos, unsigned int count,
657 void *kbuf, void __user *ubuf)
658{
659 if (target == current)
660 save_access_regs(target->thread.acrs);
661
662 if (kbuf) {
663 unsigned long *k = kbuf;
664 while (count > 0) {
665 *k++ = __peek_user(target, pos);
666 count -= sizeof(*k);
667 pos += sizeof(*k);
668 }
669 } else {
670 unsigned long __user *u = ubuf;
671 while (count > 0) {
672 if (__put_user(__peek_user(target, pos), u++))
673 return -EFAULT;
674 count -= sizeof(*u);
675 pos += sizeof(*u);
676 }
677 }
678 return 0;
679}
680
681static int s390_regs_set(struct task_struct *target,
682 const struct user_regset *regset,
683 unsigned int pos, unsigned int count,
684 const void *kbuf, const void __user *ubuf)
685{
686 int rc = 0;
687
688 if (target == current)
689 save_access_regs(target->thread.acrs);
690
691 if (kbuf) {
692 const unsigned long *k = kbuf;
693 while (count > 0 && !rc) {
694 rc = __poke_user(target, pos, *k++);
695 count -= sizeof(*k);
696 pos += sizeof(*k);
697 }
698 } else {
699 const unsigned long __user *u = ubuf;
700 while (count > 0 && !rc) {
701 unsigned long word;
702 rc = __get_user(word, u++);
703 if (rc)
704 break;
705 rc = __poke_user(target, pos, word);
706 count -= sizeof(*u);
707 pos += sizeof(*u);
708 }
709 }
710
711 if (rc == 0 && target == current)
712 restore_access_regs(target->thread.acrs);
713
714 return rc;
715}
716
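
s390_regs_get() and s390_regs_set() walk the user area one word at a time and feed either a kernel buffer or a user buffer, whichever the regset core hands in. A user-space model of that dual-destination loop; peek_word() stands in for __peek_user() and memcpy() for __put_user():

	#include <stdio.h>
	#include <string.h>

	/* stand-in for __peek_user(target, pos): word value derived from pos */
	static unsigned long peek_word(unsigned int pos)
	{
		return pos;
	}

	/* one loop body, two destinations: kernel buffer or user buffer */
	static int regs_get(unsigned int pos, unsigned int count,
			    void *kbuf, void *ubuf)
	{
		if (kbuf) {
			unsigned long *k = kbuf;
			while (count > 0) {
				*k++ = peek_word(pos);
				count -= sizeof(*k);
				pos += sizeof(*k);
			}
		} else {
			unsigned long *u = ubuf;
			while (count > 0) {
				unsigned long w = peek_word(pos);
				memcpy(u++, &w, sizeof(w));	/* __put_user() stand-in */
				count -= sizeof(w);
				pos += sizeof(w);
			}
		}
		return 0;
	}

	int main(void)
	{
		unsigned long regs[4];

		regs_get(0, sizeof(regs), regs, NULL);
		printf("regs[2] = %lu\n", regs[2]);	/* 2 * sizeof(long) */
		return 0;
	}
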
717static int s390_fpregs_get(struct task_struct *target,
718 const struct user_regset *regset, unsigned int pos,
719 unsigned int count, void *kbuf, void __user *ubuf)
720{
721 if (target == current)
722 save_fp_regs(&target->thread.fp_regs);
723
724 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
725 &target->thread.fp_regs, 0, -1);
726}
727
728static int s390_fpregs_set(struct task_struct *target,
729 const struct user_regset *regset, unsigned int pos,
730 unsigned int count, const void *kbuf,
731 const void __user *ubuf)
732{
733 int rc = 0;
734
735 if (target == current)
736 save_fp_regs(&target->thread.fp_regs);
737
738 /* If setting FPC, must validate it first. */
739 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
740 u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
741 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
742 0, offsetof(s390_fp_regs, fprs));
743 if (rc)
744 return rc;
745 if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
746 return -EINVAL;
747 target->thread.fp_regs.fpc = fpc[0];
748 }
749
750 if (rc == 0 && count > 0)
751 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
752 target->thread.fp_regs.fprs,
753 offsetof(s390_fp_regs, fprs), -1);
754
755 if (rc == 0 && target == current)
756 restore_fp_regs(&target->thread.fp_regs);
757
758 return rc;
759}
760
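
s390_fpregs_set() validates a new floating point control word against FPC_VALID_MASK before it lets any part of the write take effect. The validate-then-commit shape in isolation; MY_FPC_VALID_MASK is an illustrative value, not the real s390 mask:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MY_FPC_VALID_MASK 0xfcfff0ffU	/* illustrative, not the real mask */

	struct fp_state {
		uint32_t fpc;
		uint64_t fprs[16];
	};

	/* validate the control word first; only then commit the data words */
	static int fpregs_set(struct fp_state *st, uint32_t new_fpc,
			      const uint64_t *new_fprs, size_t n)
	{
		if (new_fpc & ~MY_FPC_VALID_MASK)
			return -EINVAL;	/* reject before touching anything */
		st->fpc = new_fpc;
		for (size_t i = 0; i < n && i < 16; i++)
			st->fprs[i] = new_fprs[i];
		return 0;
	}

	int main(void)
	{
		struct fp_state st = { 0 };
		const uint64_t fprs[2] = { 1, 2 };

		return fpregs_set(&st, 0, fprs, 2);	/* fpc 0 is always valid */
	}
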
761static const struct user_regset s390_regsets[] = {
762 [REGSET_GENERAL] = {
763 .core_note_type = NT_PRSTATUS,
764 .n = sizeof(s390_regs) / sizeof(long),
765 .size = sizeof(long),
766 .align = sizeof(long),
767 .get = s390_regs_get,
768 .set = s390_regs_set,
769 },
770 [REGSET_FP] = {
771 .core_note_type = NT_PRFPREG,
772 .n = sizeof(s390_fp_regs) / sizeof(long),
773 .size = sizeof(long),
774 .align = sizeof(long),
775 .get = s390_fpregs_get,
776 .set = s390_fpregs_set,
777 },
778};
779
780static const struct user_regset_view user_s390_view = {
781 .name = UTS_MACHINE,
782 .e_machine = EM_S390,
783 .regsets = s390_regsets,
784 .n = ARRAY_SIZE(s390_regsets)
785};
786
787#ifdef CONFIG_COMPAT
788static int s390_compat_regs_get(struct task_struct *target,
789 const struct user_regset *regset,
790 unsigned int pos, unsigned int count,
791 void *kbuf, void __user *ubuf)
792{
793 if (target == current)
794 save_access_regs(target->thread.acrs);
795
796 if (kbuf) {
797 compat_ulong_t *k = kbuf;
798 while (count > 0) {
799 *k++ = __peek_user_compat(target, pos);
800 count -= sizeof(*k);
801 pos += sizeof(*k);
802 }
803 } else {
804 compat_ulong_t __user *u = ubuf;
805 while (count > 0) {
806 if (__put_user(__peek_user_compat(target, pos), u++))
807 return -EFAULT;
808 count -= sizeof(*u);
809 pos += sizeof(*u);
810 }
811 }
812 return 0;
813}
814
815static int s390_compat_regs_set(struct task_struct *target,
816 const struct user_regset *regset,
817 unsigned int pos, unsigned int count,
818 const void *kbuf, const void __user *ubuf)
819{
820 int rc = 0;
821
822 if (target == current)
823 save_access_regs(target->thread.acrs);
824
825 if (kbuf) {
826 const compat_ulong_t *k = kbuf;
827 while (count > 0 && !rc) {
828 rc = __poke_user_compat(target, pos, *k++);
829 count -= sizeof(*k);
830 pos += sizeof(*k);
831 }
832 } else {
833 const compat_ulong_t __user *u = ubuf;
834 while (count > 0 && !rc) {
835 compat_ulong_t word;
836 rc = __get_user(word, u++);
837 if (rc)
838 break;
839 rc = __poke_user_compat(target, pos, word);
840 count -= sizeof(*u);
841 pos += sizeof(*u);
842 }
843 }
844
845 if (rc == 0 && target == current)
846 restore_access_regs(target->thread.acrs);
847
848 return rc;
849}
850
851static const struct user_regset s390_compat_regsets[] = {
852 [REGSET_GENERAL] = {
853 .core_note_type = NT_PRSTATUS,
854 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
855 .size = sizeof(compat_long_t),
856 .align = sizeof(compat_long_t),
857 .get = s390_compat_regs_get,
858 .set = s390_compat_regs_set,
859 },
860 [REGSET_FP] = {
861 .core_note_type = NT_PRFPREG,
862 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
863 .size = sizeof(compat_long_t),
864 .align = sizeof(compat_long_t),
865 .get = s390_fpregs_get,
866 .set = s390_fpregs_set,
867 },
868};
869
870static const struct user_regset_view user_s390_compat_view = {
871 .name = "s390",
872 .e_machine = EM_S390,
873 .regsets = s390_compat_regsets,
874 .n = ARRAY_SIZE(s390_compat_regsets)
875};
876#endif
877
878const struct user_regset_view *task_user_regset_view(struct task_struct *task)
879{
880#ifdef CONFIG_COMPAT
881 if (test_tsk_thread_flag(task, TIF_31BIT))
882 return &user_s390_compat_view;
883#endif
884 return &user_s390_view;
885}
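
task_user_regset_view() selects the regset table by the bitness of the traced task, not of the tracer, so a 64-bit debugger examining a 31-bit child gets the compat layout automatically. The same per-task dispatch, reduced to a toy lookup with hypothetical types and values:

	#include <stdio.h>

	struct regset_view { const char *name; int word_size; };

	static const struct regset_view view_64 = { "s390x", 8 };
	static const struct regset_view view_31 = { "s390", 4 };

	struct task { int is_31bit; };

	/* per-task selection: the tracer's own bitness is irrelevant */
	static const struct regset_view *view_of(const struct task *t)
	{
		return t->is_31bit ? &view_31 : &view_64;
	}

	int main(void)
	{
		struct task child = { .is_31bit = 1 };

		printf("%s, %d-byte words\n", view_of(&child)->name,
		       view_of(&child)->word_size);
		return 0;
	}
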
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2bc70b6e876a..b358e18273b0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -77,7 +77,7 @@ unsigned long machine_flags;
77unsigned long elf_hwcap = 0; 77unsigned long elf_hwcap = 0;
78char elf_platform[ELF_PLATFORM_SIZE]; 78char elf_platform[ELF_PLATFORM_SIZE];
79 79
80struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS]; 80struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
81volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ 81volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
82static unsigned long __initdata memory_end; 82static unsigned long __initdata memory_end;
83 83
@@ -205,12 +205,6 @@ static void __init conmode_default(void)
205 SET_CONSOLE_SCLP; 205 SET_CONSOLE_SCLP;
206#endif 206#endif
207 } 207 }
208 } else if (MACHINE_IS_P390) {
209#if defined(CONFIG_TN3215_CONSOLE)
210 SET_CONSOLE_3215;
211#elif defined(CONFIG_TN3270_CONSOLE)
212 SET_CONSOLE_3270;
213#endif
214 } else { 208 } else {
215#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 209#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
216 SET_CONSOLE_SCLP; 210 SET_CONSOLE_SCLP;
@@ -221,18 +215,17 @@ static void __init conmode_default(void)
221#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 215#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
222static void __init setup_zfcpdump(unsigned int console_devno) 216static void __init setup_zfcpdump(unsigned int console_devno)
223{ 217{
224 static char str[64]; 218 static char str[41];
225 219
226 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 220 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
227 return; 221 return;
228 if (console_devno != -1) 222 if (console_devno != -1)
229 sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", 223 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
230 ipl_info.data.fcp.dev_id.devno, console_devno); 224 ipl_info.data.fcp.dev_id.devno, console_devno);
231 else 225 else
232 sprintf(str, "cio_ignore=all,!0.0.%04x", 226 sprintf(str, " cio_ignore=all,!0.0.%04x",
233 ipl_info.data.fcp.dev_id.devno); 227 ipl_info.data.fcp.dev_id.devno);
234 strcat(COMMAND_LINE, " "); 228 strcat(boot_command_line, str);
235 strcat(COMMAND_LINE, str);
236 console_loglevel = 2; 229 console_loglevel = 2;
237} 230}
238#else 231#else
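
The shrink from str[64] to str[41] still leaves headroom: the longest possible string, " cio_ignore=all,!0.0.xxxx,!0.0.xxxx", is 35 characters plus the terminating NUL. A quick user-space check of that worst case:

	#include <stdio.h>

	int main(void)
	{
		char str[41];
		int n;

		/* worst case: two 4-digit device numbers */
		n = snprintf(str, sizeof(str), " cio_ignore=all,!0.0.%04x,!0.0.%04x",
			     0xffff, 0xffff);
		printf("%d chars used of %zu\n", n, sizeof(str));	/* 35 of 41 */
		return (size_t)n < sizeof(str) ? 0 : 1;
	}
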
@@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p)
289} 282}
290early_param("mem", early_parse_mem); 283early_param("mem", early_parse_mem);
291 284
292/*
293 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
294 */
295static int __init early_parse_ipldelay(char *p)
296{
297 unsigned long delay = 0;
298
299 delay = simple_strtoul(p, &p, 0);
300
301 switch (*p) {
302 case 's':
303 case 'S':
304 delay *= 1000000;
305 break;
306 case 'm':
307 case 'M':
308 delay *= 60 * 1000000;
309 }
310
311 /* now wait for the requested amount of time */
312 udelay(delay);
313
314 return 0;
315}
316early_param("ipldelay", early_parse_ipldelay);
317
318#ifdef CONFIG_S390_SWITCH_AMODE 285#ifdef CONFIG_S390_SWITCH_AMODE
319#ifdef CONFIG_PGSTE 286#ifdef CONFIG_PGSTE
320unsigned int switch_amode = 1; 287unsigned int switch_amode = 1;
@@ -804,11 +771,9 @@ setup_arch(char **cmdline_p)
804 printk("We are running native (64 bit mode)\n"); 771 printk("We are running native (64 bit mode)\n");
805#endif /* CONFIG_64BIT */ 772#endif /* CONFIG_64BIT */
806 773
807 /* Save unparsed command line copy for /proc/cmdline */ 774 /* Have one command line that is parsed and saved in /proc/cmdline */
808 strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); 775 /* boot_command_line has already been set up in early.c */
809 776 *cmdline_p = boot_command_line;
810 *cmdline_p = COMMAND_LINE;
811 *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
812 777
813 ROOT_DEV = Root_RAM0; 778 ROOT_DEV = Root_RAM0;
814 779
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7aec676fefd5..7418bebb547f 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -3,7 +3,7 @@
3 * Time of day based timer functions. 3 * Time of day based timer functions.
4 * 4 *
5 * S390 version 5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 1999, 2008
7 * Author(s): Hartmut Penner (hp@de.ibm.com), 7 * Author(s): Hartmut Penner (hp@de.ibm.com),
8 * Martin Schwidefsky (schwidefsky@de.ibm.com), 8 * Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
@@ -31,6 +31,7 @@
31#include <linux/notifier.h> 31#include <linux/notifier.h>
32#include <linux/clocksource.h> 32#include <linux/clocksource.h>
33#include <linux/clockchips.h> 33#include <linux/clockchips.h>
34#include <linux/bootmem.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/delay.h> 36#include <asm/delay.h>
36#include <asm/s390_ext.h> 37#include <asm/s390_ext.h>
@@ -162,7 +163,7 @@ void init_cpu_timer(void)
162 /* Enable clock comparator timer interrupt. */ 163 /* Enable clock comparator timer interrupt. */
163 __ctl_set_bit(0,11); 164 __ctl_set_bit(0,11);
164 165
165 /* Always allow ETR external interrupts, even without an ETR. */ 166 /* Always allow the timing alert external interrupt. */
166 __ctl_set_bit(0, 4); 167 __ctl_set_bit(0, 4);
167} 168}
168 169
@@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code)
170{ 171{
171} 172}
172 173
174static void etr_timing_alert(struct etr_irq_parm *);
175static void stp_timing_alert(struct stp_irq_parm *);
176
177static void timing_alert_interrupt(__u16 code)
178{
179 if (S390_lowcore.ext_params & 0x00c40000)
180 etr_timing_alert((struct etr_irq_parm *)
181 &S390_lowcore.ext_params);
182 if (S390_lowcore.ext_params & 0x00038000)
183 stp_timing_alert((struct stp_irq_parm *)
184 &S390_lowcore.ext_params);
185}
186
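
timing_alert_interrupt() demultiplexes the single 0x1406 external interrupt by testing two disjoint bit groups in ext_params, so ETR and STP can share one interrupt source. The dispatch pattern on its own, with the mask values taken from the hunk above:

	#include <stdint.h>
	#include <stdio.h>

	#define ETR_BITS 0x00c40000U	/* ETR port/alert bits, as in the patch */
	#define STP_BITS 0x00038000U	/* STP tsc/lac/tcpc bits, as in the patch */

	static void handle_etr(uint32_t p) { printf("etr: %#x\n", p & ETR_BITS); }
	static void handle_stp(uint32_t p) { printf("stp: %#x\n", p & STP_BITS); }

	/* one interrupt, two possible consumers; both may fire for one word */
	static void timing_alert(uint32_t ext_params)
	{
		if (ext_params & ETR_BITS)
			handle_etr(ext_params);
		if (ext_params & STP_BITS)
			handle_stp(ext_params);
	}

	int main(void)
	{
		timing_alert(0x00400000);	/* ETR only */
		timing_alert(0x00008000);	/* STP only */
		return 0;
	}
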
173static void etr_reset(void); 187static void etr_reset(void);
174static void etr_ext_handler(__u16); 188static void stp_reset(void);
175 189
176/* 190/*
177 * Get the TOD clock running. 191 * Get the TOD clock running.
@@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void)
181 u64 time; 195 u64 time;
182 196
183 etr_reset(); 197 etr_reset();
198 stp_reset();
184 if (store_clock(&time) == 0) 199 if (store_clock(&time) == 0)
185 return time; 200 return time;
186 /* TOD clock not running. Set the clock to Unix Epoch. */ 201 /* TOD clock not running. Set the clock to Unix Epoch. */
@@ -231,8 +246,9 @@ void __init time_init(void)
231 if (clocksource_register(&clocksource_tod) != 0) 246 if (clocksource_register(&clocksource_tod) != 0)
232 panic("Could not register TOD clock source"); 247 panic("Could not register TOD clock source");
233 248
234 /* request the etr external interrupt */ 249 /* request the timing alert external interrupt */
235 if (register_early_external_interrupt(0x1406, etr_ext_handler, 250 if (register_early_external_interrupt(0x1406,
251 timing_alert_interrupt,
236 &ext_int_etr_cc) != 0) 252 &ext_int_etr_cc) != 0)
237 panic("Couldn't request external interrupt 0x1406"); 253 panic("Couldn't request external interrupt 0x1406");
238 254
@@ -245,10 +261,112 @@ void __init time_init(void)
245} 261}
246 262
247/* 263/*
264 * The time is "clock". "old" is what we think the time is.
265 * Adjust the value by a multiple of jiffies and add the delta to ntp.
266 * "delay" is an approximation of how long the synchronization took. If
267 * the time correction is positive, then "delay" is subtracted from
268 * the time difference and only the remaining part is passed to ntp.
269 */
270static unsigned long long adjust_time(unsigned long long old,
271 unsigned long long clock,
272 unsigned long long delay)
273{
274 unsigned long long delta, ticks;
275 struct timex adjust;
276
277 if (clock > old) {
278 /* It is later than we thought. */
279 delta = ticks = clock - old;
280 delta = ticks = (delta < delay) ? 0 : delta - delay;
281 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
282 adjust.offset = ticks * (1000000 / HZ);
283 } else {
284 /* It is earlier than we thought. */
285 delta = ticks = old - clock;
286 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
287 delta = -delta;
288 adjust.offset = -ticks * (1000000 / HZ);
289 }
290 jiffies_timer_cc += delta;
291 if (adjust.offset != 0) {
292 printk(KERN_NOTICE "etr: time adjusted by %li microseconds\n",
293 adjust.offset);
294 adjust.modes = ADJ_OFFSET_SINGLESHOT;
295 do_adjtimex(&adjust);
296 }
297 return delta;
298}
299
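
adjust_time() splits the measured offset into a whole multiple of CLK_TICKS_PER_JIFFY, which is handed to ntp via do_adjtimex(), and folds only that jiffy-aligned part into jiffies_timer_cc. A worked example of the arithmetic, assuming HZ = 100 and the s390 TOD rate of 4096 ticks per microsecond:

	#include <stdio.h>

	/* assumed: HZ = 100, TOD rate 4096 ticks per microsecond */
	#define CLK_TICKS_PER_JIFFY	(4096000000ULL / 100)

	int main(void)
	{
		unsigned long long delta, ticks, offset_us;

		/* the clock turned out to be 123456 us later than we thought */
		delta = ticks = 123456ULL * 4096;
		delta -= ticks % CLK_TICKS_PER_JIFFY;	/* do_div() remainder */
		ticks /= CLK_TICKS_PER_JIFFY;		/* whole jiffies: 12 */
		offset_us = ticks * (1000000 / 100);	/* ntp offset: 120000 us */
		printf("offset %llu us, jiffies_timer_cc fixup %llu ticks\n",
		       offset_us, delta);
		return 0;
	}
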
300static DEFINE_PER_CPU(atomic_t, clock_sync_word);
301static unsigned long clock_sync_flags;
302
303#define CLOCK_SYNC_HAS_ETR 0
304#define CLOCK_SYNC_HAS_STP 1
305#define CLOCK_SYNC_ETR 2
306#define CLOCK_SYNC_STP 3
307
308/*
309 * The synchronous get_clock function. It writes the current clock
310 * value to the clock pointer and returns 0 if the clock is in sync
311 * with the external time source. It returns -ENOSYS if no external
312 * reference mechanism is available, -EACCES if none is currently in
313 * use, and -EAGAIN if the clock is not (yet) in sync with it.
314 */
315int get_sync_clock(unsigned long long *clock)
316{
317 atomic_t *sw_ptr;
318 unsigned int sw0, sw1;
319
320 sw_ptr = &get_cpu_var(clock_sync_word);
321 sw0 = atomic_read(sw_ptr);
322 *clock = get_clock();
323 sw1 = atomic_read(sw_ptr);
324	put_cpu_var(clock_sync_word);
325 if (sw0 == sw1 && (sw0 & 0x80000000U))
326 /* Success: time is in sync. */
327 return 0;
328 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
329 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
330 return -ENOSYS;
331 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
332 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
333 return -EACCES;
334 return -EAGAIN;
335}
336EXPORT_SYMBOL(get_sync_clock);
337
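
get_sync_clock() is lock-free: it samples the per-cpu sync word before and after reading the clock and accepts the result only if both samples match and the in-sync bit 2^31 is set; disable_sync_clock() clears the bit and bumps the sequence bits so a straddling reader retries. A single-threaded user-space model of the protocol using C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint sync_word;	/* bit 31 = in sync, low bits = sequence */
	static unsigned long long fake_clock = 100;

	static int get_sync_clock(unsigned long long *clk)
	{
		unsigned int sw0, sw1;

		sw0 = atomic_load(&sync_word);
		*clk = fake_clock;		/* get_clock() stand-in */
		sw1 = atomic_load(&sync_word);
		if (sw0 == sw1 && (sw0 & 0x80000000U))
			return 0;		/* stable and in sync */
		return -1;			/* caller must retry or fall back */
	}

	static void disable_sync_clock(void)
	{
		/* clear in-sync, then bump the sequence: a reader that
		 * straddles this sees sw0 != sw1 and rejects the value */
		atomic_fetch_and(&sync_word, ~0x80000000U);
		atomic_fetch_add(&sync_word, 1);
	}

	int main(void)
	{
		unsigned long long c;

		atomic_fetch_or(&sync_word, 0x80000000U);	/* enable_sync_clock() */
		printf("sync read: %d\n", get_sync_clock(&c));	/* 0 */
		disable_sync_clock();
		printf("after disable: %d\n", get_sync_clock(&c));	/* -1 */
		return 0;
	}
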
338/*
339 * Make get_sync_clock return -EAGAIN.
340 */
341static void disable_sync_clock(void *dummy)
342{
343 atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
344 /*
345 * Clear the in-sync bit 2^31. All get_sync_clock calls will
346 * fail until the sync bit is turned back on. In addition
347 * increase the "sequence" counter so that get_sync_clock cannot
348 * race against an etr event and the subsequent complete recovery.
349 */
350 atomic_clear_mask(0x80000000, sw_ptr);
351 atomic_inc(sw_ptr);
352}
353
354/*
355 * Make get_sync_clock return 0 again.
356 * Needs to be called from a context disabled for preemption.
357 */
358static void enable_sync_clock(void)
359{
360 atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
361 atomic_set_mask(0x80000000, sw_ptr);
362}
363
364/*
248 * External Time Reference (ETR) code. 365 * External Time Reference (ETR) code.
249 */ 366 */
250static int etr_port0_online; 367static int etr_port0_online;
251static int etr_port1_online; 368static int etr_port1_online;
369static int etr_steai_available;
252 370
253static int __init early_parse_etr(char *p) 371static int __init early_parse_etr(char *p)
254{ 372{
@@ -273,12 +391,6 @@ enum etr_event {
273 ETR_EVENT_UPDATE, 391 ETR_EVENT_UPDATE,
274}; 392};
275 393
276enum etr_flags {
277 ETR_FLAG_ENOSYS,
278 ETR_FLAG_EACCES,
279 ETR_FLAG_STEAI,
280};
281
282/* 394/*
283 * Valid bit combinations of the eacr register are (x = don't care): 395 * Valid bit combinations of the eacr register are (x = don't care):
284 * e0 e1 dp p0 p1 ea es sl 396 * e0 e1 dp p0 p1 ea es sl
@@ -305,74 +417,18 @@ enum etr_flags {
305 */ 417 */
306static struct etr_eacr etr_eacr; 418static struct etr_eacr etr_eacr;
307static u64 etr_tolec; /* time of last eacr update */ 419static u64 etr_tolec; /* time of last eacr update */
308static unsigned long etr_flags;
309static struct etr_aib etr_port0; 420static struct etr_aib etr_port0;
310static int etr_port0_uptodate; 421static int etr_port0_uptodate;
311static struct etr_aib etr_port1; 422static struct etr_aib etr_port1;
312static int etr_port1_uptodate; 423static int etr_port1_uptodate;
313static unsigned long etr_events; 424static unsigned long etr_events;
314static struct timer_list etr_timer; 425static struct timer_list etr_timer;
315static DEFINE_PER_CPU(atomic_t, etr_sync_word);
316 426
317static void etr_timeout(unsigned long dummy); 427static void etr_timeout(unsigned long dummy);
318static void etr_work_fn(struct work_struct *work); 428static void etr_work_fn(struct work_struct *work);
319static DECLARE_WORK(etr_work, etr_work_fn); 429static DECLARE_WORK(etr_work, etr_work_fn);
320 430
321/* 431/*
322 * The etr get_clock function. It will write the current clock value
323 * to the clock pointer and return 0 if the clock is in sync with the
324 * external time source. If the clock mode is local it will return
325 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
326 * reference. This function is what ETR is all about..
327 */
328int get_sync_clock(unsigned long long *clock)
329{
330 atomic_t *sw_ptr;
331 unsigned int sw0, sw1;
332
333 sw_ptr = &get_cpu_var(etr_sync_word);
334 sw0 = atomic_read(sw_ptr);
335 *clock = get_clock();
336 sw1 = atomic_read(sw_ptr);
337 put_cpu_var(etr_sync_sync);
338 if (sw0 == sw1 && (sw0 & 0x80000000U))
339 /* Success: time is in sync. */
340 return 0;
341 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
342 return -ENOSYS;
343 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
344 return -EACCES;
345 return -EAGAIN;
346}
347EXPORT_SYMBOL(get_sync_clock);
348
349/*
350 * Make get_sync_clock return -EAGAIN.
351 */
352static void etr_disable_sync_clock(void *dummy)
353{
354 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
355 /*
356 * Clear the in-sync bit 2^31. All get_sync_clock calls will
357 * fail until the sync bit is turned back on. In addition
358 * increase the "sequence" counter to avoid the race of an
359 * etr event and the complete recovery against get_sync_clock.
360 */
361 atomic_clear_mask(0x80000000, sw_ptr);
362 atomic_inc(sw_ptr);
363}
364
365/*
366 * Make get_sync_clock return 0 again.
367 * Needs to be called from a context disabled for preemption.
368 */
369static void etr_enable_sync_clock(void)
370{
371 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
372 atomic_set_mask(0x80000000, sw_ptr);
373}
374
375/*
376 * Reset ETR attachment. 432 * Reset ETR attachment.
377 */ 433 */
378static void etr_reset(void) 434static void etr_reset(void)
@@ -381,15 +437,13 @@ static void etr_reset(void)
381 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, 437 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
382 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, 438 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
383 .es = 0, .sl = 0 }; 439 .es = 0, .sl = 0 };
384 if (etr_setr(&etr_eacr) == 0) 440 if (etr_setr(&etr_eacr) == 0) {
385 etr_tolec = get_clock(); 441 etr_tolec = get_clock();
386 else { 442 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
387 set_bit(ETR_FLAG_ENOSYS, &etr_flags); 443 } else if (etr_port0_online || etr_port1_online) {
388 if (etr_port0_online || etr_port1_online) { 444 printk(KERN_WARNING "Running on non ETR capable "
389 printk(KERN_WARNING "Running on non ETR capable " 445 "machine, only local mode available.\n");
390 "machine, only local mode available.\n"); 446 etr_port0_online = etr_port1_online = 0;
391 etr_port0_online = etr_port1_online = 0;
392 }
393 } 447 }
394} 448}
395 449
@@ -397,14 +451,12 @@ static int __init etr_init(void)
397{ 451{
398 struct etr_aib aib; 452 struct etr_aib aib;
399 453
400 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 454 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
401 return 0; 455 return 0;
402 /* Check if this machine has the steai instruction. */ 456 /* Check if this machine has the steai instruction. */
403 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) 457 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
404 set_bit(ETR_FLAG_STEAI, &etr_flags); 458 etr_steai_available = 1;
405 setup_timer(&etr_timer, etr_timeout, 0UL); 459 setup_timer(&etr_timer, etr_timeout, 0UL);
406 if (!etr_port0_online && !etr_port1_online)
407 set_bit(ETR_FLAG_EACCES, &etr_flags);
408 if (etr_port0_online) { 460 if (etr_port0_online) {
409 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 461 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
410 schedule_work(&etr_work); 462 schedule_work(&etr_work);
@@ -435,7 +487,8 @@ void etr_switch_to_local(void)
435{ 487{
436 if (!etr_eacr.sl) 488 if (!etr_eacr.sl)
437 return; 489 return;
438 etr_disable_sync_clock(NULL); 490 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
491 disable_sync_clock(NULL);
439 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 492 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
440 schedule_work(&etr_work); 493 schedule_work(&etr_work);
441} 494}
@@ -450,23 +503,21 @@ void etr_sync_check(void)
450{ 503{
451 if (!etr_eacr.es) 504 if (!etr_eacr.es)
452 return; 505 return;
453 etr_disable_sync_clock(NULL); 506 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
507 disable_sync_clock(NULL);
454 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 508 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
455 schedule_work(&etr_work); 509 schedule_work(&etr_work);
456} 510}
457 511
458/* 512/*
459 * ETR external interrupt. There are two causes: 513 * ETR timing alert. There are two causes:
460 * 1) port state change, check the usability of the port 514 * 1) port state change, check the usability of the port
461 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the 515 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
462 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) 516 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
463 * or ETR-data word 4 (edf4) has changed. 517 * or ETR-data word 4 (edf4) has changed.
464 */ 518 */
465static void etr_ext_handler(__u16 code) 519static void etr_timing_alert(struct etr_irq_parm *intparm)
466{ 520{
467 struct etr_interruption_parameter *intparm =
468 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
469
470 if (intparm->pc0) 521 if (intparm->pc0)
471 /* ETR port 0 state change. */ 522 /* ETR port 0 state change. */
472 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 523 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
@@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
591 return 1; 642 return 1;
592} 643}
593 644
594/* 645struct clock_sync_data {
595 * The time is "clock". old is what we think the time is.
596 * Adjust the value by a multiple of jiffies and add the delta to ntp.
597 * "delay" is an approximation how long the synchronization took. If
598 * the time correction is positive, then "delay" is subtracted from
599 * the time difference and only the remaining part is passed to ntp.
600 */
601static unsigned long long etr_adjust_time(unsigned long long old,
602 unsigned long long clock,
603 unsigned long long delay)
604{
605 unsigned long long delta, ticks;
606 struct timex adjust;
607
608 if (clock > old) {
609 /* It is later than we thought. */
610 delta = ticks = clock - old;
611 delta = ticks = (delta < delay) ? 0 : delta - delay;
612 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
613 adjust.offset = ticks * (1000000 / HZ);
614 } else {
615 /* It is earlier than we thought. */
616 delta = ticks = old - clock;
617 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
618 delta = -delta;
619 adjust.offset = -ticks * (1000000 / HZ);
620 }
621 jiffies_timer_cc += delta;
622 if (adjust.offset != 0) {
623 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
624 adjust.offset);
625 adjust.modes = ADJ_OFFSET_SINGLESHOT;
626 do_adjtimex(&adjust);
627 }
628 return delta;
629}
630
631static struct {
632 int in_sync; 646 int in_sync;
633 unsigned long long fixup_cc; 647 unsigned long long fixup_cc;
634} etr_sync; 648};
635 649
636static void etr_sync_cpu_start(void *dummy) 650static void clock_sync_cpu_start(void *dummy)
637{ 651{
638 etr_enable_sync_clock(); 652 struct clock_sync_data *sync = dummy;
653
654 enable_sync_clock();
639 /* 655 /*
640 * This looks like a busy wait loop but it isn't. etr_sync_cpus 656 * This looks like a busy wait loop but it isn't. etr_sync_cpus
641 * is called on all other cpus while the TOD clocks is stopped. 657 * is called on all other cpus while the TOD clocks is stopped.
642 * __udelay will stop the cpu on an enabled wait psw until the 658 * __udelay will stop the cpu on an enabled wait psw until the
643 * TOD is running again. 659 * TOD is running again.
644 */ 660 */
645 while (etr_sync.in_sync == 0) { 661 while (sync->in_sync == 0) {
646 __udelay(1); 662 __udelay(1);
647 /* 663 /*
648 * A different cpu changes *in_sync. Therefore use 664 * A different cpu changes *in_sync. Therefore use
@@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy)
650 */ 666 */
651 barrier(); 667 barrier();
652 } 668 }
653 if (etr_sync.in_sync != 1) 669 if (sync->in_sync != 1)
654 /* Didn't work. Clear per-cpu in sync bit again. */ 670 /* Didn't work. Clear per-cpu in sync bit again. */
655 etr_disable_sync_clock(NULL); 671 disable_sync_clock(NULL);
656 /* 672 /*
657 * This round of TOD syncing is done. Set the clock comparator 673 * This round of TOD syncing is done. Set the clock comparator
658 * to the next tick and let the processor continue. 674 * to the next tick and let the processor continue.
659 */ 675 */
660 fixup_clock_comparator(etr_sync.fixup_cc); 676 fixup_clock_comparator(sync->fixup_cc);
661} 677}
662 678
663static void etr_sync_cpu_end(void *dummy) 679static void clock_sync_cpu_end(void *dummy)
664{ 680{
665} 681}
666 682
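
clock_sync_cpu_start() parks every other cpu in a polling loop on sync->in_sync; barrier() only forces the compiler to reload the flag each pass, since a different cpu writes it. A portable sketch of the same rendezvous, with one pthread standing in for the other cpu (build with -pthread):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	static atomic_int in_sync;	/* written by one thread, polled by the other */

	static void *waiter(void *arg)
	{
		(void)arg;
		/* busy wait; the atomic load is the portable stand-in for
		 * the kernel's barrier() plus plain read */
		while (atomic_load(&in_sync) == 0)
			;	/* spin */
		printf("released, in_sync=%d\n", atomic_load(&in_sync));
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		usleep(10000);			/* let the waiter start spinning */
		atomic_store(&in_sync, 1);	/* the syncing cpu sets the flag */
		pthread_join(t, NULL);
		return 0;
	}
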
@@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy)
672static int etr_sync_clock(struct etr_aib *aib, int port) 688static int etr_sync_clock(struct etr_aib *aib, int port)
673{ 689{
674 struct etr_aib *sync_port; 690 struct etr_aib *sync_port;
691 struct clock_sync_data etr_sync;
675 unsigned long long clock, old_clock, delay, delta; 692 unsigned long long clock, old_clock, delay, delta;
676 int follows; 693 int follows;
677 int rc; 694 int rc;
@@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
690 */ 707 */
691 memset(&etr_sync, 0, sizeof(etr_sync)); 708 memset(&etr_sync, 0, sizeof(etr_sync));
692 preempt_disable(); 709 preempt_disable();
693 smp_call_function(etr_sync_cpu_start, NULL, 0, 0); 710 smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0);
694 local_irq_disable(); 711 local_irq_disable();
695 etr_enable_sync_clock(); 712 enable_sync_clock();
696 713
697 /* Set clock to next OTE. */ 714 /* Set clock to next OTE. */
698 __ctl_set_bit(14, 21); 715 __ctl_set_bit(14, 21);
@@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
707 /* Adjust Linux timing variables. */ 724 /* Adjust Linux timing variables. */
708 delay = (unsigned long long) 725 delay = (unsigned long long)
709 (aib->edf2.etv - sync_port->edf2.etv) << 32; 726 (aib->edf2.etv - sync_port->edf2.etv) << 32;
710 delta = etr_adjust_time(old_clock, clock, delay); 727 delta = adjust_time(old_clock, clock, delay);
711 etr_sync.fixup_cc = delta; 728 etr_sync.fixup_cc = delta;
712 fixup_clock_comparator(delta); 729 fixup_clock_comparator(delta);
713 /* Verify that the clock is properly set. */ 730 /* Verify that the clock is properly set. */
714 if (!etr_aib_follows(sync_port, aib, port)) { 731 if (!etr_aib_follows(sync_port, aib, port)) {
715 /* Didn't work. */ 732 /* Didn't work. */
716 etr_disable_sync_clock(NULL); 733 disable_sync_clock(NULL);
717 etr_sync.in_sync = -EAGAIN; 734 etr_sync.in_sync = -EAGAIN;
718 rc = -EAGAIN; 735 rc = -EAGAIN;
719 } else { 736 } else {
@@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
724 /* Could not set the clock ?!? */ 741 /* Could not set the clock ?!? */
725 __ctl_clear_bit(0, 29); 742 __ctl_clear_bit(0, 29);
726 __ctl_clear_bit(14, 21); 743 __ctl_clear_bit(14, 21);
727 etr_disable_sync_clock(NULL); 744 disable_sync_clock(NULL);
728 etr_sync.in_sync = -EAGAIN; 745 etr_sync.in_sync = -EAGAIN;
729 rc = -EAGAIN; 746 rc = -EAGAIN;
730 } 747 }
731 local_irq_enable(); 748 local_irq_enable();
732 smp_call_function(etr_sync_cpu_end,NULL,0,0); 749 smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
733 preempt_enable(); 750 preempt_enable();
734 return rc; 751 return rc;
735} 752}
@@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
832 * Do not try to get the alternate port aib if the clock 849 * Do not try to get the alternate port aib if the clock
833 * is not in sync yet. 850 * is not in sync yet.
834 */ 851 */
835 if (!eacr.es) 852 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es)
836 return eacr; 853 return eacr;
837 854
838 /* 855 /*
@@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
840 * the other port immediately. If only stetr is available the 857 * the other port immediately. If only stetr is available the
841 * data-port bit toggle has to be used. 858 * data-port bit toggle has to be used.
842 */ 859 */
843 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { 860 if (etr_steai_available) {
844 if (eacr.p0 && !etr_port0_uptodate) { 861 if (eacr.p0 && !etr_port0_uptodate) {
845 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); 862 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
846 etr_port0_uptodate = 1; 863 etr_port0_uptodate = 1;
@@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work)
909 if (!eacr.ea) { 926 if (!eacr.ea) {
910 /* Both ports offline. Reset everything. */ 927 /* Both ports offline. Reset everything. */
911 eacr.dp = eacr.es = eacr.sl = 0; 928 eacr.dp = eacr.es = eacr.sl = 0;
912 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); 929 on_each_cpu(disable_sync_clock, NULL, 0, 1);
913 del_timer_sync(&etr_timer); 930 del_timer_sync(&etr_timer);
914 etr_update_eacr(eacr); 931 etr_update_eacr(eacr);
915 set_bit(ETR_FLAG_EACCES, &etr_flags); 932 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
916 return; 933 return;
917 } 934 }
918 935
@@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work)
953 eacr.e1 = 1; 970 eacr.e1 = 1;
954 sync_port = (etr_port0_uptodate && 971 sync_port = (etr_port0_uptodate &&
955 etr_port_valid(&etr_port0, 0)) ? 0 : -1; 972 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
956 clear_bit(ETR_FLAG_EACCES, &etr_flags);
957 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { 973 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
958 eacr.sl = 0; 974 eacr.sl = 0;
959 eacr.e0 = 0; 975 eacr.e0 = 0;
@@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work)
962 eacr.es = 0; 978 eacr.es = 0;
963 sync_port = (etr_port1_uptodate && 979 sync_port = (etr_port1_uptodate &&
964 etr_port_valid(&etr_port1, 1)) ? 1 : -1; 980 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
965 clear_bit(ETR_FLAG_EACCES, &etr_flags);
966 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { 981 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
967 eacr.sl = 1; 982 eacr.sl = 1;
968 eacr.e0 = 1; 983 eacr.e0 = 1;
@@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work)
976 eacr.e1 = 1; 991 eacr.e1 = 1;
977 sync_port = (etr_port0_uptodate && 992 sync_port = (etr_port0_uptodate &&
978 etr_port_valid(&etr_port0, 0)) ? 0 : -1; 993 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
979 clear_bit(ETR_FLAG_EACCES, &etr_flags);
980 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { 994 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
981 eacr.sl = 1; 995 eacr.sl = 1;
982 eacr.e0 = 0; 996 eacr.e0 = 0;
@@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work)
985 eacr.es = 0; 999 eacr.es = 0;
986 sync_port = (etr_port1_uptodate && 1000 sync_port = (etr_port1_uptodate &&
987 etr_port_valid(&etr_port1, 1)) ? 1 : -1; 1001 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
988 clear_bit(ETR_FLAG_EACCES, &etr_flags);
989 } else { 1002 } else {
990 /* Both ports not usable. */ 1003 /* Both ports not usable. */
991 eacr.es = eacr.sl = 0; 1004 eacr.es = eacr.sl = 0;
992 sync_port = -1; 1005 sync_port = -1;
993 set_bit(ETR_FLAG_EACCES, &etr_flags); 1006 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
994 } 1007 }
995 1008
1009 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1010 eacr.es = 0;
1011
996 /* 1012 /*
997 * If the clock is in sync just update the eacr and return. 1013 * If the clock is in sync just update the eacr and return.
998 * If there is no valid sync port wait for a port update. 1014 * If there is no valid sync port wait for a port update.
999 */ 1015 */
1000 if (eacr.es || sync_port < 0) { 1016 if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) ||
1017 eacr.es || sync_port < 0) {
1001 etr_update_eacr(eacr); 1018 etr_update_eacr(eacr);
1002 etr_set_tolec_timeout(now); 1019 etr_set_tolec_timeout(now);
1003 return; 1020 return;
@@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work)
1018 * and set up a timer to try again after 0.5 seconds 1035 * and set up a timer to try again after 0.5 seconds
1019 */ 1036 */
1020 etr_update_eacr(eacr); 1037 etr_update_eacr(eacr);
1038 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1021 if (now < etr_tolec + (1600000 << 12) || 1039 if (now < etr_tolec + (1600000 << 12) ||
1022 etr_sync_clock(&aib, sync_port) != 0) { 1040 etr_sync_clock(&aib, sync_port) != 0) {
1023 /* Sync failed. Try again in 1/2 second. */ 1041 /* Sync failed. Try again in 1/2 second. */
1024 eacr.es = 0; 1042 eacr.es = 0;
1025 etr_update_eacr(eacr); 1043 etr_update_eacr(eacr);
1044 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1026 etr_set_sync_timeout(); 1045 etr_set_sync_timeout();
1027 } else 1046 } else
1028 etr_set_tolec_timeout(now); 1047 etr_set_tolec_timeout(now);
@@ -1097,8 +1116,8 @@ static ssize_t etr_online_store(struct sys_device *dev,
1097 value = simple_strtoul(buf, NULL, 0); 1116 value = simple_strtoul(buf, NULL, 0);
1098 if (value != 0 && value != 1) 1117 if (value != 0 && value != 1)
1099 return -EINVAL; 1118 return -EINVAL;
1100 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 1119 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
1101 return -ENOSYS; 1120 return -EOPNOTSUPP;
1102 if (dev == &etr_port0_dev) { 1121 if (dev == &etr_port0_dev) {
1103 if (etr_port0_online == value) 1122 if (etr_port0_online == value)
1104 return count; /* Nothing to do. */ 1123 return count; /* Nothing to do. */
@@ -1292,3 +1311,318 @@ out:
1292} 1311}
1293 1312
1294device_initcall(etr_init_sysfs); 1313device_initcall(etr_init_sysfs);
1314
1315/*
1316 * Server Time Protocol (STP) code.
1317 */
1318static int stp_online;
1319static struct stp_sstpi stp_info;
1320static void *stp_page;
1321
1322static void stp_work_fn(struct work_struct *work);
1323static DECLARE_WORK(stp_work, stp_work_fn);
1324
1325static int __init early_parse_stp(char *p)
1326{
1327 if (strncmp(p, "off", 3) == 0)
1328 stp_online = 0;
1329 else if (strncmp(p, "on", 2) == 0)
1330 stp_online = 1;
1331 return 0;
1332}
1333early_param("stp", early_parse_stp);
1334
1335/*
1336 * Reset STP attachment.
1337 */
1338static void stp_reset(void)
1339{
1340 int rc;
1341
1342 stp_page = alloc_bootmem_pages(PAGE_SIZE);
1343 rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1344 if (rc == 1)
1345 set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
1346 else if (stp_online) {
1347 printk(KERN_WARNING "Running on non STP capable machine.\n");
1348 free_bootmem((unsigned long) stp_page, PAGE_SIZE);
1349 stp_page = NULL;
1350 stp_online = 0;
1351 }
1352}
1353
1354static int __init stp_init(void)
1355{
1356 if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
1357 schedule_work(&stp_work);
1358 return 0;
1359}
1360
1361arch_initcall(stp_init);
1362
1363/*
1364 * STP timing alert. There are three causes:
1365 * 1) timing status change
1366 * 2) link availability change
1367 * 3) time control parameter change
1368 * In all three cases we are only interested in the clock source state.
1369 * If an STP clock source is now available, use it.
1370 */
1371static void stp_timing_alert(struct stp_irq_parm *intparm)
1372{
1373 if (intparm->tsc || intparm->lac || intparm->tcpc)
1374 schedule_work(&stp_work);
1375}
1376
1377/*
1378 * STP sync check machine check. This is called when the timing state
1379 * changes from the synchronized state to the unsynchronized state.
1380 * After an STP sync check the clock is not in sync. The machine check
1381 * is broadcast to all cpus at the same time.
1382 */
1383void stp_sync_check(void)
1384{
1385 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1386 return;
1387 disable_sync_clock(NULL);
1388 schedule_work(&stp_work);
1389}
1390
1391/*
1392 * STP island condition machine check. This is called when an attached
1393 * server attempts to communicate over an STP link and the servers
1394 * have matching CTN ids and a valid stratum-1 configuration,
1395 * but the configurations do not match.
1396 */
1397void stp_island_check(void)
1398{
1399 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1400 return;
1401 disable_sync_clock(NULL);
1402 schedule_work(&stp_work);
1403}
1404
1405/*
1406 * STP work function. Check the STP state and take over the clock
1407 * synchronization if the STP clock source is usable.
1408 */
1409static void stp_work_fn(struct work_struct *work)
1410{
1411 struct clock_sync_data stp_sync;
1412 unsigned long long old_clock, delta;
1413 int rc;
1414
1415 if (!stp_online) {
1416 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1417 return;
1418 }
1419
1420 rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
1421 if (rc)
1422 return;
1423
1424 rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
1425 if (rc || stp_info.c == 0)
1426 return;
1427
1428 /*
1429 * Catch all other cpus and make them wait until we have
1430 * successfully synced the clock. smp_call_function will
1431 * return after all other cpus are in clock_sync_cpu_start.
1432 */
1433 memset(&stp_sync, 0, sizeof(stp_sync));
1434 preempt_disable();
1435 smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0);
1436 local_irq_disable();
1437 enable_sync_clock();
1438
1439 set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1440 if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1441 schedule_work(&etr_work);
1442
1443 rc = 0;
1444 if (stp_info.todoff[0] || stp_info.todoff[1] ||
1445 stp_info.todoff[2] || stp_info.todoff[3] ||
1446 stp_info.tmd != 2) {
1447 old_clock = get_clock();
1448 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
1449 if (rc == 0) {
1450 delta = adjust_time(old_clock, get_clock(), 0);
1451 fixup_clock_comparator(delta);
1452 rc = chsc_sstpi(stp_page, &stp_info,
1453 sizeof(struct stp_sstpi));
1454 if (rc == 0 && stp_info.tmd != 2)
1455 rc = -EAGAIN;
1456 }
1457 }
1458 if (rc) {
1459 disable_sync_clock(NULL);
1460 stp_sync.in_sync = -EAGAIN;
1461 clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1462 if (etr_port0_online || etr_port1_online)
1463 schedule_work(&etr_work);
1464 } else
1465 stp_sync.in_sync = 1;
1466
1467 local_irq_enable();
1468 smp_call_function(clock_sync_cpu_end, NULL, 0, 0);
1469 preempt_enable();
1470}
1471
1472/*
1473 * STP class sysfs interface functions
1474 */
1475static struct sysdev_class stp_sysclass = {
1476 .name = "stp",
1477};
1478
1479static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
1480{
1481 if (!stp_online)
1482 return -ENODATA;
1483 return sprintf(buf, "%016llx\n",
1484 *(unsigned long long *) stp_info.ctnid);
1485}
1486
1487static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1488
1489static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
1490{
1491 if (!stp_online)
1492 return -ENODATA;
1493 return sprintf(buf, "%i\n", stp_info.ctn);
1494}
1495
1496static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1497
1498static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
1499{
1500 if (!stp_online || !(stp_info.vbits & 0x2000))
1501 return -ENODATA;
1502 return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
1503}
1504
1505static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1506
1507static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
1508{
1509 if (!stp_online || !(stp_info.vbits & 0x8000))
1510 return -ENODATA;
1511 return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
1512}
1513
1514static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1515
1516static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
1517{
1518 if (!stp_online)
1519 return -ENODATA;
1520 return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
1521}
1522
1523static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1524
1525static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
1526{
1527 if (!stp_online || !(stp_info.vbits & 0x0800))
1528 return -ENODATA;
1529 return sprintf(buf, "%i\n", (int) stp_info.tto);
1530}
1531
1532static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1533
1534static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
1535{
1536 if (!stp_online || !(stp_info.vbits & 0x4000))
1537 return -ENODATA;
1538 return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
1539}
1540
1541static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1542 stp_time_zone_offset_show, NULL);
1543
1544static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
1545{
1546 if (!stp_online)
1547 return -ENODATA;
1548 return sprintf(buf, "%i\n", stp_info.tmd);
1549}
1550
1551static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1552
1553static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
1554{
1555 if (!stp_online)
1556 return -ENODATA;
1557 return sprintf(buf, "%i\n", stp_info.tst);
1558}
1559
1560static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1561
1562static ssize_t stp_online_show(struct sysdev_class *class, char *buf)
1563{
1564 return sprintf(buf, "%i\n", stp_online);
1565}
1566
1567static ssize_t stp_online_store(struct sysdev_class *class,
1568 const char *buf, size_t count)
1569{
1570 unsigned int value;
1571
1572 value = simple_strtoul(buf, NULL, 0);
1573 if (value != 0 && value != 1)
1574 return -EINVAL;
1575 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1576 return -EOPNOTSUPP;
1577 stp_online = value;
1578 schedule_work(&stp_work);
1579 return count;
1580}
1581
1582/*
1583 * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
1584 * stp/online but attr_online already exists in this file.
1585 */
1586static struct sysdev_class_attribute attr_stp_online = {
1587 .attr = { .name = "online", .mode = 0600 },
1588 .show = stp_online_show,
1589 .store = stp_online_store,
1590};
1591
1592static struct sysdev_class_attribute *stp_attributes[] = {
1593 &attr_ctn_id,
1594 &attr_ctn_type,
1595 &attr_dst_offset,
1596 &attr_leap_seconds,
1597 &attr_stp_online,
1598 &attr_stratum,
1599 &attr_time_offset,
1600 &attr_time_zone_offset,
1601 &attr_timing_mode,
1602 &attr_timing_state,
1603 NULL
1604};
1605
1606static int __init stp_init_sysfs(void)
1607{
1608 struct sysdev_class_attribute **attr;
1609 int rc;
1610
1611 rc = sysdev_class_register(&stp_sysclass);
1612 if (rc)
1613 goto out;
1614 for (attr = stp_attributes; *attr; attr++) {
1615 rc = sysdev_class_create_file(&stp_sysclass, *attr);
1616 if (rc)
1617 goto out_unreg;
1618 }
1619 return 0;
1620out_unreg:
1621 for (; attr >= stp_attributes; attr--)
1622 sysdev_class_remove_file(&stp_sysclass, *attr);
1623 sysdev_class_unregister(&stp_sysclass);
1624out:
1625 return rc;
1626}
1627
1628device_initcall(stp_init_sysfs);
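
stp_init_sysfs() registers the attribute array in order and, when one create fails, walks the same array backwards removing what came before, then unregisters the class. The register-or-unwind idiom, with hypothetical create/destroy helpers:

	#include <stdio.h>

	static int create(const char *name)
	{
		printf("create %s\n", name);
		return 0;	/* flip to nonzero to exercise the unwind path */
	}

	static void destroy(const char *name)
	{
		printf("remove %s\n", name);
	}

	static const char *attrs[] = { "ctn_id", "online", "stratum", NULL };

	static int init_all(void)
	{
		int i, rc;

		for (i = 0; attrs[i]; i++) {
			rc = create(attrs[i]);
			if (rc)
				goto unwind;
		}
		return 0;
	unwind:
		/* walk back over everything registered so far, mirroring the
		 * patch's for (; attr >= stp_attributes; attr--) loop */
		for (; i >= 0; i--)
			destroy(attrs[i]);
		return rc;
	}

	int main(void)
	{
		return init_all();
	}
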
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 661a07217057..212d618b0095 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void)
313 machine_has_topology_irq = 1; 313 machine_has_topology_irq = 1;
314 314
315 tl_info = alloc_bootmem_pages(PAGE_SIZE); 315 tl_info = alloc_bootmem_pages(PAGE_SIZE);
316 if (!tl_info)
317 goto error;
318 info = tl_info; 316 info = tl_info;
319 stsi(info, 15, 1, 2); 317 stsi(info, 15, 1, 2);
320 318
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ca90ee3f930e..0fa5dc5d68e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires)
136} 136}
137#endif 137#endif
138 138
139static void start_cpu_timer(void) 139void vtime_start_cpu_timer(void)
140{ 140{
141 struct vtimer_queue *vt_list; 141 struct vtimer_queue *vt_list;
142 142
@@ -150,7 +150,7 @@ static void start_cpu_timer(void)
150 set_vtimer(vt_list->idle); 150 set_vtimer(vt_list->idle);
151} 151}
152 152
153static void stop_cpu_timer(void) 153void vtime_stop_cpu_timer(void)
154{ 154{
155 struct vtimer_queue *vt_list; 155 struct vtimer_queue *vt_list;
156 156
@@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
318 vt_list = &per_cpu(virt_cpu_timer, timer->cpu); 318 vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
319 spin_lock_irqsave(&vt_list->lock, flags); 319 spin_lock_irqsave(&vt_list->lock, flags);
320 320
321 if (timer->cpu != smp_processor_id()) 321 BUG_ON(timer->cpu != smp_processor_id());
322 printk("internal_add_vtimer: BUG, running on wrong CPU");
323 322
324 /* if list is empty we only have to set the timer */ 323 /* if list is empty we only have to set the timer */
325 if (list_empty(&vt_list->list)) { 324 if (list_empty(&vt_list->list)) {
@@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer)
353 put_cpu(); 352 put_cpu();
354} 353}
355 354
356static inline int prepare_vtimer(struct vtimer_list *timer) 355static inline void prepare_vtimer(struct vtimer_list *timer)
357{ 356{
358 if (!timer->function) { 357 BUG_ON(!timer->function);
359 printk("add_virt_timer: uninitialized timer\n"); 358 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
360 return -EINVAL; 359 BUG_ON(vtimer_pending(timer));
361 }
362
363 if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
364 printk("add_virt_timer: invalid timer expire value!\n");
365 return -EINVAL;
366 }
367
368 if (vtimer_pending(timer)) {
369 printk("add_virt_timer: timer pending\n");
370 return -EBUSY;
371 }
372
373 timer->cpu = get_cpu(); 360 timer->cpu = get_cpu();
374 return 0;
375} 361}
376 362
377/* 363/*
@@ -382,10 +368,7 @@ void add_virt_timer(void *new)
382 struct vtimer_list *timer; 368 struct vtimer_list *timer;
383 369
384 timer = (struct vtimer_list *)new; 370 timer = (struct vtimer_list *)new;
385 371 prepare_vtimer(timer);
386 if (prepare_vtimer(timer) < 0)
387 return;
388
389 timer->interval = 0; 372 timer->interval = 0;
390 internal_add_vtimer(timer); 373 internal_add_vtimer(timer);
391} 374}
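
The vtime conversion replaces printk-and-return error handling with BUG_ON() for conditions that can only arise from a caller bug, which lets prepare_vtimer() drop its return value entirely. In a user-space sketch the same contract is an assert(); MY_VTIMER_MAX_SLICE is an assumed bound, not the kernel constant:

	#include <assert.h>
	#include <stddef.h>

	struct vtimer {
		void (*function)(void *);
		unsigned long long expires;
	};

	#define MY_VTIMER_MAX_SLICE 0x7fffffffffffffffULL	/* assumed bound */

	/* caller contract, enforced instead of reported: a violation is a bug
	 * in the caller, so there is no error path to return */
	static void prepare_vtimer(struct vtimer *t)
	{
		assert(t->function != NULL);
		assert(t->expires && t->expires <= MY_VTIMER_MAX_SLICE);
	}

	static void tick(void *p) { (void)p; }

	int main(void)
	{
		struct vtimer t = { .function = tick, .expires = 1000 };

		prepare_vtimer(&t);	/* passes; a NULL function would abort */
		return 0;
	}
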
@@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new)
399 struct vtimer_list *timer; 382 struct vtimer_list *timer;
400 383
401 timer = (struct vtimer_list *)new; 384 timer = (struct vtimer_list *)new;
402 385 prepare_vtimer(timer);
403 if (prepare_vtimer(timer) < 0)
404 return;
405
406 timer->interval = timer->expires; 386 timer->interval = timer->expires;
407 internal_add_vtimer(timer); 387 internal_add_vtimer(timer);
408} 388}
@@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
423 unsigned long flags; 403 unsigned long flags;
424 int cpu; 404 int cpu;
425 405
426 if (!timer->function) { 406 BUG_ON(!timer->function);
427 printk("mod_virt_timer: uninitialized timer\n"); 407 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
428 return -EINVAL;
429 }
430
431 if (!expires || expires > VTIMER_MAX_SLICE) {
432 printk("mod_virt_timer: invalid expire range\n");
433 return -EINVAL;
434 }
435 408
436 /* 409 /*
437 * This is a common optimization triggered by the 410 * This is a common optimization triggered by the
@@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
444 cpu = get_cpu(); 417 cpu = get_cpu();
445 vt_list = &per_cpu(virt_cpu_timer, cpu); 418 vt_list = &per_cpu(virt_cpu_timer, cpu);
446 419
420 /* check if we run on the right CPU */
421 BUG_ON(timer->cpu != cpu);
422
447 /* disable interrupts before test if timer is pending */ 423 /* disable interrupts before test if timer is pending */
448 spin_lock_irqsave(&vt_list->lock, flags); 424 spin_lock_irqsave(&vt_list->lock, flags);
449 425
@@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
458 return 0; 434 return 0;
459 } 435 }
460 436
461 /* check if we run on the right CPU */
462 if (timer->cpu != cpu) {
463 printk("mod_virt_timer: running on wrong CPU, check your code\n");
464 spin_unlock_irqrestore(&vt_list->lock, flags);
465 put_cpu();
466 return -EINVAL;
467 }
468
469 list_del_init(&timer->entry); 437 list_del_init(&timer->entry);
470 timer->expires = expires; 438 timer->expires = expires;
471 439
@@ -536,24 +504,6 @@ void init_cpu_vtimer(void)
536 504
537} 505}
538 506
539static int vtimer_idle_notify(struct notifier_block *self,
540 unsigned long action, void *hcpu)
541{
542 switch (action) {
543 case S390_CPU_IDLE:
544 stop_cpu_timer();
545 break;
546 case S390_CPU_NOT_IDLE:
547 start_cpu_timer();
548 break;
549 }
550 return NOTIFY_OK;
551}
552
553static struct notifier_block vtimer_idle_nb = {
554 .notifier_call = vtimer_idle_notify,
555};
556
557void __init vtime_init(void) 507void __init vtime_init(void)
558{ 508{
559 /* request the cpu timer external interrupt */ 509 /* request the cpu timer external interrupt */
@@ -561,9 +511,6 @@ void __init vtime_init(void)
561 &ext_int_info_timer) != 0) 511 &ext_int_info_timer) != 0)
562 panic("Couldn't request external interrupt 0x1005"); 512 panic("Couldn't request external interrupt 0x1005");
563 513
564 if (register_idle_notifier(&vtimer_idle_nb))
565 panic("Couldn't register idle notifier");
566
567 /* Enable cpu timer interrupts on the boot cpu. */ 514 /* Enable cpu timer interrupts on the boot cpu. */
568 init_cpu_vtimer(); 515 init_cpu_vtimer();
569} 516}
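
With the notifier gone, the idle path calls the two renamed helpers directly; an atomic notifier chain bought nothing for a single, compile-time-known consumer. A toy of the resulting control flow:

	#include <stdio.h>

	/* the two consumers, now exported and called directly from idle */
	static void vtime_stop_cpu_timer(void)  { printf("stop cpu timer\n"); }
	static void vtime_start_cpu_timer(void) { printf("start cpu timer\n"); }

	static void cpu_idle_once(void)
	{
		vtime_stop_cpu_timer();		/* was: notifier, S390_CPU_IDLE */
		/* ... enabled wait ... */
		vtime_start_cpu_timer();	/* was: notifier, S390_CPU_NOT_IDLE */
	}

	int main(void)
	{
		cpu_idle_once();
		return 0;
	}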