author		David Woodhouse <David.Woodhouse@intel.com>	2008-07-25 10:40:14 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-07-25 10:40:14 -0400
commit		ff877ea80efa2015b6263766f78ee42c2a1b32f9 (patch)
tree		85205005c611ab774702148558321c6fb92f1ccd /arch/s390
parent		30821fee4f0cb3e6d241d9f7ddc37742212e3eb7 (diff)
parent		d37e6bf68fc1eb34a4ad21d9ae8890ed37ea80e7 (diff)
Merge branch 'linux-next' of git://git.infradead.org/~dedekind/ubi-2.6
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                     |  23
-rw-r--r--  arch/s390/appldata/appldata.h         |  10
-rw-r--r--  arch/s390/appldata/appldata_base.c    |  45
-rw-r--r--  arch/s390/appldata/appldata_mem.c     |  43
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c |  39
-rw-r--r--  arch/s390/appldata/appldata_os.c      |  57
-rw-r--r--  arch/s390/crypto/crypt_s390.h         |   4
-rw-r--r--  arch/s390/crypto/prng.c               |   7
-rw-r--r--  arch/s390/hypfs/inode.c               |  29
-rw-r--r--  arch/s390/kernel/Makefile             |   9
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c       | 214
-rw-r--r--  arch/s390/kernel/compat_ptrace.h      |   4
-rw-r--r--  arch/s390/kernel/debug.c              |   9
-rw-r--r--  arch/s390/kernel/early.c              | 211
-rw-r--r--  arch/s390/kernel/ipl.c                | 462
-rw-r--r--  arch/s390/kernel/kprobes.c            |   4
-rw-r--r--  arch/s390/kernel/machine_kexec.c      |   1
-rw-r--r--  arch/s390/kernel/mem_detect.c         | 100
-rw-r--r--  arch/s390/kernel/process.c            |  32
-rw-r--r--  arch/s390/kernel/ptrace.c             | 363
-rw-r--r--  arch/s390/kernel/setup.c              |  51
-rw-r--r--  arch/s390/kernel/smp.c                |  58
-rw-r--r--  arch/s390/kernel/stacktrace.c         |   3
-rw-r--r--  arch/s390/kernel/time.c               | 669
-rw-r--r--  arch/s390/kernel/topology.c           |   2
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S        |   1
-rw-r--r--  arch/s390/kernel/vtime.c              |  81
-rw-r--r--  arch/s390/kvm/interrupt.c             |  32
-rw-r--r--  arch/s390/kvm/kvm-s390.c              |  21
-rw-r--r--  arch/s390/kvm/priv.c                  |   2
-rw-r--r--  arch/s390/kvm/sigp.c                  |  20
-rw-r--r--  arch/s390/mm/init.c                   |  19
32 files changed, 1584 insertions(+), 1041 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 107e492cb47e..eb530b4128ba 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -146,6 +146,7 @@ config MATHEMU
 config COMPAT
 	bool "Kernel support for 31 bit emulation"
 	depends on 64BIT
+	select COMPAT_BINFMT_ELF
 	help
 	  Select this option if you want to enable your system kernel to
 	  handle system-calls from ELF binaries for 31 bit ESA. This option
@@ -288,7 +289,7 @@ config WARN_STACK_SIZE
 	int "Maximum frame size considered safe (128-2048)"
 	range 128 2048
 	depends on WARN_STACK
-	default "256"
+	default "2048"
 	help
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
@@ -312,6 +313,10 @@ config ARCH_SPARSEMEM_DEFAULT
 config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 
+config ARCH_ENABLE_MEMORY_HOTPLUG
+	def_bool y
+	depends on SPARSEMEM
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
@@ -344,6 +349,22 @@ config QDIO_DEBUG
 
 	  If unsure, say N.
 
+config CHSC_SCH
+	tristate "Support for CHSC subchannels"
+	help
+	  This driver allows usage of CHSC subchannels. A CHSC subchannel
+	  is usually present on LPAR only.
+	  The driver creates a device /dev/chsc, which may be used to
+	  obtain I/O configuration information about the machine and
+	  to issue asynchronous chsc commands (DANGEROUS).
+	  You will usually only want to use this interface on a special
+	  LPAR designated for system management.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called chsc_sch.
+
+	  If unsure, say N.
+
 comment "Misc"
 
 config IPL
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index db3ae8505103..17a2636fec0a 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -3,13 +3,11 @@
3 * 3 *
4 * Definitions and interface for Linux - z/VM Monitor Stream. 4 * Definitions and interface for Linux - z/VM Monitor Stream.
5 * 5 *
6 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH. 6 * Copyright IBM Corp. 2003, 2008
7 * 7 *
8 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> 8 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
9 */ 9 */
10 10
11//#define APPLDATA_DEBUG /* Debug messages on/off */
12
13#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */ 11#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */
14 /* data buffer */ 12 /* data buffer */
15#define APPLDATA_MAX_PROCS 100 13#define APPLDATA_MAX_PROCS 100
@@ -32,12 +30,6 @@
32#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x) 30#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
33#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x) 31#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
34 32
35#ifdef APPLDATA_DEBUG
36#define P_DEBUG(x...) printk(KERN_DEBUG MY_PRINT_NAME " debug: " x)
37#else
38#define P_DEBUG(x...) do {} while (0)
39#endif
40
41struct appldata_ops { 33struct appldata_ops {
42 struct list_head list; 34 struct list_head list;
43 struct ctl_table_header *sysctl_header; 35 struct ctl_table_header *sysctl_header;
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index ad40729bec3d..a7f8979fb925 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,7 +5,7 @@
  * Exports appldata_register_ops() and appldata_unregister_ops() for the
  * data gathering modules.
  *
- * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright IBM Corp. 2003, 2008
  *
  * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
@@ -108,9 +108,6 @@ static LIST_HEAD(appldata_ops_list);
  */
 static void appldata_timer_function(unsigned long data)
 {
-	P_DEBUG(" -= Timer =-\n");
-	P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
-		atomic_read(&appldata_expire_count));
 	if (atomic_dec_and_test(&appldata_expire_count)) {
 		atomic_set(&appldata_expire_count, num_online_cpus());
 		queue_work(appldata_wq, (struct work_struct *) data);
@@ -128,14 +125,11 @@ static void appldata_work_fn(struct work_struct *work)
 	struct appldata_ops *ops;
 	int i;
 
-	P_DEBUG(" -= Work Queue =-\n");
 	i = 0;
 	get_online_cpus();
 	spin_lock(&appldata_ops_lock);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
-		P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
-			++i, ops->active, ops->name);
 		if (ops->active == 1) {
 			ops->callback(ops->data);
 		}
@@ -209,10 +203,9 @@ __appldata_vtimer_setup(int cmd)
 			per_cpu(appldata_timer, i).expires = per_cpu_interval;
 			smp_call_function_single(i, add_virt_timer_periodic,
 						 &per_cpu(appldata_timer, i),
-						 0, 1);
+						 1);
 		}
 		appldata_timer_active = 1;
-		P_INFO("Monitoring timer started.\n");
 		break;
 	case APPLDATA_DEL_TIMER:
 		for_each_online_cpu(i)
@@ -221,7 +214,6 @@ __appldata_vtimer_setup(int cmd)
 			break;
 		appldata_timer_active = 0;
 		atomic_set(&appldata_expire_count, num_online_cpus());
-		P_INFO("Monitoring timer stopped.\n");
 		break;
 	case APPLDATA_MOD_TIMER:
 		per_cpu_interval = (u64) (appldata_interval*1000 /
@@ -236,7 +228,7 @@ __appldata_vtimer_setup(int cmd)
 			args.timer = &per_cpu(appldata_timer, i);
 			args.expires = per_cpu_interval;
 			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-						 &args, 0, 1);
+						 &args, 1);
 		}
 	}
 }
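Note: the two smp_call_function_single() call sites in the hunks above lose an argument because this merge window also changed that function's prototype, dropping the unused "nonatomic" flag. A sketch of the resulting calling convention (prototype reproduced from kernel/smp.c of this era; shown for orientation, not part of this patch):

	/* run func(info) on the given cpu; wait != 0 blocks until it completes */
	int smp_call_function_single(int cpu, void (*func)(void *info),
				     void *info, int wait);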
@@ -313,10 +305,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
 	}
 	interval = 0;
 	sscanf(buf, "%i", &interval);
-	if (interval <= 0) {
-		P_ERROR("Timer CPU interval has to be > 0!\n");
+	if (interval <= 0)
 		return -EINVAL;
-	}
 
 	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
@@ -324,9 +314,6 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
 	put_online_cpus();
-
-	P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
-		interval);
 out:
 	*lenp = len;
 	*ppos += len;
@@ -406,23 +393,16 @@ appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
 			P_ERROR("START DIAG 0xDC for %s failed, "
 				"return code: %d\n", ops->name, rc);
 			module_put(ops->owner);
-		} else {
-			P_INFO("Monitoring %s data enabled, "
-				"DIAG 0xDC started.\n", ops->name);
+		} else
 			ops->active = 1;
-		}
 	} else if ((buf[0] == '0') && (ops->active == 1)) {
 		ops->active = 0;
 		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
 				(unsigned long) ops->data, ops->size,
 				ops->mod_lvl);
-		if (rc != 0) {
+		if (rc != 0)
 			P_ERROR("STOP DIAG 0xDC for %s failed, "
 				"return code: %d\n", ops->name, rc);
-		} else {
-			P_INFO("Monitoring %s data disabled, "
-				"DIAG 0xDC stopped.\n", ops->name);
-		}
 		module_put(ops->owner);
 	}
 	spin_unlock(&appldata_ops_lock);
@@ -468,7 +448,6 @@ int appldata_register_ops(struct appldata_ops *ops)
 	ops->sysctl_header = register_sysctl_table(ops->ctl_table);
 	if (!ops->sysctl_header)
 		goto out;
-	P_INFO("%s-ops registered!\n", ops->name);
 	return 0;
 out:
 	spin_lock(&appldata_ops_lock);
@@ -490,7 +469,6 @@ void appldata_unregister_ops(struct appldata_ops *ops)
 	spin_unlock(&appldata_ops_lock);
 	unregister_sysctl_table(ops->sysctl_header);
 	kfree(ops->ctl_table);
-	P_INFO("%s-ops unregistered!\n", ops->name);
 }
 /********************** module-ops management <END> **************************/
 
@@ -553,14 +531,9 @@ static int __init appldata_init(void)
 {
 	int i;
 
-	P_DEBUG("sizeof(parameter_list) = %lu\n",
-		sizeof(struct appldata_parameter_list));
-
 	appldata_wq = create_singlethread_workqueue("appldata");
-	if (!appldata_wq) {
-		P_ERROR("Could not create work queue\n");
+	if (!appldata_wq)
 		return -ENOMEM;
-	}
 
 	get_online_cpus();
 	for_each_online_cpu(i)
@@ -571,8 +544,6 @@ static int __init appldata_init(void)
 	register_hotcpu_notifier(&appldata_nb);
 
 	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
-
-	P_DEBUG("Base interface initialized.\n");
 	return 0;
 }
 
@@ -584,7 +555,9 @@ EXPORT_SYMBOL_GPL(appldata_register_ops);
 EXPORT_SYMBOL_GPL(appldata_unregister_ops);
 EXPORT_SYMBOL_GPL(appldata_diag);
 
+#ifdef CONFIG_SWAP
 EXPORT_SYMBOL_GPL(si_swapinfo);
+#endif
 EXPORT_SYMBOL_GPL(nr_threads);
 EXPORT_SYMBOL_GPL(nr_running);
 EXPORT_SYMBOL_GPL(nr_iowait);
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index 51181ccdb87b..3ed56b7d1b2f 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -14,14 +14,13 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/kernel_stat.h>
-#include <asm/io.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
+#include <asm/io.h>
 
 #include "appldata.h"
 
 
-#define MY_PRINT_NAME	"appldata_mem"		/* for debug messages, etc. */
 #define P2K(x) ((x) << (PAGE_SHIFT - 10))	/* Converts #Pages to KB */
 
 /*
@@ -70,30 +69,6 @@ static struct appldata_mem_data {
 } __attribute__((packed)) appldata_mem_data;
 
 
-static inline void appldata_debug_print(struct appldata_mem_data *mem_data)
-{
-	P_DEBUG("--- MEM - RECORD ---\n");
-	P_DEBUG("pgpgin     = %8lu KB\n", mem_data->pgpgin);
-	P_DEBUG("pgpgout    = %8lu KB\n", mem_data->pgpgout);
-	P_DEBUG("pswpin     = %8lu Pages\n", mem_data->pswpin);
-	P_DEBUG("pswpout    = %8lu Pages\n", mem_data->pswpout);
-	P_DEBUG("pgalloc    = %8lu \n", mem_data->pgalloc);
-	P_DEBUG("pgfault    = %8lu \n", mem_data->pgfault);
-	P_DEBUG("pgmajfault = %8lu \n", mem_data->pgmajfault);
-	P_DEBUG("sharedram  = %8lu KB\n", mem_data->sharedram);
-	P_DEBUG("totalram   = %8lu KB\n", mem_data->totalram);
-	P_DEBUG("freeram    = %8lu KB\n", mem_data->freeram);
-	P_DEBUG("totalhigh  = %8lu KB\n", mem_data->totalhigh);
-	P_DEBUG("freehigh   = %8lu KB\n", mem_data->freehigh);
-	P_DEBUG("bufferram  = %8lu KB\n", mem_data->bufferram);
-	P_DEBUG("cached     = %8lu KB\n", mem_data->cached);
-	P_DEBUG("totalswap  = %8lu KB\n", mem_data->totalswap);
-	P_DEBUG("freeswap   = %8lu KB\n", mem_data->freeswap);
-	P_DEBUG("sync_count_1 = %u\n", mem_data->sync_count_1);
-	P_DEBUG("sync_count_2 = %u\n", mem_data->sync_count_2);
-	P_DEBUG("timestamp    = %lX\n", mem_data->timestamp);
-}
-
 /*
  * appldata_get_mem_data()
  *
@@ -140,9 +115,6 @@ static void appldata_get_mem_data(void *data)
 
 	mem_data->timestamp = get_clock();
 	mem_data->sync_count_2++;
-#ifdef APPLDATA_DEBUG
-	appldata_debug_print(mem_data);
-#endif
 }
 
 
@@ -164,17 +136,7 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_mem_init(void)
 {
-	int rc;
-
-	P_DEBUG("sizeof(mem) = %lu\n", sizeof(struct appldata_mem_data));
-
-	rc = appldata_register_ops(&ops);
-	if (rc != 0) {
-		P_ERROR("Error registering ops, rc = %i\n", rc);
-	} else {
-		P_DEBUG("%s-ops registered!\n", ops.name);
-	}
-	return rc;
+	return appldata_register_ops(&ops);
 }
 
 /*
@@ -185,7 +147,6 @@ static int __init appldata_mem_init(void)
 static void __exit appldata_mem_exit(void)
 {
 	appldata_unregister_ops(&ops);
-	P_DEBUG("%s-ops unregistered!\n", ops.name);
 }
 
 
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 4d8344336001..3b746556e1a3 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -21,9 +21,6 @@
 #include "appldata.h"
 
 
-#define MY_PRINT_NAME	"appldata_net_sum"	/* for debug messages, etc. */
-
-
 /*
  * Network data
  *
@@ -60,26 +57,6 @@ static struct appldata_net_sum_data {
 } __attribute__((packed)) appldata_net_sum_data;
 
 
-static inline void appldata_print_debug(struct appldata_net_sum_data *net_data)
-{
-	P_DEBUG("--- NET - RECORD ---\n");
-
-	P_DEBUG("nr_interfaces = %u\n", net_data->nr_interfaces);
-	P_DEBUG("rx_packets    = %8lu\n", net_data->rx_packets);
-	P_DEBUG("tx_packets    = %8lu\n", net_data->tx_packets);
-	P_DEBUG("rx_bytes      = %8lu\n", net_data->rx_bytes);
-	P_DEBUG("tx_bytes      = %8lu\n", net_data->tx_bytes);
-	P_DEBUG("rx_errors     = %8lu\n", net_data->rx_errors);
-	P_DEBUG("tx_errors     = %8lu\n", net_data->tx_errors);
-	P_DEBUG("rx_dropped    = %8lu\n", net_data->rx_dropped);
-	P_DEBUG("tx_dropped    = %8lu\n", net_data->tx_dropped);
-	P_DEBUG("collisions    = %8lu\n", net_data->collisions);
-
-	P_DEBUG("sync_count_1 = %u\n", net_data->sync_count_1);
-	P_DEBUG("sync_count_2 = %u\n", net_data->sync_count_2);
-	P_DEBUG("timestamp    = %lX\n", net_data->timestamp);
-}
-
 /*
  * appldata_get_net_sum_data()
  *
@@ -135,9 +112,6 @@ static void appldata_get_net_sum_data(void *data)
 
 	net_data->timestamp = get_clock();
 	net_data->sync_count_2++;
-#ifdef APPLDATA_DEBUG
-	appldata_print_debug(net_data);
-#endif
 }
 
 
@@ -159,17 +133,7 @@ static struct appldata_ops ops = {
  */
 static int __init appldata_net_init(void)
 {
-	int rc;
-
-	P_DEBUG("sizeof(net) = %lu\n", sizeof(struct appldata_net_sum_data));
-
-	rc = appldata_register_ops(&ops);
-	if (rc != 0) {
-		P_ERROR("Error registering ops, rc = %i\n", rc);
-	} else {
-		P_DEBUG("%s-ops registered!\n", ops.name);
-	}
-	return rc;
+	return appldata_register_ops(&ops);
 }
 
 /*
@@ -180,7 +144,6 @@ static int __init appldata_net_init(void)
 static void __exit appldata_net_exit(void)
 {
 	appldata_unregister_ops(&ops);
-	P_DEBUG("%s-ops unregistered!\n", ops.name);
 }
 
 
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 6b3eafe10453..eb44f9f8ab91 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -89,44 +89,6 @@ static struct appldata_ops ops = {
 };
 
 
-static inline void appldata_print_debug(struct appldata_os_data *os_data)
-{
-	int a0, a1, a2, i;
-
-	P_DEBUG("--- OS - RECORD ---\n");
-	P_DEBUG("nr_threads   = %u\n", os_data->nr_threads);
-	P_DEBUG("nr_running   = %u\n", os_data->nr_running);
-	P_DEBUG("nr_iowait    = %u\n", os_data->nr_iowait);
-	P_DEBUG("avenrun(int) = %8x / %8x / %8x\n", os_data->avenrun[0],
-		os_data->avenrun[1], os_data->avenrun[2]);
-	a0 = os_data->avenrun[0];
-	a1 = os_data->avenrun[1];
-	a2 = os_data->avenrun[2];
-	P_DEBUG("avenrun(float) = %d.%02d / %d.%02d / %d.%02d\n",
-		LOAD_INT(a0), LOAD_FRAC(a0), LOAD_INT(a1), LOAD_FRAC(a1),
-		LOAD_INT(a2), LOAD_FRAC(a2));
-
-	P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
-	for (i = 0; i < os_data->nr_cpus; i++) {
-		P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
-			"idle = %u, irq = %u, softirq = %u, iowait = %u, "
-			"steal = %u\n",
-			os_data->os_cpu[i].cpu_id,
-			os_data->os_cpu[i].per_cpu_user,
-			os_data->os_cpu[i].per_cpu_nice,
-			os_data->os_cpu[i].per_cpu_system,
-			os_data->os_cpu[i].per_cpu_idle,
-			os_data->os_cpu[i].per_cpu_irq,
-			os_data->os_cpu[i].per_cpu_softirq,
-			os_data->os_cpu[i].per_cpu_iowait,
-			os_data->os_cpu[i].per_cpu_steal);
-	}
-
-	P_DEBUG("sync_count_1 = %u\n", os_data->sync_count_1);
-	P_DEBUG("sync_count_2 = %u\n", os_data->sync_count_2);
-	P_DEBUG("timestamp    = %lX\n", os_data->timestamp);
-}
-
 /*
  * appldata_get_os_data()
  *
@@ -180,13 +142,10 @@ static void appldata_get_os_data(void *data)
 				   APPLDATA_START_INTERVAL_REC,
 				   (unsigned long) ops.data, new_size,
 				   ops.mod_lvl);
-		if (rc != 0) {
+		if (rc != 0)
 			P_ERROR("os: START NEW DIAG 0xDC failed, "
 				"return code: %d, new size = %i\n", rc,
 				new_size);
-			P_INFO("os: stopping old record now\n");
-		} else
-			P_INFO("os: new record size = %i\n", new_size);
 
 		rc = appldata_diag(APPLDATA_RECORD_OS_ID,
 				   APPLDATA_STOP_REC,
@@ -204,9 +163,6 @@ static void appldata_get_os_data(void *data)
 	}
 	os_data->timestamp = get_clock();
 	os_data->sync_count_2++;
-#ifdef APPLDATA_DEBUG
-	appldata_print_debug(os_data);
-#endif
 }
 
 
@@ -227,12 +183,9 @@ static int __init appldata_os_init(void)
 		rc = -ENOMEM;
 		goto out;
 	}
-	P_DEBUG("max. sizeof(os) = %i, sizeof(os_cpu) = %lu\n", max_size,
-		sizeof(struct appldata_os_per_cpu));
 
 	appldata_os_data = kzalloc(max_size, GFP_DMA);
 	if (appldata_os_data == NULL) {
-		P_ERROR("No memory for %s!\n", ops.name);
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -240,17 +193,12 @@ static int __init appldata_os_init(void)
 	appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
 	appldata_os_data->cpu_offset   = offsetof(struct appldata_os_data,
 							os_cpu);
-	P_DEBUG("cpu offset = %u\n", appldata_os_data->cpu_offset);
 
 	ops.data = appldata_os_data;
 	ops.callback  = &appldata_get_os_data;
 	rc = appldata_register_ops(&ops);
-	if (rc != 0) {
-		P_ERROR("Error registering ops, rc = %i\n", rc);
+	if (rc != 0)
 		kfree(appldata_os_data);
-	} else {
-		P_DEBUG("%s-ops registered!\n", ops.name);
-	}
 out:
 	return rc;
 }
@@ -264,7 +212,6 @@ static void __exit appldata_os_exit(void)
 {
 	appldata_unregister_ops(&ops);
 	kfree(appldata_os_data);
-	P_DEBUG("%s-ops unregistered!\n", ops.name);
 }
 
 
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 9992f95ef992..0ef9829f2ad6 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -296,6 +296,10 @@ static inline int crypt_s390_func_available(int func)
 	unsigned char status[16];
 	int ret;
 
+	/* check if CPACF facility (bit 17) is available */
+	if (!(stfl() & 1ULL << (31 - 17)))
+		return 0;
+
 	switch (func & CRYPT_S390_OP_MASK) {
 	case CRYPT_S390_KM:
 		ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
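Note: the added guard reads the store-facility-list word via stfl() before issuing any query instruction. Facility bits are numbered from the most significant bit, so bit 17 (the message-security assist, i.e. CPACF) corresponds to the mask 1 << (31 - 17) = 0x4000 in a 32-bit facility word. A minimal sketch of the same test (FACILITY_BIT is a hypothetical helper, not part of this patch):

	#define FACILITY_BIT(n)	(1UL << (31 - (n)))	/* bit 0 = MSB */

	static int cpacf_available(void)
	{
		/* facility bit 17: message-security assist (CPACF) */
		return (stfl() & FACILITY_BIT(17)) != 0;
	}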
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 0cfefddd8375..eca724d229ec 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -6,6 +6,7 @@
6#include <linux/fs.h> 6#include <linux/fs.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/kernel.h> 8#include <linux/kernel.h>
9#include <linux/smp_lock.h>
9#include <linux/miscdevice.h> 10#include <linux/miscdevice.h>
10#include <linux/module.h> 11#include <linux/module.h>
11#include <linux/moduleparam.h> 12#include <linux/moduleparam.h>
@@ -48,6 +49,7 @@ static unsigned char parm_block[32] = {
48 49
49static int prng_open(struct inode *inode, struct file *file) 50static int prng_open(struct inode *inode, struct file *file)
50{ 51{
52 cycle_kernel_lock();
51 return nonseekable_open(inode, file); 53 return nonseekable_open(inode, file);
52} 54}
53 55
@@ -185,11 +187,8 @@ static int __init prng_init(void)
185 prng_seed(16); 187 prng_seed(16);
186 188
187 ret = misc_register(&prng_dev); 189 ret = misc_register(&prng_dev);
188 if (ret) { 190 if (ret)
189 printk(KERN_WARNING
190 "Could not register misc device for PRNG.\n");
191 goto out_buf; 191 goto out_buf;
192 }
193 return 0; 192 return 0;
194 193
195out_buf: 194out_buf:
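Note: cycle_kernel_lock() in prng_open() comes from the BKL-pushdown series of this release: open() methods are no longer called with the big kernel lock held, so drivers that might still rely on the old serialization take and release the lock once themselves. Its definition at the time amounted to the following (recalled from <linux/smp_lock.h> of this era, shown for orientation only):

	static inline void cycle_kernel_lock(void)
	{
		lock_kernel();		/* wait out any concurrent BKL holder */
		unlock_kernel();
	}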
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 4b010ff814c9..7383781f3e6a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -150,33 +150,24 @@ static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			      unsigned long nr_segs, loff_t offset)
 {
 	char *data;
-	size_t len;
+	ssize_t ret;
 	struct file *filp = iocb->ki_filp;
 	/* XXX: temporary */
 	char __user *buf = iov[0].iov_base;
 	size_t count = iov[0].iov_len;
 
-	if (nr_segs != 1) {
-		count = -EINVAL;
-		goto out;
-	}
+	if (nr_segs != 1)
+		return -EINVAL;
 
 	data = filp->private_data;
-	len = strlen(data);
-	if (offset > len) {
-		count = 0;
-		goto out;
-	}
-	if (count > len - offset)
-		count = len - offset;
-	if (copy_to_user(buf, data + offset, count)) {
-		count = -EFAULT;
-		goto out;
-	}
-	iocb->ki_pos += count;
+	ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
+	if (ret <= 0)
+		return ret;
+
+	iocb->ki_pos += ret;
 	file_accessed(filp);
-out:
-	return count;
+
+	return ret;
 }
 static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
 			       unsigned long nr_segs, loff_t offset)
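Note: simple_read_from_buffer() absorbs the open-coded offset clamping and copy_to_user() that hypfs_aio_read() used to carry. Its contract (fs/libfs.c): copy at most count bytes from the kernel buffer from, of size available, to the user buffer at *ppos; advance *ppos and return the number of bytes copied, 0 at end of buffer, or a negative errno. A minimal usage sketch under the same assumptions as the hunk above (data is a NUL-terminated kernel string):

	ssize_t ret;

	ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
	/* ret < 0: fault; ret == 0: offset at/past the end; else bytes copied */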
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 6302f5082588..50f657e77344 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -7,9 +7,14 @@
7# 7#
8CFLAGS_smp.o := -Wno-nonnull 8CFLAGS_smp.o := -Wno-nonnull
9 9
10#
11# Pass UTS_MACHINE for user_regset definition
12#
13CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
14
10obj-y := bitmap.o traps.o time.o process.o base.o early.o \ 15obj-y := bitmap.o traps.o time.o process.o base.o early.o \
11 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ 16 setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
12 s390_ext.o debug.o irq.o ipl.o dis.o diag.o 17 s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o
13 18
14obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 19obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
15obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 20obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -23,7 +28,7 @@ obj-$(CONFIG_AUDIT) += audit.o
23compat-obj-$(CONFIG_AUDIT) += compat_audit.o 28compat-obj-$(CONFIG_AUDIT) += compat_audit.o
24obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ 29obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
25 compat_wrapper.o compat_exec_domain.o \ 30 compat_wrapper.o compat_exec_domain.o \
26 binfmt_elf32.o $(compat-obj-y) 31 $(compat-obj-y)
27 32
28obj-$(CONFIG_VIRT_TIMER) += vtime.o 33obj-$(CONFIG_VIRT_TIMER) += vtime.o
29obj-$(CONFIG_STACKTRACE) += stacktrace.o 34obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
deleted file mode 100644
index 3e1c315b736d..000000000000
--- a/arch/s390/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Support for 32-bit Linux for S390 ELF binaries.
- *
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Gerhard Tonn (ton@de.ibm.com)
- *
- * Heavily inspired by the 32-bit Sparc compat code which is
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
- */
-
-#define __ASMS390_ELF_H
-
-#include <linux/time.h>
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS	ELFCLASS32
-#define ELF_DATA	ELFDATA2MSB
-#define ELF_ARCH	EM_S390
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
-	(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
-	 && (x)->e_ident[EI_CLASS] == ELF_CLASS)
-
-/* ELF register definitions */
-#define NUM_GPRS	16
-#define NUM_FPRS	16
-#define NUM_ACRS	16
-
-/* For SVR4/S390 the function pointer to be registered with `atexit` is
-   passed in R14. */
-#define ELF_PLAT_INIT(_r, load_addr) \
-	do { \
-		_r->gprs[14] = 0; \
-	} while (0)
-
-#define USE_ELF_CORE_DUMP
-#define ELF_EXEC_PAGESIZE	4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader. We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
-
-/* Wow, the "main" arch needs arch dependent functions too.. :) */
-
-/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
-   now struct_user_regs, they are different) */
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
-
-#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
-
-#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this CPU supports. */
-
-#define ELF_HWCAP (0)
-
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization. This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
-
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
-
-#define ELF_PLATFORM (NULL)
-
-#define SET_PERSONALITY(ex, ibcs2)			\
-do {							\
-	if (ibcs2)					\
-		set_personality(PER_SVR4);		\
-	else if (current->personality != PER_LINUX32)	\
-		set_personality(PER_LINUX);		\
-	set_thread_flag(TIF_31BIT);			\
-} while (0)
-
-#include "compat_linux.h"
-
-typedef _s390_fp_regs32 elf_fpregset_t;
-
-typedef struct
-{
-
-	_psw_t32	psw;
-	__u32		gprs[__NUM_GPRS];
-	__u32		acrs[__NUM_ACRS];
-	__u32		orig_gpr2;
-} s390_regs32;
-typedef s390_regs32 elf_gregset_t;
-
-static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
-{
-	int i;
-
-	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
-	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
-	for (i = 0; i < NUM_GPRS; i++)
-		regs->gprs[i] = ptregs->gprs[i];
-	save_access_regs(regs->acrs);
-	regs->orig_gpr2 = ptregs->orig_gpr2;
-	return 1;
-}
-
-static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
-{
-	struct pt_regs *ptregs = task_pt_regs(tsk);
-	int i;
-
-	memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
-	memcpy(&regs->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
-	for (i = 0; i < NUM_GPRS; i++)
-		regs->gprs[i] = ptregs->gprs[i];
-	memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
-	regs->orig_gpr2 = ptregs->orig_gpr2;
-	return 1;
-}
-
-static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-	if (tsk == current)
-		save_fp_regs((s390_fp_regs *) fpregs);
-	else
-		memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
-	return 1;
-}
-
-#include <asm/processor.h>
-#include <asm/pgalloc.h>
-#include <linux/module.h>
-#include <linux/elfcore.h>
-#include <linux/binfmts.h>
-#include <linux/compat.h>
-
-#define elf_prstatus elf_prstatus32
-struct elf_prstatus32
-{
-	struct elf_siginfo pr_info;	/* Info associated with signal */
-	short	pr_cursig;		/* Current signal */
-	u32	pr_sigpend;		/* Set of pending signals */
-	u32	pr_sighold;		/* Set of held signals */
-	pid_t	pr_pid;
-	pid_t	pr_ppid;
-	pid_t	pr_pgrp;
-	pid_t	pr_sid;
-	struct compat_timeval pr_utime;		/* User time */
-	struct compat_timeval pr_stime;		/* System time */
-	struct compat_timeval pr_cutime;	/* Cumulative user time */
-	struct compat_timeval pr_cstime;	/* Cumulative system time */
-	elf_gregset_t pr_reg;	/* GP registers */
-	int pr_fpvalid;		/* True if math co-processor being used. */
-};
-
-#define elf_prpsinfo elf_prpsinfo32
-struct elf_prpsinfo32
-{
-	char	pr_state;	/* numeric process state */
-	char	pr_sname;	/* char for pr_state */
-	char	pr_zomb;	/* zombie */
-	char	pr_nice;	/* nice val */
-	u32	pr_flag;	/* flags */
-	u16	pr_uid;
-	u16	pr_gid;
-	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
-	/* Lots missing */
-	char	pr_fname[16];	/* filename of executable */
-	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
-};
-
-#include <linux/highuid.h>
-
-/*
-#define init_elf_binfmt init_elf32_binfmt
-*/
-
-#undef start_thread
-#define start_thread	start_thread31
-
-static inline void start_thread31(struct pt_regs *regs, unsigned long new_psw,
-				  unsigned long new_stackp)
-{
-	set_fs(USER_DS);
-	regs->psw.mask = psw_user32_bits;
-	regs->psw.addr = new_psw;
-	regs->gprs[15] = new_stackp;
-	crst_table_downgrade(current->mm, 1UL << 31);
-}
-
-MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
-		   " Copyright 2000 IBM Corporation");
-MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
-
-#undef MODULE_DESCRIPTION
-#undef MODULE_AUTHOR
-
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static inline void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-	value->tv_usec = cputime % 1000000;
-	value->tv_sec = cputime / 1000000;
-}
-
-#include "../../../fs/binfmt_elf.c"
-
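Note: this 214-line wrapper can go because the Kconfig hunk above selects COMPAT_BINFMT_ELF, the generic compat ELF loader in fs/compat_binfmt_elf.c. That file uses the same trick this one did, overriding the ELF class, layout types and core-dump structures and then including the native loader. A condensed sketch of the pattern, from memory of that file (macro list abbreviated; see fs/compat_binfmt_elf.c for the real set):

	/* fs/compat_binfmt_elf.c, abbreviated */
	#undef	ELF_CLASS
	#define ELF_CLASS	ELFCLASS32
	#define elf_prstatus	compat_elf_prstatus
	#define elf_prpsinfo	compat_elf_prpsinfo
	/* ...further compat_* overrides... */
	#include "binfmt_elf.c"	/* build the generic loader in compat mode */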
diff --git a/arch/s390/kernel/compat_ptrace.h b/arch/s390/kernel/compat_ptrace.h
index 419aef913ee1..cde81fa64f89 100644
--- a/arch/s390/kernel/compat_ptrace.h
+++ b/arch/s390/kernel/compat_ptrace.h
@@ -1,7 +1,7 @@
1#ifndef _PTRACE32_H 1#ifndef _PTRACE32_H
2#define _PTRACE32_H 2#define _PTRACE32_H
3 3
4#include "compat_linux.h" /* needed for _psw_t32 */ 4#include "compat_linux.h" /* needed for psw_compat_t */
5 5
6typedef struct { 6typedef struct {
7 __u32 cr[3]; 7 __u32 cr[3];
@@ -38,7 +38,7 @@ typedef struct {
38 38
39struct user_regs_struct32 39struct user_regs_struct32
40{ 40{
41 _psw_t32 psw; 41 psw_compat_t psw;
42 u32 gprs[NUM_GPRS]; 42 u32 gprs[NUM_GPRS];
43 u32 acrs[NUM_ACRS]; 43 u32 acrs[NUM_ACRS];
44 u32 orig_gpr2; 44 u32 orig_gpr2;
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c93d1296cc0a..d80fcd4a7fe1 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1079,7 +1079,6 @@ __init debug_init(void)
1079 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table); 1079 s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
1080 mutex_lock(&debug_mutex); 1080 mutex_lock(&debug_mutex);
1081 debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL); 1081 debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
1082 printk(KERN_INFO "debug: Initialization complete\n");
1083 initialized = 1; 1082 initialized = 1;
1084 mutex_unlock(&debug_mutex); 1083 mutex_unlock(&debug_mutex);
1085 1084
@@ -1193,7 +1192,6 @@ debug_get_uint(char *buf)
1193 for(; isspace(*buf); buf++); 1192 for(; isspace(*buf); buf++);
1194 rc = simple_strtoul(buf, &buf, 10); 1193 rc = simple_strtoul(buf, &buf, 10);
1195 if(*buf){ 1194 if(*buf){
1196 printk("debug: no integer specified!\n");
1197 rc = -EINVAL; 1195 rc = -EINVAL;
1198 } 1196 }
1199 return rc; 1197 return rc;
@@ -1340,19 +1338,12 @@ static void debug_flush(debug_info_t* id, int area)
1340 memset(id->areas[i][j], 0, PAGE_SIZE); 1338 memset(id->areas[i][j], 0, PAGE_SIZE);
1341 } 1339 }
1342 } 1340 }
1343 printk(KERN_INFO "debug: %s: all areas flushed\n",id->name);
1344 } else if(area >= 0 && area < id->nr_areas) { 1341 } else if(area >= 0 && area < id->nr_areas) {
1345 id->active_entries[area] = 0; 1342 id->active_entries[area] = 0;
1346 id->active_pages[area] = 0; 1343 id->active_pages[area] = 0;
1347 for(i = 0; i < id->pages_per_area; i++) { 1344 for(i = 0; i < id->pages_per_area; i++) {
1348 memset(id->areas[area][i],0,PAGE_SIZE); 1345 memset(id->areas[area][i],0,PAGE_SIZE);
1349 } 1346 }
1350 printk(KERN_INFO "debug: %s: area %i has been flushed\n",
1351 id->name, area);
1352 } else {
1353 printk(KERN_INFO
1354 "debug: %s: area %i cannot be flushed (range: %i - %i)\n",
1355 id->name, area, 0, id->nr_areas-1);
1356 } 1347 }
1357 spin_unlock_irqrestore(&id->lock,flags); 1348 spin_unlock_irqrestore(&id->lock,flags);
1358} 1349}
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d0e09684b9ce..2a2ca268b1dd 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/pfn.h> 15#include <linux/pfn.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <asm/ebcdic.h>
17#include <asm/ipl.h> 18#include <asm/ipl.h>
18#include <asm/lowcore.h> 19#include <asm/lowcore.h>
19#include <asm/processor.h> 20#include <asm/processor.h>
@@ -26,12 +27,40 @@
26/* 27/*
27 * Create a Kernel NSS if the SAVESYS= parameter is defined 28 * Create a Kernel NSS if the SAVESYS= parameter is defined
28 */ 29 */
29#define DEFSYS_CMD_SIZE 96 30#define DEFSYS_CMD_SIZE 128
30#define SAVESYS_CMD_SIZE 32 31#define SAVESYS_CMD_SIZE 32
31 32
32char kernel_nss_name[NSS_NAME_SIZE + 1]; 33char kernel_nss_name[NSS_NAME_SIZE + 1];
33 34
35static void __init setup_boot_command_line(void);
36
37
34#ifdef CONFIG_SHARED_KERNEL 38#ifdef CONFIG_SHARED_KERNEL
39int __init savesys_ipl_nss(char *cmd, const int cmdlen);
40
41asm(
42 " .section .init.text,\"ax\",@progbits\n"
43 " .align 4\n"
44 " .type savesys_ipl_nss, @function\n"
45 "savesys_ipl_nss:\n"
46#ifdef CONFIG_64BIT
47 " stmg 6,15,48(15)\n"
48 " lgr 14,3\n"
49 " sam31\n"
50 " diag 2,14,0x8\n"
51 " sam64\n"
52 " lgr 2,14\n"
53 " lmg 6,15,48(15)\n"
54#else
55 " stm 6,15,24(15)\n"
56 " lr 14,3\n"
57 " diag 2,14,0x8\n"
58 " lr 2,14\n"
59 " lm 6,15,24(15)\n"
60#endif
61 " br 14\n"
62 " .size savesys_ipl_nss, .-savesys_ipl_nss\n");
63
35static noinline __init void create_kernel_nss(void) 64static noinline __init void create_kernel_nss(void)
36{ 65{
37 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; 66 unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
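Note: the stub above issues DIAGNOSE X'08', the z/VM interface for running a CP command from a guest: one register points at the command text (which is why the caller converts it to EBCDIC first), a second holds the command length, and that length register is reused by CP for its response/return code. On 64-bit the stub drops to 31-bit addressing around the diagnose (sam31/sam64), since DIAG 8 takes 31-bit parameters. A C-level view of the call, using only names that appear in these hunks:

	/* cmd must already be EBCDIC; returns CP's response/return code */
	len = strlen(savesys_cmd);
	ASCEBC(savesys_cmd, len);
	response = savesys_ipl_nss(savesys_cmd, len);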
@@ -39,6 +68,7 @@ static noinline __init void create_kernel_nss(void)
 	unsigned int sinitrd_pfn, einitrd_pfn;
 #endif
 	int response;
+	size_t len;
 	char *savesys_ptr;
 	char upper_command_line[COMMAND_LINE_SIZE];
 	char defsys_cmd[DEFSYS_CMD_SIZE];
@@ -49,8 +79,8 @@ static noinline __init void create_kernel_nss(void)
 		return;
 
 	/* Convert COMMAND_LINE to upper case */
-	for (i = 0; i < strlen(COMMAND_LINE); i++)
-		upper_command_line[i] = toupper(COMMAND_LINE[i]);
+	for (i = 0; i < strlen(boot_command_line); i++)
+		upper_command_line[i] = toupper(boot_command_line[i]);
 
 	savesys_ptr = strstr(upper_command_line, "SAVESYS=");
 
@@ -83,7 +113,8 @@ static noinline __init void create_kernel_nss(void)
 	}
 #endif
 
-	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
+	sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK PARMREGS=0-13",
+		defsys_cmd, min_size);
 	sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
 		kernel_nss_name, kernel_nss_name);
 
@@ -94,13 +125,24 @@ static noinline __init void create_kernel_nss(void)
 		return;
 	}
 
-	__cpcmd(savesys_cmd, NULL, 0, &response);
+	len = strlen(savesys_cmd);
+	ASCEBC(savesys_cmd, len);
+	response = savesys_ipl_nss(savesys_cmd, len);
 
-	if (response != strlen(savesys_cmd)) {
+	/* On success: response is equal to the command size,
+	 *	       max SAVESYS_CMD_SIZE
+	 * On error: response contains the numeric portion of cp error message.
+	 *	     for SAVESYS it will be >= 263
+	 */
+	if (response > SAVESYS_CMD_SIZE) {
 		kernel_nss_name[0] = '\0';
 		return;
 	}
 
+	/* re-setup boot command line with new ipl vm parms */
+	ipl_update_parameters();
+	setup_boot_command_line();
+
 	ipl_flags = IPL_NSS_VALID;
 }
 
@@ -141,109 +183,11 @@ static noinline __init void detect_machine_type(void)
 	if (cpuinfo->cpu_id.version == 0xff)
 		machine_flags |= MACHINE_FLAG_VM;
 
-	/* Running on a P/390 ? */
-	if (cpuinfo->cpu_id.machine == 0x7490)
-		machine_flags |= MACHINE_FLAG_P390;
-
 	/* Running under KVM ? */
 	if (cpuinfo->cpu_id.version == 0xfe)
 		machine_flags |= MACHINE_FLAG_KVM;
 }
 
-#ifdef CONFIG_64BIT
-static noinline __init int memory_fast_detect(void)
-{
-	unsigned long val0 = 0;
-	unsigned long val1 = 0xc;
-	int ret = -ENOSYS;
-
-	if (ipl_flags & IPL_NSS_VALID)
-		return -ENOSYS;
-
-	asm volatile(
-		"	diag	%1,%2,0x260\n"
-		"0:	lhi	%0,0\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
-
-	if (ret || val0 != val1)
-		return -ENOSYS;
-
-	memory_chunk[0].size = val0 + 1;
-	return 0;
-}
-#else
-static inline int memory_fast_detect(void)
-{
-	return -ENOSYS;
-}
-#endif
-
-static inline __init unsigned long __tprot(unsigned long addr)
-{
-	int cc = -1;
-
-	asm volatile(
-		"	tprot	0(%1),0\n"
-		"0:	ipm	%0\n"
-		"	srl	%0,28\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "+d" (cc) : "a" (addr) : "cc");
-	return (unsigned long)cc;
-}
-
-/* Checking memory in 128KB increments. */
-#define CHUNK_INCR	(1UL << 17)
-#define ADDR2G		(1UL << 31)
-
-static noinline __init void find_memory_chunks(unsigned long memsize)
-{
-	unsigned long addr = 0, old_addr = 0;
-	unsigned long old_cc = CHUNK_READ_WRITE;
-	unsigned long cc;
-	int chunk = 0;
-
-	while (chunk < MEMORY_CHUNKS) {
-		cc = __tprot(addr);
-		while (cc == old_cc) {
-			addr += CHUNK_INCR;
-			if (memsize && addr >= memsize)
-				break;
-#ifndef CONFIG_64BIT
-			if (addr == ADDR2G)
-				break;
-#endif
-			cc = __tprot(addr);
-		}
-
-		if (old_addr != addr &&
-		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
-			memory_chunk[chunk].addr = old_addr;
-			memory_chunk[chunk].size = addr - old_addr;
-			memory_chunk[chunk].type = old_cc;
-			chunk++;
-		}
-
-		old_addr = addr;
-		old_cc = cc;
-
-#ifndef CONFIG_64BIT
-		if (addr == ADDR2G)
-			break;
-#endif
-		/*
-		 * Finish memory detection at the first hole
-		 * if storage size is unknown.
-		 */
-		if (cc == -1UL && !memsize)
-			break;
-		if (memsize && addr >= memsize)
-			break;
-	}
-}
-
 static __init void early_pgm_check_handler(void)
 {
 	unsigned long addr;
@@ -380,23 +324,61 @@ static __init void detect_machine_facilities(void)
 #endif
 }
 
+static __init void rescue_initrd(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	/*
+	 * Move the initrd right behind the bss section in case it starts
+	 * within the bss section. So we don't overwrite it when the bss
+	 * section gets cleared.
+	 */
+	if (!INITRD_START || !INITRD_SIZE)
+		return;
+	if (INITRD_START >= (unsigned long) __bss_stop)
+		return;
+	memmove(__bss_stop, (void *) INITRD_START, INITRD_SIZE);
+	INITRD_START = (unsigned long) __bss_stop;
+#endif
+}
+
+/* Set up boot command line */
+static void __init setup_boot_command_line(void)
+{
+	char *parm = NULL;
+
+	/* copy arch command line */
+	strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+	boot_command_line[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+
+	/* append IPL PARM data to the boot command line */
+	if (MACHINE_IS_VM) {
+		parm = boot_command_line + strlen(boot_command_line);
+		*parm++ = ' ';
+		get_ipl_vmparm(parm);
+		if (parm[0] == '=')
+			memmove(boot_command_line, parm + 1, strlen(parm));
+	}
+}
+
+
 /*
  * Save ipl parameters, clear bss memory, initialize storage keys
  * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
 void __init startup_init(void)
 {
-	unsigned long long memsize;
-
 	ipl_save_parameters();
+	rescue_initrd();
 	clear_bss_section();
 	init_kernel_storage_key();
 	lockdep_init();
 	lockdep_off();
-	detect_machine_type();
-	create_kernel_nss();
 	sort_main_extable();
 	setup_lowcore_early();
+	detect_machine_type();
+	ipl_update_parameters();
+	setup_boot_command_line();
+	create_kernel_nss();
 	detect_mvpg();
 	detect_ieee();
 	detect_csp();
@@ -404,18 +386,7 @@ void __init startup_init(void)
 	detect_diag44();
 	detect_machine_facilities();
 	setup_hpage();
-	sclp_read_info_early();
 	sclp_facilities_detect();
-	memsize = sclp_memory_detect();
-#ifndef CONFIG_64BIT
-	/*
-	 * Can't deal with more than 2G in 31 bit addressing mode, so
-	 * limit the value in order to avoid strange side effects.
-	 */
-	if (memsize > ADDR2G)
-		memsize = ADDR2G;
-#endif
-	if (memory_fast_detect() < 0)
-		find_memory_chunks((unsigned long) memsize);
+	detect_memory_layout(memory_chunk);
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 532542447d66..54b2779b5e2f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -14,6 +14,7 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/reboot.h> 15#include <linux/reboot.h>
16#include <linux/ctype.h> 16#include <linux/ctype.h>
17#include <linux/fs.h>
17#include <asm/ipl.h> 18#include <asm/ipl.h>
18#include <asm/smp.h> 19#include <asm/smp.h>
19#include <asm/setup.h> 20#include <asm/setup.h>
@@ -22,6 +23,7 @@
22#include <asm/ebcdic.h> 23#include <asm/ebcdic.h>
23#include <asm/reset.h> 24#include <asm/reset.h>
24#include <asm/sclp.h> 25#include <asm/sclp.h>
26#include <asm/setup.h>
25 27
26#define IPL_PARM_BLOCK_VERSION 0 28#define IPL_PARM_BLOCK_VERSION 0
27 29
@@ -121,6 +123,7 @@ enum ipl_method {
121 REIPL_METHOD_FCP_RO_VM, 123 REIPL_METHOD_FCP_RO_VM,
122 REIPL_METHOD_FCP_DUMP, 124 REIPL_METHOD_FCP_DUMP,
123 REIPL_METHOD_NSS, 125 REIPL_METHOD_NSS,
126 REIPL_METHOD_NSS_DIAG,
124 REIPL_METHOD_DEFAULT, 127 REIPL_METHOD_DEFAULT,
125}; 128};
126 129
@@ -134,14 +137,15 @@ enum dump_method {
134 137
135static int diag308_set_works = 0; 138static int diag308_set_works = 0;
136 139
140static struct ipl_parameter_block ipl_block;
141
137static int reipl_capabilities = IPL_TYPE_UNKNOWN; 142static int reipl_capabilities = IPL_TYPE_UNKNOWN;
138 143
139static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN; 144static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
140static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT; 145static enum ipl_method reipl_method = REIPL_METHOD_DEFAULT;
141static struct ipl_parameter_block *reipl_block_fcp; 146static struct ipl_parameter_block *reipl_block_fcp;
142static struct ipl_parameter_block *reipl_block_ccw; 147static struct ipl_parameter_block *reipl_block_ccw;
143 148static struct ipl_parameter_block *reipl_block_nss;
144static char reipl_nss_name[NSS_NAME_SIZE + 1];
145 149
146static int dump_capabilities = DUMP_TYPE_NONE; 150static int dump_capabilities = DUMP_TYPE_NONE;
147static enum dump_type dump_type = DUMP_TYPE_NONE; 151static enum dump_type dump_type = DUMP_TYPE_NONE;
@@ -263,6 +267,56 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr,
 
 static struct kobj_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
 
+/* VM IPL PARM routines */
+static void reipl_get_ascii_vmparm(char *dest,
+				   const struct ipl_parameter_block *ipb)
+{
+	int i;
+	int len = 0;
+	char has_lowercase = 0;
+
+	if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) &&
+	    (ipb->ipl_info.ccw.vm_parm_len > 0)) {
+
+		len = ipb->ipl_info.ccw.vm_parm_len;
+		memcpy(dest, ipb->ipl_info.ccw.vm_parm, len);
+		/* If at least one character is lowercase, we assume mixed
+		 * case; otherwise we convert everything to lowercase.
+		 */
+		for (i = 0; i < len; i++)
+			if ((dest[i] > 0x80 && dest[i] < 0x8a) || /* a-i */
+			    (dest[i] > 0x90 && dest[i] < 0x9a) || /* j-r */
+			    (dest[i] > 0xa1 && dest[i] < 0xaa)) { /* s-z */
+				has_lowercase = 1;
+				break;
+			}
+		if (!has_lowercase)
+			EBC_TOLOWER(dest, len);
+		EBCASC(dest, len);
+	}
+	dest[len] = 0;
+}
+
+void get_ipl_vmparm(char *dest)
+{
+	if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW))
+		reipl_get_ascii_vmparm(dest, &ipl_block);
+	else
+		dest[0] = 0;
+}
+
+static ssize_t ipl_vm_parm_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *page)
+{
+	char parm[DIAG308_VMPARM_SIZE + 1] = {};
+
+	get_ipl_vmparm(parm);
+	return sprintf(page, "%s\n", parm);
+}
+
+static struct kobj_attribute sys_ipl_vm_parm_attr =
+	__ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL);
+
 static ssize_t sys_ipl_device_show(struct kobject *kobj,
 				   struct kobj_attribute *attr, char *page)
 {
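Note: the three range tests in reipl_get_ascii_vmparm() encode the fact that EBCDIC lowercase letters are not contiguous: 'a'-'i' occupy 0x81-0x89, 'j'-'r' occupy 0x91-0x99, and 's'-'z' occupy 0xA2-0xA9. A standalone sketch of the same predicate (the helper name is hypothetical):

	/* EBCDIC lowercase check; equivalent to the three range tests above */
	static int ebcdic_islower(unsigned char c)
	{
		return (c >= 0x81 && c <= 0x89) ||	/* a-i */
		       (c >= 0x91 && c <= 0x99) ||	/* j-r */
		       (c >= 0xa2 && c <= 0xa9);	/* s-z */
	}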
@@ -285,14 +339,8 @@ static struct kobj_attribute sys_ipl_device_attr =
 static ssize_t ipl_parameter_read(struct kobject *kobj, struct bin_attribute *attr,
 				  char *buf, loff_t off, size_t count)
 {
-	unsigned int size = IPL_PARMBLOCK_SIZE;
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off, IPL_PARMBLOCK_START,
+				       IPL_PARMBLOCK_SIZE);
 }
 
 static struct bin_attribute ipl_parameter_attr = {
@@ -310,12 +358,7 @@ static ssize_t ipl_scp_data_read(struct kobject *kobj, struct bin_attribute *attr,
 	unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
 	void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
 
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, scp_data + off, count);
-	return count;
+	return memory_read_from_buffer(buf, count, &off, scp_data, size);
 }
 
 static struct bin_attribute ipl_scp_data_attr = {
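Note: both binary-attribute read routines now delegate their open-coded clamp-and-copy to memory_read_from_buffer(), the kernel-to-kernel sibling of simple_read_from_buffer() — it memcpy()s instead of copy_to_user(), which fits sysfs bin_attribute handlers whose destination is already a kernel buffer. Its prototype, for reference (lib/string.c of this era):

	/* clamp [*ppos, *ppos + count) to 'available' bytes of 'from',
	 * memcpy into 'to', advance *ppos, return the number of bytes copied */
	ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
					const void *from, size_t available);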
@@ -370,15 +413,27 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj,
 static struct kobj_attribute sys_ipl_ccw_loadparm_attr =
 	__ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL);
 
-static struct attribute *ipl_ccw_attrs[] = {
+static struct attribute *ipl_ccw_attrs_vm[] = {
 	&sys_ipl_type_attr.attr,
 	&sys_ipl_device_attr.attr,
 	&sys_ipl_ccw_loadparm_attr.attr,
+	&sys_ipl_vm_parm_attr.attr,
 	NULL,
 };
 
-static struct attribute_group ipl_ccw_attr_group = {
-	.attrs = ipl_ccw_attrs,
+static struct attribute *ipl_ccw_attrs_lpar[] = {
+	&sys_ipl_type_attr.attr,
+	&sys_ipl_device_attr.attr,
+	&sys_ipl_ccw_loadparm_attr.attr,
+	NULL,
+};
+
+static struct attribute_group ipl_ccw_attr_group_vm = {
+	.attrs = ipl_ccw_attrs_vm,
+};
+
+static struct attribute_group ipl_ccw_attr_group_lpar = {
+	.attrs = ipl_ccw_attrs_lpar
 };
 
 /* NSS ipl device attributes */
@@ -388,6 +443,8 @@ DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
 static struct attribute *ipl_nss_attrs[] = {
 	&sys_ipl_type_attr.attr,
 	&sys_ipl_nss_name_attr.attr,
+	&sys_ipl_ccw_loadparm_attr.attr,
+	&sys_ipl_vm_parm_attr.attr,
 	NULL,
 };
 
@@ -450,7 +507,12 @@ static int __init ipl_init(void)
 	}
 	switch (ipl_info.type) {
 	case IPL_TYPE_CCW:
-		rc = sysfs_create_group(&ipl_kset->kobj, &ipl_ccw_attr_group);
+		if (MACHINE_IS_VM)
+			rc = sysfs_create_group(&ipl_kset->kobj,
+						&ipl_ccw_attr_group_vm);
+		else
+			rc = sysfs_create_group(&ipl_kset->kobj,
+						&ipl_ccw_attr_group_lpar);
 		break;
 	case IPL_TYPE_FCP:
 	case IPL_TYPE_FCP_DUMP:
@@ -481,6 +543,83 @@ static struct shutdown_action __refdata ipl_action = {
481 * reipl shutdown action: Reboot Linux on shutdown. 543 * reipl shutdown action: Reboot Linux on shutdown.
482 */ 544 */
483 545
546/* VM IPL PARM attributes */
547static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb,
548 char *page)
549{
550 char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
551
552 reipl_get_ascii_vmparm(vmparm, ipb);
553 return sprintf(page, "%s\n", vmparm);
554}
555
556static ssize_t reipl_generic_vmparm_store(struct ipl_parameter_block *ipb,
557 size_t vmparm_max,
558 const char *buf, size_t len)
559{
560 int i, ip_len;
561
562 /* ignore trailing newline */
563 ip_len = len;
564 if ((len > 0) && (buf[len - 1] == '\n'))
565 ip_len--;
566
567 if (ip_len > vmparm_max)
568 return -EINVAL;
569
570 /* parm is used to store kernel options, check for common chars */
571 for (i = 0; i < ip_len; i++)
572 if (!(isalnum(buf[i]) || isascii(buf[i]) || isprint(buf[i])))
573 return -EINVAL;
574
575 memset(ipb->ipl_info.ccw.vm_parm, 0, DIAG308_VMPARM_SIZE);
576 ipb->ipl_info.ccw.vm_parm_len = ip_len;
577 if (ip_len > 0) {
578 ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
579 memcpy(ipb->ipl_info.ccw.vm_parm, buf, ip_len);
580 ASCEBC(ipb->ipl_info.ccw.vm_parm, ip_len);
581 } else {
582 ipb->ipl_info.ccw.vm_flags &= ~DIAG308_VM_FLAGS_VP_VALID;
583 }
584
585 return len;
586}
587
588/* NSS wrapper */
589static ssize_t reipl_nss_vmparm_show(struct kobject *kobj,
590 struct kobj_attribute *attr, char *page)
591{
592 return reipl_generic_vmparm_show(reipl_block_nss, page);
593}
594
595static ssize_t reipl_nss_vmparm_store(struct kobject *kobj,
596 struct kobj_attribute *attr,
597 const char *buf, size_t len)
598{
599 return reipl_generic_vmparm_store(reipl_block_nss, 56, buf, len);
600}
601
602/* CCW wrapper */
603static ssize_t reipl_ccw_vmparm_show(struct kobject *kobj,
604 struct kobj_attribute *attr, char *page)
605{
606 return reipl_generic_vmparm_show(reipl_block_ccw, page);
607}
608
609static ssize_t reipl_ccw_vmparm_store(struct kobject *kobj,
610 struct kobj_attribute *attr,
611 const char *buf, size_t len)
612{
613 return reipl_generic_vmparm_store(reipl_block_ccw, 64, buf, len);
614}
615
616static struct kobj_attribute sys_reipl_nss_vmparm_attr =
617 __ATTR(parm, S_IRUGO | S_IWUSR, reipl_nss_vmparm_show,
618 reipl_nss_vmparm_store);
619static struct kobj_attribute sys_reipl_ccw_vmparm_attr =
620 __ATTR(parm, S_IRUGO | S_IWUSR, reipl_ccw_vmparm_show,
621 reipl_ccw_vmparm_store);
622
484/* FCP reipl device attributes */ 623/* FCP reipl device attributes */
485 624
486DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", 625DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
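
The vmparm store handlers above share one pattern: strip an optional trailing newline, reject input longer than the block's limit (56 bytes for NSS, 64 for CCW, per the wrappers), then convert to EBCDIC and set or clear DIAG308_VM_FLAGS_VP_VALID. A minimal userspace sketch of just the trim-and-bound step (the EBCDIC conversion is s390-specific and omitted here):

#include <stdio.h>

/* Sketch of the length handling in reipl_generic_vmparm_store():
 * ignore one trailing newline, then enforce the per-block maximum.
 * Returns the effective parameter length, or -1 for "too long". */
static int vmparm_effective_len(const char *buf, size_t len, size_t max)
{
        size_t ip_len = len;

        if (ip_len > 0 && buf[ip_len - 1] == '\n')
                ip_len--;
        if (ip_len > max)
                return -1;
        return (int)ip_len;
}

int main(void)
{
        printf("%d\n", vmparm_effective_len("dasd=1234 root=/dev/dasda1\n",
                                            27, 64));    /* 26: newline dropped */
        printf("%d\n", vmparm_effective_len("x", 1, 0)); /* -1: over the limit */
        return 0;
}
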
@@ -513,27 +652,26 @@ static struct attribute_group reipl_fcp_attr_group = {
513DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", 652DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
514 reipl_block_ccw->ipl_info.ccw.devno); 653 reipl_block_ccw->ipl_info.ccw.devno);
515 654
516static void reipl_get_ascii_loadparm(char *loadparm) 655static void reipl_get_ascii_loadparm(char *loadparm,
656 struct ipl_parameter_block *ibp)
517{ 657{
518 memcpy(loadparm, &reipl_block_ccw->ipl_info.ccw.load_param, 658 memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN);
519 LOADPARM_LEN);
520 EBCASC(loadparm, LOADPARM_LEN); 659 EBCASC(loadparm, LOADPARM_LEN);
521 loadparm[LOADPARM_LEN] = 0; 660 loadparm[LOADPARM_LEN] = 0;
522 strstrip(loadparm); 661 strstrip(loadparm);
523} 662}
524 663
525static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj, 664static ssize_t reipl_generic_loadparm_show(struct ipl_parameter_block *ipb,
526 struct kobj_attribute *attr, char *page) 665 char *page)
527{ 666{
528 char buf[LOADPARM_LEN + 1]; 667 char buf[LOADPARM_LEN + 1];
529 668
530 reipl_get_ascii_loadparm(buf); 669 reipl_get_ascii_loadparm(buf, ipb);
531 return sprintf(page, "%s\n", buf); 670 return sprintf(page, "%s\n", buf);
532} 671}
533 672
534static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj, 673static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
535 struct kobj_attribute *attr, 674 const char *buf, size_t len)
536 const char *buf, size_t len)
537{ 675{
538 int i, lp_len; 676 int i, lp_len;
539 677
@@ -552,35 +690,128 @@ static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
552 return -EINVAL; 690 return -EINVAL;
553 } 691 }
554 /* initialize loadparm with blanks */ 692 /* initialize loadparm with blanks */
555 memset(&reipl_block_ccw->ipl_info.ccw.load_param, ' ', LOADPARM_LEN); 693 memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN);
556 /* copy and convert to ebcdic */ 694 /* copy and convert to ebcdic */
557 memcpy(&reipl_block_ccw->ipl_info.ccw.load_param, buf, lp_len); 695 memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len);
558 ASCEBC(reipl_block_ccw->ipl_info.ccw.load_param, LOADPARM_LEN); 696 ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN);
559 return len; 697 return len;
560} 698}
561 699
700/* NSS wrapper */
701static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
702 struct kobj_attribute *attr, char *page)
703{
704 return reipl_generic_loadparm_show(reipl_block_nss, page);
705}
706
707static ssize_t reipl_nss_loadparm_store(struct kobject *kobj,
708 struct kobj_attribute *attr,
709 const char *buf, size_t len)
710{
711 return reipl_generic_loadparm_store(reipl_block_nss, buf, len);
712}
713
714/* CCW wrapper */
715static ssize_t reipl_ccw_loadparm_show(struct kobject *kobj,
716 struct kobj_attribute *attr, char *page)
717{
718 return reipl_generic_loadparm_show(reipl_block_ccw, page);
719}
720
721static ssize_t reipl_ccw_loadparm_store(struct kobject *kobj,
722 struct kobj_attribute *attr,
723 const char *buf, size_t len)
724{
725 return reipl_generic_loadparm_store(reipl_block_ccw, buf, len);
726}
727
562static struct kobj_attribute sys_reipl_ccw_loadparm_attr = 728static struct kobj_attribute sys_reipl_ccw_loadparm_attr =
563 __ATTR(loadparm, 0644, reipl_ccw_loadparm_show, 729 __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_ccw_loadparm_show,
564 reipl_ccw_loadparm_store); 730 reipl_ccw_loadparm_store);
565 731
566static struct attribute *reipl_ccw_attrs[] = { 732static struct attribute *reipl_ccw_attrs_vm[] = {
567 &sys_reipl_ccw_device_attr.attr, 733 &sys_reipl_ccw_device_attr.attr,
568 &sys_reipl_ccw_loadparm_attr.attr, 734 &sys_reipl_ccw_loadparm_attr.attr,
735 &sys_reipl_ccw_vmparm_attr.attr,
569 NULL, 736 NULL,
570}; 737};
571 738
572static struct attribute_group reipl_ccw_attr_group = { 739static struct attribute *reipl_ccw_attrs_lpar[] = {
740 &sys_reipl_ccw_device_attr.attr,
741 &sys_reipl_ccw_loadparm_attr.attr,
742 NULL,
743};
744
745static struct attribute_group reipl_ccw_attr_group_vm = {
746 .name = IPL_CCW_STR,
747 .attrs = reipl_ccw_attrs_vm,
748};
749
750static struct attribute_group reipl_ccw_attr_group_lpar = {
573 .name = IPL_CCW_STR, 751 .name = IPL_CCW_STR,
574 .attrs = reipl_ccw_attrs, 752 .attrs = reipl_ccw_attrs_lpar,
575}; 753};
576 754
577 755
578/* NSS reipl device attributes */ 756/* NSS reipl device attributes */
757static void reipl_get_ascii_nss_name(char *dst,
758 struct ipl_parameter_block *ipb)
759{
760 memcpy(dst, ipb->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
761 EBCASC(dst, NSS_NAME_SIZE);
762 dst[NSS_NAME_SIZE] = 0;
763}
764
765static ssize_t reipl_nss_name_show(struct kobject *kobj,
766 struct kobj_attribute *attr, char *page)
767{
768 char nss_name[NSS_NAME_SIZE + 1] = {};
579 769
580DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name); 770 reipl_get_ascii_nss_name(nss_name, reipl_block_nss);
771 return sprintf(page, "%s\n", nss_name);
772}
773
774static ssize_t reipl_nss_name_store(struct kobject *kobj,
775 struct kobj_attribute *attr,
776 const char *buf, size_t len)
777{
778 int nss_len;
779
780 /* ignore trailing newline */
781 nss_len = len;
782 if ((len > 0) && (buf[len - 1] == '\n'))
783 nss_len--;
784
785 if (nss_len > NSS_NAME_SIZE)
786 return -EINVAL;
787
788 memset(reipl_block_nss->ipl_info.ccw.nss_name, 0x40, NSS_NAME_SIZE);
789 if (nss_len > 0) {
790 reipl_block_nss->ipl_info.ccw.vm_flags |=
791 DIAG308_VM_FLAGS_NSS_VALID;
792 memcpy(reipl_block_nss->ipl_info.ccw.nss_name, buf, nss_len);
793 ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
794 EBC_TOUPPER(reipl_block_nss->ipl_info.ccw.nss_name, nss_len);
795 } else {
796 reipl_block_nss->ipl_info.ccw.vm_flags &=
797 ~DIAG308_VM_FLAGS_NSS_VALID;
798 }
799
800 return len;
801}
802
803static struct kobj_attribute sys_reipl_nss_name_attr =
804 __ATTR(name, S_IRUGO | S_IWUSR, reipl_nss_name_show,
805 reipl_nss_name_store);
806
807static struct kobj_attribute sys_reipl_nss_loadparm_attr =
808 __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_nss_loadparm_show,
809 reipl_nss_loadparm_store);
581 810
582static struct attribute *reipl_nss_attrs[] = { 811static struct attribute *reipl_nss_attrs[] = {
583 &sys_reipl_nss_name_attr.attr, 812 &sys_reipl_nss_name_attr.attr,
813 &sys_reipl_nss_loadparm_attr.attr,
814 &sys_reipl_nss_vmparm_attr.attr,
584 NULL, 815 NULL,
585}; 816};
586 817
@@ -617,7 +848,10 @@ static int reipl_set_type(enum ipl_type type)
617 reipl_method = REIPL_METHOD_FCP_DUMP; 848 reipl_method = REIPL_METHOD_FCP_DUMP;
618 break; 849 break;
619 case IPL_TYPE_NSS: 850 case IPL_TYPE_NSS:
620 reipl_method = REIPL_METHOD_NSS; 851 if (diag308_set_works)
852 reipl_method = REIPL_METHOD_NSS_DIAG;
853 else
854 reipl_method = REIPL_METHOD_NSS;
621 break; 855 break;
622 case IPL_TYPE_UNKNOWN: 856 case IPL_TYPE_UNKNOWN:
623 reipl_method = REIPL_METHOD_DEFAULT; 857 reipl_method = REIPL_METHOD_DEFAULT;
@@ -655,11 +889,38 @@ static struct kobj_attribute reipl_type_attr =
655 889
656static struct kset *reipl_kset; 890static struct kset *reipl_kset;
657 891
892static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb,
893 const enum ipl_method m)
894{
895 char loadparm[LOADPARM_LEN + 1] = {};
896 char vmparm[DIAG308_VMPARM_SIZE + 1] = {};
897 char nss_name[NSS_NAME_SIZE + 1] = {};
898 size_t pos = 0;
899
900 reipl_get_ascii_loadparm(loadparm, ipb);
901 reipl_get_ascii_nss_name(nss_name, ipb);
902 reipl_get_ascii_vmparm(vmparm, ipb);
903
904 switch (m) {
905 case REIPL_METHOD_CCW_VM:
906 pos = sprintf(dst, "IPL %X CLEAR", ipb->ipl_info.ccw.devno);
907 break;
908 case REIPL_METHOD_NSS:
909 pos = sprintf(dst, "IPL %s", nss_name);
910 break;
911 default:
912 break;
913 }
914 if (strlen(loadparm) > 0)
915 pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
916 if (strlen(vmparm) > 0)
917 sprintf(dst + pos, " PARM %s", vmparm);
918}
919
658static void reipl_run(struct shutdown_trigger *trigger) 920static void reipl_run(struct shutdown_trigger *trigger)
659{ 921{
660 struct ccw_dev_id devid; 922 struct ccw_dev_id devid;
661 static char buf[100]; 923 static char buf[128];
662 char loadparm[LOADPARM_LEN + 1];
663 924
664 switch (reipl_method) { 925 switch (reipl_method) {
665 case REIPL_METHOD_CCW_CIO: 926 case REIPL_METHOD_CCW_CIO:
@@ -668,13 +929,7 @@ static void reipl_run(struct shutdown_trigger *trigger)
668 reipl_ccw_dev(&devid); 929 reipl_ccw_dev(&devid);
669 break; 930 break;
670 case REIPL_METHOD_CCW_VM: 931 case REIPL_METHOD_CCW_VM:
671 reipl_get_ascii_loadparm(loadparm); 932 get_ipl_string(buf, reipl_block_ccw, REIPL_METHOD_CCW_VM);
672 if (strlen(loadparm) == 0)
673 sprintf(buf, "IPL %X CLEAR",
674 reipl_block_ccw->ipl_info.ccw.devno);
675 else
676 sprintf(buf, "IPL %X CLEAR LOADPARM '%s'",
677 reipl_block_ccw->ipl_info.ccw.devno, loadparm);
678 __cpcmd(buf, NULL, 0, NULL); 933 __cpcmd(buf, NULL, 0, NULL);
679 break; 934 break;
680 case REIPL_METHOD_CCW_DIAG: 935 case REIPL_METHOD_CCW_DIAG:
@@ -691,8 +946,12 @@ static void reipl_run(struct shutdown_trigger *trigger)
691 case REIPL_METHOD_FCP_RO_VM: 946 case REIPL_METHOD_FCP_RO_VM:
692 __cpcmd("IPL", NULL, 0, NULL); 947 __cpcmd("IPL", NULL, 0, NULL);
693 break; 948 break;
949 case REIPL_METHOD_NSS_DIAG:
950 diag308(DIAG308_SET, reipl_block_nss);
951 diag308(DIAG308_IPL, NULL);
952 break;
694 case REIPL_METHOD_NSS: 953 case REIPL_METHOD_NSS:
695 sprintf(buf, "IPL %s", reipl_nss_name); 954 get_ipl_string(buf, reipl_block_nss, REIPL_METHOD_NSS);
696 __cpcmd(buf, NULL, 0, NULL); 955 __cpcmd(buf, NULL, 0, NULL);
697 break; 956 break;
698 case REIPL_METHOD_DEFAULT: 957 case REIPL_METHOD_DEFAULT:
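
For the two CP-command paths above, get_ipl_string() yields commands such as IPL 1234 CLEAR LOADPARM 'AB' PARM dasd=1234 (CCW under z/VM) or IPL LNXNSS (NSS). A userspace demo of the assembly logic, with device number, loadparm and parm values invented for illustration:

#include <stdio.h>
#include <string.h>

/* Mirrors the string assembly in get_ipl_string(); real values come
 * from the reipl parameter block, these are made up. */
static void build_ipl_string(char *dst, int ccw, unsigned int devno,
                             const char *nss, const char *loadparm,
                             const char *vmparm)
{
        size_t pos;

        if (ccw)
                pos = sprintf(dst, "IPL %X CLEAR", devno);
        else
                pos = sprintf(dst, "IPL %s", nss);
        if (strlen(loadparm) > 0)
                pos += sprintf(dst + pos, " LOADPARM '%s'", loadparm);
        if (strlen(vmparm) > 0)
                sprintf(dst + pos, " PARM %s", vmparm);
}

int main(void)
{
        char buf[128];

        build_ipl_string(buf, 1, 0x1234, "", "AB", "dasd=1234");
        printf("%s\n", buf);   /* IPL 1234 CLEAR LOADPARM 'AB' PARM dasd=1234 */
        build_ipl_string(buf, 0, 0, "LNXNSS", "", "");
        printf("%s\n", buf);   /* IPL LNXNSS */
        return 0;
}
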
@@ -707,16 +966,36 @@ static void reipl_run(struct shutdown_trigger *trigger)
707 disabled_wait((unsigned long) __builtin_return_address(0)); 966 disabled_wait((unsigned long) __builtin_return_address(0));
708} 967}
709 968
710static void __init reipl_probe(void) 969static void reipl_block_ccw_init(struct ipl_parameter_block *ipb)
711{ 970{
712 void *buffer; 971 ipb->hdr.len = IPL_PARM_BLK_CCW_LEN;
972 ipb->hdr.version = IPL_PARM_BLOCK_VERSION;
973 ipb->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN;
974 ipb->hdr.pbt = DIAG308_IPL_TYPE_CCW;
975}
713 976
714 buffer = (void *) get_zeroed_page(GFP_KERNEL); 977static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb)
715 if (!buffer) 978{
716 return; 979 /* LOADPARM */
717 if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK) 980 /* check if read scp info worked and set loadparm */
718 diag308_set_works = 1; 981 if (sclp_ipl_info.is_valid)
719 free_page((unsigned long)buffer); 982 memcpy(ipb->ipl_info.ccw.load_parm,
983 &sclp_ipl_info.loadparm, LOADPARM_LEN);
984 else
985 /* read scp info failed: set empty loadparm (EBCDIC blanks) */
986 memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN);
987 ipb->hdr.flags = DIAG308_FLAGS_LP_VALID;
988
989 /* VM PARM */
990 if (MACHINE_IS_VM && diag308_set_works &&
991 (ipl_block.ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID)) {
992
993 ipb->ipl_info.ccw.vm_flags |= DIAG308_VM_FLAGS_VP_VALID;
994 ipb->ipl_info.ccw.vm_parm_len =
995 ipl_block.ipl_info.ccw.vm_parm_len;
996 memcpy(ipb->ipl_info.ccw.vm_parm,
997 ipl_block.ipl_info.ccw.vm_parm, DIAG308_VMPARM_SIZE);
998 }
720} 999}
721 1000
722static int __init reipl_nss_init(void) 1001static int __init reipl_nss_init(void)
@@ -725,10 +1004,31 @@ static int __init reipl_nss_init(void)
725 1004
726 if (!MACHINE_IS_VM) 1005 if (!MACHINE_IS_VM)
727 return 0; 1006 return 0;
1007
1008 reipl_block_nss = (void *) get_zeroed_page(GFP_KERNEL);
1009 if (!reipl_block_nss)
1010 return -ENOMEM;
1011
1012 if (!diag308_set_works)
1013 sys_reipl_nss_vmparm_attr.attr.mode = S_IRUGO;
1014
728 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group); 1015 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_nss_attr_group);
729 if (rc) 1016 if (rc)
730 return rc; 1017 return rc;
731 strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1); 1018
1019 reipl_block_ccw_init(reipl_block_nss);
1020 if (ipl_info.type == IPL_TYPE_NSS) {
1021 memset(reipl_block_nss->ipl_info.ccw.nss_name,
1022 ' ', NSS_NAME_SIZE);
1023 memcpy(reipl_block_nss->ipl_info.ccw.nss_name,
1024 kernel_nss_name, strlen(kernel_nss_name));
1025 ASCEBC(reipl_block_nss->ipl_info.ccw.nss_name, NSS_NAME_SIZE);
1026 reipl_block_nss->ipl_info.ccw.vm_flags |=
1027 DIAG308_VM_FLAGS_NSS_VALID;
1028
1029 reipl_block_ccw_fill_parms(reipl_block_nss);
1030 }
1031
732 reipl_capabilities |= IPL_TYPE_NSS; 1032 reipl_capabilities |= IPL_TYPE_NSS;
733 return 0; 1033 return 0;
734} 1034}
@@ -740,28 +1040,27 @@ static int __init reipl_ccw_init(void)
740 reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL); 1040 reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
741 if (!reipl_block_ccw) 1041 if (!reipl_block_ccw)
742 return -ENOMEM; 1042 return -ENOMEM;
743 rc = sysfs_create_group(&reipl_kset->kobj, &reipl_ccw_attr_group); 1043
744 if (rc) { 1044 if (MACHINE_IS_VM) {
745 free_page((unsigned long)reipl_block_ccw); 1045 if (!diag308_set_works)
746 return rc; 1046 sys_reipl_ccw_vmparm_attr.attr.mode = S_IRUGO;
1047 rc = sysfs_create_group(&reipl_kset->kobj,
1048 &reipl_ccw_attr_group_vm);
1049 } else {
1050 if(!diag308_set_works)
1051 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
1052 rc = sysfs_create_group(&reipl_kset->kobj,
1053 &reipl_ccw_attr_group_lpar);
747 } 1054 }
748 reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN; 1055 if (rc)
749 reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION; 1056 return rc;
750 reipl_block_ccw->hdr.blk0_len = IPL_PARM_BLK0_CCW_LEN; 1057
751 reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW; 1058 reipl_block_ccw_init(reipl_block_ccw);
752 reipl_block_ccw->hdr.flags = DIAG308_FLAGS_LP_VALID; 1059 if (ipl_info.type == IPL_TYPE_CCW) {
753 /* check if read scp info worked and set loadparm */
754 if (sclp_ipl_info.is_valid)
755 memcpy(reipl_block_ccw->ipl_info.ccw.load_param,
756 &sclp_ipl_info.loadparm, LOADPARM_LEN);
757 else
758 /* read scp info failed: set empty loadparm (EBCDIC blanks) */
759 memset(reipl_block_ccw->ipl_info.ccw.load_param, 0x40,
760 LOADPARM_LEN);
761 if (!MACHINE_IS_VM && !diag308_set_works)
762 sys_reipl_ccw_loadparm_attr.attr.mode = S_IRUGO;
763 if (ipl_info.type == IPL_TYPE_CCW)
764 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; 1060 reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
1061 reipl_block_ccw_fill_parms(reipl_block_ccw);
1062 }
1063
765 reipl_capabilities |= IPL_TYPE_CCW; 1064 reipl_capabilities |= IPL_TYPE_CCW;
766 return 0; 1065 return 0;
767} 1066}
@@ -1298,7 +1597,6 @@ static void __init shutdown_actions_init(void)
1298 1597
1299static int __init s390_ipl_init(void) 1598static int __init s390_ipl_init(void)
1300{ 1599{
1301 reipl_probe();
1302 sclp_get_ipl_info(&sclp_ipl_info); 1600 sclp_get_ipl_info(&sclp_ipl_info);
1303 shutdown_actions_init(); 1601 shutdown_actions_init();
1304 shutdown_triggers_init(); 1602 shutdown_triggers_init();
@@ -1405,6 +1703,12 @@ void __init setup_ipl(void)
1405 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); 1703 atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
1406} 1704}
1407 1705
1706void __init ipl_update_parameters(void)
1707{
1708 if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK)
1709 diag308_set_works = 1;
1710}
1711
1408void __init ipl_save_parameters(void) 1712void __init ipl_save_parameters(void)
1409{ 1713{
1410 struct cio_iplinfo iplinfo; 1714 struct cio_iplinfo iplinfo;
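
ipl_update_parameters() replaces the old reipl_probe(): instead of allocating a scratch page just to test whether diagnose 308 STORE works, it stores into the persistent ipl_block, so one call both probes the facility and captures the current IPL parameters. A hedged userspace sketch of that control flow (diag308 is the privileged s390 diagnose, stubbed out here; the subcode and rc values are taken from asm/ipl.h of this era):

#include <stdio.h>

#define DIAG308_STORE   6
#define DIAG308_RC_OK   1

static int diag308_set_works;
static char ipl_block[4096];    /* stands in for struct ipl_parameter_block */

/* Stub for the privileged diagnose 308 call; pretend STORE succeeds. */
static int diag308(int subcode, void *addr)
{
        (void)addr;
        return subcode == DIAG308_STORE ? DIAG308_RC_OK : 0;
}

/* Mirrors ipl_update_parameters(): probe the facility and fill the
 * saved parameter block in the same call. */
static void ipl_update_parameters(void)
{
        if (diag308(DIAG308_STORE, &ipl_block) == DIAG308_RC_OK)
                diag308_set_works = 1;
}

int main(void)
{
        ipl_update_parameters();
        printf("diag308_set_works = %d\n", diag308_set_works);
        return 0;
}
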
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index ed04d1372d5d..288ad490a6dd 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -41,10 +41,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
41 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr)) 41 if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
42 return -EINVAL; 42 return -EINVAL;
43 43
44 if ((unsigned long)p->addr & 0x01) { 44 if ((unsigned long)p->addr & 0x01)
45 printk("Attempt to register kprobe at an unaligned address\n");
46 return -EINVAL; 45 return -EINVAL;
47 }
48 46
49 /* Use the get_insn_slot() facility for correctness */ 47 /* Use the get_insn_slot() facility for correctness */
50 if (!(p->ainsn.insn = get_insn_slot())) 48 if (!(p->ainsn.insn = get_insn_slot()))
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 3c77dd36994c..131d7ee8b416 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -52,7 +52,6 @@ void machine_kexec_cleanup(struct kimage *image)
52 52
53void machine_shutdown(void) 53void machine_shutdown(void)
54{ 54{
55 printk(KERN_INFO "kexec: machine_shutdown called\n");
56} 55}
57 56
58void machine_kexec(struct kimage *image) 57void machine_kexec(struct kimage *image)
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
new file mode 100644
index 000000000000..18ed7abe16c5
--- /dev/null
+++ b/arch/s390/kernel/mem_detect.c
@@ -0,0 +1,100 @@
1/*
2 * Copyright IBM Corp. 2008
3 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
4 */
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <asm/ipl.h>
9#include <asm/sclp.h>
10#include <asm/setup.h>
11
12static int memory_fast_detect(struct mem_chunk *chunk)
13{
14 unsigned long val0 = 0;
15 unsigned long val1 = 0xc;
16 int rc = -EOPNOTSUPP;
17
18 if (ipl_flags & IPL_NSS_VALID)
19 return -EOPNOTSUPP;
20 asm volatile(
21 " diag %1,%2,0x260\n"
22 "0: lhi %0,0\n"
23 "1:\n"
24 EX_TABLE(0b,1b)
25 : "+d" (rc), "+d" (val0), "+d" (val1) : : "cc");
26
27 if (rc || val0 != val1)
28 return -EOPNOTSUPP;
29 chunk->size = val0 + 1;
30 return 0;
31}
32
33static inline int tprot(unsigned long addr)
34{
35 int rc = -EFAULT;
36
37 asm volatile(
38 " tprot 0(%1),0\n"
39 "0: ipm %0\n"
40 " srl %0,28\n"
41 "1:\n"
42 EX_TABLE(0b,1b)
43 : "+d" (rc) : "a" (addr) : "cc");
44 return rc;
45}
46
47#define ADDR2G (1ULL << 31)
48
49static void find_memory_chunks(struct mem_chunk chunk[])
50{
51 unsigned long long memsize, rnmax, rzm;
52 unsigned long addr = 0, size;
53 int i = 0, type;
54
55 rzm = sclp_get_rzm();
56 rnmax = sclp_get_rnmax();
57 memsize = rzm * rnmax;
58 if (!rzm)
59 rzm = 1ULL << 17;
60 if (sizeof(long) == 4) {
61 rzm = min(ADDR2G, rzm);
62 memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
63 }
64 do {
65 size = 0;
66 type = tprot(addr);
67 do {
68 size += rzm;
69 if (memsize && addr + size >= memsize)
70 break;
71 } while (type == tprot(addr + size));
72 if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
73 chunk[i].addr = addr;
74 chunk[i].size = size;
75 chunk[i].type = type;
76 i++;
77 }
78 addr += size;
79 } while (addr < memsize && i < MEMORY_CHUNKS);
80}
81
82void detect_memory_layout(struct mem_chunk chunk[])
83{
84 unsigned long flags, cr0;
85
86 memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
87 if (memory_fast_detect(&chunk[0]) == 0)
88 return;
89 /* Disable IRQs, DAT and low address protection so tprot does the
90 * right thing and we don't get scheduled away with low address
91 * protection disabled.
92 */
93 flags = __raw_local_irq_stnsm(0xf8);
94 __ctl_store(cr0, 0, 0);
95 __ctl_clear_bit(0, 28);
96 find_memory_chunks(chunk);
97 __ctl_load(cr0, 0, 0);
98 __raw_local_irq_ssm(flags);
99}
100EXPORT_SYMBOL(detect_memory_layout);
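
find_memory_chunks() probes storage in increments of rzm (the storage-increment size reported by SCLP) and coalesces consecutive increments for which tprot reports the same protection state into one mem_chunk. A userspace model of that coalescing loop, with tprot replaced by a lookup into a fake memory map (increment size and map values are invented for illustration; the CHUNK_* values match asm/setup.h):

#include <stdio.h>

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY  1
#define CHUNK_NONE       2      /* stand-in for "tprot faulted" */
#define MEMORY_CHUNKS    16

struct mem_chunk { unsigned long addr, size; int type; };

/* Fake protection map: 4 increments RW, 2 unusable, 2 RO. */
static const int prot[] = { 0, 0, 0, 0, 2, 2, 1, 1 };
#define RZM      0x10000UL                        /* fake increment size */
#define MEMSIZE  (sizeof(prot) / sizeof(prot[0]) * RZM)

static int tprot(unsigned long addr) { return prot[addr / RZM]; }

static void find_memory_chunks(struct mem_chunk chunk[])
{
        unsigned long addr = 0, size;
        int i = 0, type;

        do {
                size = 0;
                type = tprot(addr);
                do {    /* grow the run while protection stays the same */
                        size += RZM;
                        if (addr + size >= MEMSIZE)
                                break;
                } while (type == tprot(addr + size));
                if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
                        chunk[i].addr = addr;
                        chunk[i].size = size;
                        chunk[i].type = type;
                        i++;
                }
                addr += size;
        } while (addr < MEMSIZE && i < MEMORY_CHUNKS);
}

int main(void)
{
        struct mem_chunk chunk[MEMORY_CHUNKS] = { { 0 } };
        int i;

        find_memory_chunks(chunk);
        for (i = 0; i < MEMORY_CHUNKS && chunk[i].size; i++)
                printf("chunk %d: addr=%#lx size=%#lx type=%d\n",
                       i, chunk[i].addr, chunk[i].size, chunk[i].type);
        return 0;
}

Running this prints one read-write chunk covering the first four increments and one read-only chunk covering the last two; the unusable middle increments are skipped, as in the kernel loop.
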
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7920861109d2..85defd01d293 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -75,46 +75,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
75 return sf->gprs[8]; 75 return sf->gprs[8];
76} 76}
77 77
78/*
79 * Need to know about CPUs going idle?
80 */
81static ATOMIC_NOTIFIER_HEAD(idle_chain);
82DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 78DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
83 79
84int register_idle_notifier(struct notifier_block *nb)
85{
86 return atomic_notifier_chain_register(&idle_chain, nb);
87}
88EXPORT_SYMBOL(register_idle_notifier);
89
90int unregister_idle_notifier(struct notifier_block *nb)
91{
92 return atomic_notifier_chain_unregister(&idle_chain, nb);
93}
94EXPORT_SYMBOL(unregister_idle_notifier);
95
96static int s390_idle_enter(void) 80static int s390_idle_enter(void)
97{ 81{
98 struct s390_idle_data *idle; 82 struct s390_idle_data *idle;
99 int nr_calls = 0;
100 void *hcpu;
101 int rc;
102 83
103 hcpu = (void *)(long)smp_processor_id();
104 rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
105 &nr_calls);
106 if (rc == NOTIFY_BAD) {
107 nr_calls--;
108 __atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
109 hcpu, nr_calls, NULL);
110 return rc;
111 }
112 idle = &__get_cpu_var(s390_idle); 84 idle = &__get_cpu_var(s390_idle);
113 spin_lock(&idle->lock); 85 spin_lock(&idle->lock);
114 idle->idle_count++; 86 idle->idle_count++;
115 idle->in_idle = 1; 87 idle->in_idle = 1;
116 idle->idle_enter = get_clock(); 88 idle->idle_enter = get_clock();
117 spin_unlock(&idle->lock); 89 spin_unlock(&idle->lock);
90 vtime_stop_cpu_timer();
118 return NOTIFY_OK; 91 return NOTIFY_OK;
119} 92}
120 93
@@ -122,13 +95,12 @@ void s390_idle_leave(void)
122{ 95{
123 struct s390_idle_data *idle; 96 struct s390_idle_data *idle;
124 97
98 vtime_start_cpu_timer();
125 idle = &__get_cpu_var(s390_idle); 99 idle = &__get_cpu_var(s390_idle);
126 spin_lock(&idle->lock); 100 spin_lock(&idle->lock);
127 idle->idle_time += get_clock() - idle->idle_enter; 101 idle->idle_time += get_clock() - idle->idle_enter;
128 idle->in_idle = 0; 102 idle->in_idle = 0;
129 spin_unlock(&idle->lock); 103 spin_unlock(&idle->lock);
130 atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
131 (void *)(long) smp_processor_id());
132} 104}
133 105
134extern void s390_handle_mcck(void); 106extern void s390_handle_mcck(void);
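
With the idle notifier chain gone, entering and leaving idle reduces to bookkeeping under idle->lock plus stopping and restarting the virtual cpu timer. The accounting itself is plain timestamp arithmetic; a userspace sketch with get_clock() standing in for the s390 TOD clock (and the locking omitted):

#include <stdio.h>
#include <time.h>

struct s390_idle_data {
        unsigned long long idle_count;
        unsigned long long idle_enter;
        unsigned long long idle_time;
        int in_idle;
};

/* Stand-in for the TOD clock; any monotonic tick source will do. */
static unsigned long long get_clock(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Mirrors s390_idle_enter(): count the period and timestamp it. */
static void idle_enter(struct s390_idle_data *idle)
{
        idle->idle_count++;
        idle->in_idle = 1;
        idle->idle_enter = get_clock();
}

/* Mirrors s390_idle_leave(): accumulate the elapsed idle time. */
static void idle_leave(struct s390_idle_data *idle)
{
        idle->idle_time += get_clock() - idle->idle_enter;
        idle->in_idle = 0;
}

int main(void)
{
        struct s390_idle_data idle = { 0 };

        idle_enter(&idle);
        idle_leave(&idle);
        printf("%llu idle periods, %llu ns idle\n",
               idle.idle_count, idle.idle_time);
        return 0;
}
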
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 35827b9bd4d1..2815bfe348a6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -33,6 +33,8 @@
33#include <linux/security.h> 33#include <linux/security.h>
34#include <linux/audit.h> 34#include <linux/audit.h>
35#include <linux/signal.h> 35#include <linux/signal.h>
36#include <linux/elf.h>
37#include <linux/regset.h>
36 38
37#include <asm/segment.h> 39#include <asm/segment.h>
38#include <asm/page.h> 40#include <asm/page.h>
@@ -47,6 +49,11 @@
47#include "compat_ptrace.h" 49#include "compat_ptrace.h"
48#endif 50#endif
49 51
52enum s390_regset {
53 REGSET_GENERAL,
54 REGSET_FP,
55};
56
50static void 57static void
51FixPerRegisters(struct task_struct *task) 58FixPerRegisters(struct task_struct *task)
52{ 59{
@@ -126,24 +133,10 @@ ptrace_disable(struct task_struct *child)
126 * struct user contain pad bytes that should be read as zeroes. 133 * struct user contain pad bytes that should be read as zeroes.
127 * Lovely... 134 * Lovely...
128 */ 135 */
129static int 136static unsigned long __peek_user(struct task_struct *child, addr_t addr)
130peek_user(struct task_struct *child, addr_t addr, addr_t data)
131{ 137{
132 struct user *dummy = NULL; 138 struct user *dummy = NULL;
133 addr_t offset, tmp, mask; 139 addr_t offset, tmp;
134
135 /*
136 * Stupid gdb peeks/pokes the access registers in 64 bit with
137 * an alignment of 4. Programmers from hell...
138 */
139 mask = __ADDR_MASK;
140#ifdef CONFIG_64BIT
141 if (addr >= (addr_t) &dummy->regs.acrs &&
142 addr < (addr_t) &dummy->regs.orig_gpr2)
143 mask = 3;
144#endif
145 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
146 return -EIO;
147 140
148 if (addr < (addr_t) &dummy->regs.acrs) { 141 if (addr < (addr_t) &dummy->regs.acrs) {
149 /* 142 /*
@@ -197,24 +190,18 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
197 } else 190 } else
198 tmp = 0; 191 tmp = 0;
199 192
200 return put_user(tmp, (addr_t __user *) data); 193 return tmp;
201} 194}
202 195
203/*
204 * Write a word to the user area of a process at location addr. This
205 * operation does have an additional problem compared to peek_user.
206 * Stores to the program status word and on the floating point
207 * control register needs to get checked for validity.
208 */
209static int 196static int
210poke_user(struct task_struct *child, addr_t addr, addr_t data) 197peek_user(struct task_struct *child, addr_t addr, addr_t data)
211{ 198{
212 struct user *dummy = NULL; 199 struct user *dummy = NULL;
213 addr_t offset, mask; 200 addr_t tmp, mask;
214 201
215 /* 202 /*
216 * Stupid gdb peeks/pokes the access registers in 64 bit with 203 * Stupid gdb peeks/pokes the access registers in 64 bit with
217 * an alignment of 4. Programmers from hell indeed... 204 * an alignment of 4. Programmers from hell...
218 */ 205 */
219 mask = __ADDR_MASK; 206 mask = __ADDR_MASK;
220#ifdef CONFIG_64BIT 207#ifdef CONFIG_64BIT
@@ -225,6 +212,21 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
225 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK) 212 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
226 return -EIO; 213 return -EIO;
227 214
215 tmp = __peek_user(child, addr);
216 return put_user(tmp, (addr_t __user *) data);
217}
218
219/*
220 * Write a word to the user area of a process at location addr. This
221 * operation does have an additional problem compared to peek_user.
222 * Stores to the program status word and on the floating point
223 * control register needs to get checked for validity.
224 */
225static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
226{
227 struct user *dummy = NULL;
228 addr_t offset;
229
228 if (addr < (addr_t) &dummy->regs.acrs) { 230 if (addr < (addr_t) &dummy->regs.acrs) {
229 /* 231 /*
230 * psw and gprs are stored on the stack 232 * psw and gprs are stored on the stack
@@ -292,6 +294,28 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
292 return 0; 294 return 0;
293} 295}
294 296
297static int
298poke_user(struct task_struct *child, addr_t addr, addr_t data)
299{
300 struct user *dummy = NULL;
301 addr_t mask;
302
303 /*
304 * Stupid gdb peeks/pokes the access registers in 64 bit with
305 * an alignment of 4. Programmers from hell indeed...
306 */
307 mask = __ADDR_MASK;
308#ifdef CONFIG_64BIT
309 if (addr >= (addr_t) &dummy->regs.acrs &&
310 addr < (addr_t) &dummy->regs.orig_gpr2)
311 mask = 3;
312#endif
313 if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
314 return -EIO;
315
316 return __poke_user(child, addr, data);
317}
318
295long arch_ptrace(struct task_struct *child, long request, long addr, long data) 319long arch_ptrace(struct task_struct *child, long request, long addr, long data)
296{ 320{
297 ptrace_area parea; 321 ptrace_area parea;
@@ -367,18 +391,13 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
367/* 391/*
368 * Same as peek_user but for a 31 bit program. 392 * Same as peek_user but for a 31 bit program.
369 */ 393 */
370static int 394static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
371peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
372{ 395{
373 struct user32 *dummy32 = NULL; 396 struct user32 *dummy32 = NULL;
374 per_struct32 *dummy_per32 = NULL; 397 per_struct32 *dummy_per32 = NULL;
375 addr_t offset; 398 addr_t offset;
376 __u32 tmp; 399 __u32 tmp;
377 400
378 if (!test_thread_flag(TIF_31BIT) ||
379 (addr & 3) || addr > sizeof(struct user) - 3)
380 return -EIO;
381
382 if (addr < (addr_t) &dummy32->regs.acrs) { 401 if (addr < (addr_t) &dummy32->regs.acrs) {
383 /* 402 /*
384 * psw and gprs are stored on the stack 403 * psw and gprs are stored on the stack
@@ -435,25 +454,32 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
435 } else 454 } else
436 tmp = 0; 455 tmp = 0;
437 456
457 return tmp;
458}
459
460static int peek_user_compat(struct task_struct *child,
461 addr_t addr, addr_t data)
462{
463 __u32 tmp;
464
465 if (!test_thread_flag(TIF_31BIT) ||
466 (addr & 3) || addr > sizeof(struct user) - 3)
467 return -EIO;
468
469 tmp = __peek_user_compat(child, addr);
438 return put_user(tmp, (__u32 __user *) data); 470 return put_user(tmp, (__u32 __user *) data);
439} 471}
440 472
441/* 473/*
442 * Same as poke_user but for a 31 bit program. 474 * Same as poke_user but for a 31 bit program.
443 */ 475 */
444static int 476static int __poke_user_compat(struct task_struct *child,
445poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data) 477 addr_t addr, addr_t data)
446{ 478{
447 struct user32 *dummy32 = NULL; 479 struct user32 *dummy32 = NULL;
448 per_struct32 *dummy_per32 = NULL; 480 per_struct32 *dummy_per32 = NULL;
481 __u32 tmp = (__u32) data;
449 addr_t offset; 482 addr_t offset;
450 __u32 tmp;
451
452 if (!test_thread_flag(TIF_31BIT) ||
453 (addr & 3) || addr > sizeof(struct user32) - 3)
454 return -EIO;
455
456 tmp = (__u32) data;
457 483
458 if (addr < (addr_t) &dummy32->regs.acrs) { 484 if (addr < (addr_t) &dummy32->regs.acrs) {
459 /* 485 /*
@@ -528,6 +554,16 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
528 return 0; 554 return 0;
529} 555}
530 556
557static int poke_user_compat(struct task_struct *child,
558 addr_t addr, addr_t data)
559{
560 if (!test_thread_flag(TIF_31BIT) ||
561 (addr & 3) || addr > sizeof(struct user32) - 3)
562 return -EIO;
563
564 return __poke_user_compat(child, addr, data);
565}
566
531long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 567long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
532 compat_ulong_t caddr, compat_ulong_t cdata) 568 compat_ulong_t caddr, compat_ulong_t cdata)
533{ 569{
@@ -539,11 +575,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
539 switch (request) { 575 switch (request) {
540 case PTRACE_PEEKUSR: 576 case PTRACE_PEEKUSR:
541 /* read the word at location addr in the USER area. */ 577 /* read the word at location addr in the USER area. */
542 return peek_user_emu31(child, addr, data); 578 return peek_user_compat(child, addr, data);
543 579
544 case PTRACE_POKEUSR: 580 case PTRACE_POKEUSR:
545 /* write the word at location addr in the USER area */ 581 /* write the word at location addr in the USER area */
546 return poke_user_emu31(child, addr, data); 582 return poke_user_compat(child, addr, data);
547 583
548 case PTRACE_PEEKUSR_AREA: 584 case PTRACE_PEEKUSR_AREA:
549 case PTRACE_POKEUSR_AREA: 585 case PTRACE_POKEUSR_AREA:
@@ -555,13 +591,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
555 copied = 0; 591 copied = 0;
556 while (copied < parea.len) { 592 while (copied < parea.len) {
557 if (request == PTRACE_PEEKUSR_AREA) 593 if (request == PTRACE_PEEKUSR_AREA)
558 ret = peek_user_emu31(child, addr, data); 594 ret = peek_user_compat(child, addr, data);
559 else { 595 else {
560 __u32 utmp; 596 __u32 utmp;
561 if (get_user(utmp, 597 if (get_user(utmp,
562 (__u32 __force __user *) data)) 598 (__u32 __force __user *) data))
563 return -EFAULT; 599 return -EFAULT;
564 ret = poke_user_emu31(child, addr, utmp); 600 ret = poke_user_compat(child, addr, utmp);
565 } 601 }
566 if (ret) 602 if (ret)
567 return ret; 603 return ret;
@@ -610,3 +646,240 @@ syscall_trace(struct pt_regs *regs, int entryexit)
610 regs->gprs[2], regs->orig_gpr2, regs->gprs[3], 646 regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
611 regs->gprs[4], regs->gprs[5]); 647 regs->gprs[4], regs->gprs[5]);
612} 648}
649
650/*
651 * user_regset definitions.
652 */
653
654static int s390_regs_get(struct task_struct *target,
655 const struct user_regset *regset,
656 unsigned int pos, unsigned int count,
657 void *kbuf, void __user *ubuf)
658{
659 if (target == current)
660 save_access_regs(target->thread.acrs);
661
662 if (kbuf) {
663 unsigned long *k = kbuf;
664 while (count > 0) {
665 *k++ = __peek_user(target, pos);
666 count -= sizeof(*k);
667 pos += sizeof(*k);
668 }
669 } else {
670 unsigned long __user *u = ubuf;
671 while (count > 0) {
672 if (__put_user(__peek_user(target, pos), u++))
673 return -EFAULT;
674 count -= sizeof(*u);
675 pos += sizeof(*u);
676 }
677 }
678 return 0;
679}
680
681static int s390_regs_set(struct task_struct *target,
682 const struct user_regset *regset,
683 unsigned int pos, unsigned int count,
684 const void *kbuf, const void __user *ubuf)
685{
686 int rc = 0;
687
688 if (target == current)
689 save_access_regs(target->thread.acrs);
690
691 if (kbuf) {
692 const unsigned long *k = kbuf;
693 while (count > 0 && !rc) {
694 rc = __poke_user(target, pos, *k++);
695 count -= sizeof(*k);
696 pos += sizeof(*k);
697 }
698 } else {
699 const unsigned long __user *u = ubuf;
700 while (count > 0 && !rc) {
701 unsigned long word;
702 rc = __get_user(word, u++);
703 if (rc)
704 break;
705 rc = __poke_user(target, pos, word);
706 count -= sizeof(*u);
707 pos += sizeof(*u);
708 }
709 }
710
711 if (rc == 0 && target == current)
712 restore_access_regs(target->thread.acrs);
713
714 return rc;
715}
716
717static int s390_fpregs_get(struct task_struct *target,
718 const struct user_regset *regset, unsigned int pos,
719 unsigned int count, void *kbuf, void __user *ubuf)
720{
721 if (target == current)
722 save_fp_regs(&target->thread.fp_regs);
723
724 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
725 &target->thread.fp_regs, 0, -1);
726}
727
728static int s390_fpregs_set(struct task_struct *target,
729 const struct user_regset *regset, unsigned int pos,
730 unsigned int count, const void *kbuf,
731 const void __user *ubuf)
732{
733 int rc = 0;
734
735 if (target == current)
736 save_fp_regs(&target->thread.fp_regs);
737
738 /* If setting FPC, must validate it first. */
739 if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
740 u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
741 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
742 0, offsetof(s390_fp_regs, fprs));
743 if (rc)
744 return rc;
745 if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
746 return -EINVAL;
747 target->thread.fp_regs.fpc = fpc[0];
748 }
749
750 if (rc == 0 && count > 0)
751 rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
752 target->thread.fp_regs.fprs,
753 offsetof(s390_fp_regs, fprs), -1);
754
755 if (rc == 0 && target == current)
756 restore_fp_regs(&target->thread.fp_regs);
757
758 return rc;
759}
760
761static const struct user_regset s390_regsets[] = {
762 [REGSET_GENERAL] = {
763 .core_note_type = NT_PRSTATUS,
764 .n = sizeof(s390_regs) / sizeof(long),
765 .size = sizeof(long),
766 .align = sizeof(long),
767 .get = s390_regs_get,
768 .set = s390_regs_set,
769 },
770 [REGSET_FP] = {
771 .core_note_type = NT_PRFPREG,
772 .n = sizeof(s390_fp_regs) / sizeof(long),
773 .size = sizeof(long),
774 .align = sizeof(long),
775 .get = s390_fpregs_get,
776 .set = s390_fpregs_set,
777 },
778};
779
780static const struct user_regset_view user_s390_view = {
781 .name = UTS_MACHINE,
782 .e_machine = EM_S390,
783 .regsets = s390_regsets,
784 .n = ARRAY_SIZE(s390_regsets)
785};
786
787#ifdef CONFIG_COMPAT
788static int s390_compat_regs_get(struct task_struct *target,
789 const struct user_regset *regset,
790 unsigned int pos, unsigned int count,
791 void *kbuf, void __user *ubuf)
792{
793 if (target == current)
794 save_access_regs(target->thread.acrs);
795
796 if (kbuf) {
797 compat_ulong_t *k = kbuf;
798 while (count > 0) {
799 *k++ = __peek_user_compat(target, pos);
800 count -= sizeof(*k);
801 pos += sizeof(*k);
802 }
803 } else {
804 compat_ulong_t __user *u = ubuf;
805 while (count > 0) {
806 if (__put_user(__peek_user_compat(target, pos), u++))
807 return -EFAULT;
808 count -= sizeof(*u);
809 pos += sizeof(*u);
810 }
811 }
812 return 0;
813}
814
815static int s390_compat_regs_set(struct task_struct *target,
816 const struct user_regset *regset,
817 unsigned int pos, unsigned int count,
818 const void *kbuf, const void __user *ubuf)
819{
820 int rc = 0;
821
822 if (target == current)
823 save_access_regs(target->thread.acrs);
824
825 if (kbuf) {
826 const compat_ulong_t *k = kbuf;
827 while (count > 0 && !rc) {
828 rc = __poke_user_compat(target, pos, *k++);
829 count -= sizeof(*k);
830 pos += sizeof(*k);
831 }
832 } else {
833 const compat_ulong_t __user *u = ubuf;
834 while (count > 0 && !rc) {
835 compat_ulong_t word;
836 rc = __get_user(word, u++);
837 if (rc)
838 break;
839 rc = __poke_user_compat(target, pos, word);
840 count -= sizeof(*u);
841 pos += sizeof(*u);
842 }
843 }
844
845 if (rc == 0 && target == current)
846 restore_access_regs(target->thread.acrs);
847
848 return rc;
849}
850
851static const struct user_regset s390_compat_regsets[] = {
852 [REGSET_GENERAL] = {
853 .core_note_type = NT_PRSTATUS,
854 .n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
855 .size = sizeof(compat_long_t),
856 .align = sizeof(compat_long_t),
857 .get = s390_compat_regs_get,
858 .set = s390_compat_regs_set,
859 },
860 [REGSET_FP] = {
861 .core_note_type = NT_PRFPREG,
862 .n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
863 .size = sizeof(compat_long_t),
864 .align = sizeof(compat_long_t),
865 .get = s390_fpregs_get,
866 .set = s390_fpregs_set,
867 },
868};
869
870static const struct user_regset_view user_s390_compat_view = {
871 .name = "s390",
872 .e_machine = EM_S390,
873 .regsets = s390_compat_regsets,
874 .n = ARRAY_SIZE(s390_compat_regsets)
875};
876#endif
877
878const struct user_regset_view *task_user_regset_view(struct task_struct *task)
879{
880#ifdef CONFIG_COMPAT
881 if (test_tsk_thread_flag(task, TIF_31BIT))
882 return &user_s390_compat_view;
883#endif
884 return &user_s390_view;
885}
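
The regset ->get handlers above walk the user area one word at a time through __peek_user(), filling either a kernel buffer or a user buffer. A userspace model of the kbuf branch, with the peek stubbed by a fake register file (the real function reads the traced task's saved state):

#include <stdio.h>

static unsigned long fake_regs[4] = { 0x10, 0x20, 0x30, 0x40 };

/* Stub for __peek_user(): return the word at byte offset "pos". */
static unsigned long peek(unsigned int pos)
{
        return fake_regs[pos / sizeof(unsigned long)];
}

/* Model of the kbuf branch in s390_regs_get(): copy "count" bytes
 * starting at "pos", one register-sized word per iteration. */
static void regs_get(unsigned int pos, unsigned int count, void *kbuf)
{
        unsigned long *k = kbuf;

        while (count > 0) {
                *k++ = peek(pos);
                count -= sizeof(*k);
                pos += sizeof(*k);
        }
}

int main(void)
{
        unsigned long out[4];

        regs_get(0, sizeof(out), out);
        printf("%#lx %#lx %#lx %#lx\n", out[0], out[1], out[2], out[3]);
        return 0;
}

The ubuf branch is identical except that each word goes through __put_user(), and the compat variants do the same walk with 32-bit words.
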
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 2bc70b6e876a..b358e18273b0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -77,7 +77,7 @@ unsigned long machine_flags;
77unsigned long elf_hwcap = 0; 77unsigned long elf_hwcap = 0;
78char elf_platform[ELF_PLATFORM_SIZE]; 78char elf_platform[ELF_PLATFORM_SIZE];
79 79
80struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS]; 80struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
81volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ 81volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
82static unsigned long __initdata memory_end; 82static unsigned long __initdata memory_end;
83 83
@@ -205,12 +205,6 @@ static void __init conmode_default(void)
205 SET_CONSOLE_SCLP; 205 SET_CONSOLE_SCLP;
206#endif 206#endif
207 } 207 }
208 } else if (MACHINE_IS_P390) {
209#if defined(CONFIG_TN3215_CONSOLE)
210 SET_CONSOLE_3215;
211#elif defined(CONFIG_TN3270_CONSOLE)
212 SET_CONSOLE_3270;
213#endif
214 } else { 208 } else {
215#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE) 209#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
216 SET_CONSOLE_SCLP; 210 SET_CONSOLE_SCLP;
@@ -221,18 +215,17 @@ static void __init conmode_default(void)
221#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) 215#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)
222static void __init setup_zfcpdump(unsigned int console_devno) 216static void __init setup_zfcpdump(unsigned int console_devno)
223{ 217{
224 static char str[64]; 218 static char str[41];
225 219
226 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 220 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
227 return; 221 return;
228 if (console_devno != -1) 222 if (console_devno != -1)
229 sprintf(str, "cio_ignore=all,!0.0.%04x,!0.0.%04x", 223 sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
230 ipl_info.data.fcp.dev_id.devno, console_devno); 224 ipl_info.data.fcp.dev_id.devno, console_devno);
231 else 225 else
232 sprintf(str, "cio_ignore=all,!0.0.%04x", 226 sprintf(str, " cio_ignore=all,!0.0.%04x",
233 ipl_info.data.fcp.dev_id.devno); 227 ipl_info.data.fcp.dev_id.devno);
234 strcat(COMMAND_LINE, " "); 228 strcat(boot_command_line, str);
235 strcat(COMMAND_LINE, str);
236 console_loglevel = 2; 229 console_loglevel = 2;
237} 230}
238#else 231#else
@@ -289,32 +282,6 @@ static int __init early_parse_mem(char *p)
289} 282}
290early_param("mem", early_parse_mem); 283early_param("mem", early_parse_mem);
291 284
292/*
293 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
294 */
295static int __init early_parse_ipldelay(char *p)
296{
297 unsigned long delay = 0;
298
299 delay = simple_strtoul(p, &p, 0);
300
301 switch (*p) {
302 case 's':
303 case 'S':
304 delay *= 1000000;
305 break;
306 case 'm':
307 case 'M':
308 delay *= 60 * 1000000;
309 }
310
311 /* now wait for the requested amount of time */
312 udelay(delay);
313
314 return 0;
315}
316early_param("ipldelay", early_parse_ipldelay);
317
318#ifdef CONFIG_S390_SWITCH_AMODE 285#ifdef CONFIG_S390_SWITCH_AMODE
319#ifdef CONFIG_PGSTE 286#ifdef CONFIG_PGSTE
320unsigned int switch_amode = 1; 287unsigned int switch_amode = 1;
@@ -804,11 +771,9 @@ setup_arch(char **cmdline_p)
804 printk("We are running native (64 bit mode)\n"); 771 printk("We are running native (64 bit mode)\n");
805#endif /* CONFIG_64BIT */ 772#endif /* CONFIG_64BIT */
806 773
807 /* Save unparsed command line copy for /proc/cmdline */ 774 /* Have one command line that is parsed and saved in /proc/cmdline */
808 strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); 775 /* boot_command_line has been already set up in early.c */
809 776 *cmdline_p = boot_command_line;
810 *cmdline_p = COMMAND_LINE;
811 *(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
812 777
813 ROOT_DEV = Root_RAM0; 778 ROOT_DEV = Root_RAM0;
814 779
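
setup_zfcpdump() now carries the separating blank inside the format string and appends straight to boot_command_line, which early.c has already assembled. With a dump device of 0.0.4711 and a console of 0.0.001f (both invented for this example), the appended text is " cio_ignore=all,!0.0.4711,!0.0.001f", which fits the shrunken str[41]. A quick demo of the two formats (snprintf used here in place of the kernel's sprintf):

#include <stdio.h>

int main(void)
{
        char str[41];
        unsigned int dump_devno = 0x4711;     /* invented device numbers */
        unsigned int console_devno = 0x001f;

        /* with a known console device: keep dump device and console */
        snprintf(str, sizeof(str), " cio_ignore=all,!0.0.%04x,!0.0.%04x",
                 dump_devno, console_devno);
        puts(str);

        /* without one: keep only the dump device */
        snprintf(str, sizeof(str), " cio_ignore=all,!0.0.%04x", dump_devno);
        puts(str);
        return 0;
}
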
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5d4fa4b1c74c..b795b3e24afd 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -109,7 +109,7 @@ static void do_call_function(void)
109} 109}
110 110
111static void __smp_call_function_map(void (*func) (void *info), void *info, 111static void __smp_call_function_map(void (*func) (void *info), void *info,
112 int nonatomic, int wait, cpumask_t map) 112 int wait, cpumask_t map)
113{ 113{
114 struct call_data_struct data; 114 struct call_data_struct data;
115 int cpu, local = 0; 115 int cpu, local = 0;
@@ -162,7 +162,6 @@ out:
162 * smp_call_function: 162 * smp_call_function:
163 * @func: the function to run; this must be fast and non-blocking 163 * @func: the function to run; this must be fast and non-blocking
164 * @info: an arbitrary pointer to pass to the function 164 * @info: an arbitrary pointer to pass to the function
165 * @nonatomic: unused
166 * @wait: if true, wait (atomically) until function has completed on other CPUs 165 * @wait: if true, wait (atomically) until function has completed on other CPUs
167 * 166 *
168 * Run a function on all other CPUs. 167 * Run a function on all other CPUs.
@@ -170,15 +169,14 @@ out:
170 * You must not call this function with disabled interrupts, from a 169 * You must not call this function with disabled interrupts, from a
171 * hardware interrupt handler or from a bottom half. 170 * hardware interrupt handler or from a bottom half.
172 */ 171 */
173int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 172int smp_call_function(void (*func) (void *info), void *info, int wait)
174 int wait)
175{ 173{
176 cpumask_t map; 174 cpumask_t map;
177 175
178 spin_lock(&call_lock); 176 spin_lock(&call_lock);
179 map = cpu_online_map; 177 map = cpu_online_map;
180 cpu_clear(smp_processor_id(), map); 178 cpu_clear(smp_processor_id(), map);
181 __smp_call_function_map(func, info, nonatomic, wait, map); 179 __smp_call_function_map(func, info, wait, map);
182 spin_unlock(&call_lock); 180 spin_unlock(&call_lock);
183 return 0; 181 return 0;
184} 182}
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function);
189 * @cpu: the CPU where func should run 187 * @cpu: the CPU where func should run
190 * @func: the function to run; this must be fast and non-blocking 188 * @func: the function to run; this must be fast and non-blocking
191 * @info: an arbitrary pointer to pass to the function 189 * @info: an arbitrary pointer to pass to the function
192 * @nonatomic: unused
193 * @wait: if true, wait (atomically) until function has completed on other CPUs 190 * @wait: if true, wait (atomically) until function has completed on other CPUs
194 * 191 *
195 * Run a function on one processor. 192 * Run a function on one processor.
@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function);
198 * hardware interrupt handler or from a bottom half. 195 * hardware interrupt handler or from a bottom half.
199 */ 196 */
200int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 197int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
201 int nonatomic, int wait) 198 int wait)
202{ 199{
203 spin_lock(&call_lock); 200 spin_lock(&call_lock);
204 __smp_call_function_map(func, info, nonatomic, wait, 201 __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
205 cpumask_of_cpu(cpu));
206 spin_unlock(&call_lock); 202 spin_unlock(&call_lock);
207 return 0; 203 return 0;
208} 204}
@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
228{ 224{
229 spin_lock(&call_lock); 225 spin_lock(&call_lock);
230 cpu_clear(smp_processor_id(), mask); 226 cpu_clear(smp_processor_id(), mask);
231 __smp_call_function_map(func, info, 0, wait, mask); 227 __smp_call_function_map(func, info, wait, mask);
232 spin_unlock(&call_lock); 228 spin_unlock(&call_lock);
233 return 0; 229 return 0;
234} 230}
@@ -303,7 +299,7 @@ static void smp_ptlb_callback(void *info)
303 299
304void smp_ptlb_all(void) 300void smp_ptlb_all(void)
305{ 301{
306 on_each_cpu(smp_ptlb_callback, NULL, 0, 1); 302 on_each_cpu(smp_ptlb_callback, NULL, 1);
307} 303}
308EXPORT_SYMBOL(smp_ptlb_all); 304EXPORT_SYMBOL(smp_ptlb_all);
309#endif /* ! CONFIG_64BIT */ 305#endif /* ! CONFIG_64BIT */
@@ -351,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit)
351 memset(&parms.orvals, 0, sizeof(parms.orvals)); 347 memset(&parms.orvals, 0, sizeof(parms.orvals));
352 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 348 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
353 parms.orvals[cr] = 1 << bit; 349 parms.orvals[cr] = 1 << bit;
354 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 350 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
355} 351}
356EXPORT_SYMBOL(smp_ctl_set_bit); 352EXPORT_SYMBOL(smp_ctl_set_bit);
357 353
@@ -365,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit)
365 memset(&parms.orvals, 0, sizeof(parms.orvals)); 361 memset(&parms.orvals, 0, sizeof(parms.orvals));
366 memset(&parms.andvals, 0xff, sizeof(parms.andvals)); 362 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
367 parms.andvals[cr] = ~(1L << bit); 363 parms.andvals[cr] = ~(1L << bit);
368 on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); 364 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
369} 365}
370EXPORT_SYMBOL(smp_ctl_clear_bit); 366EXPORT_SYMBOL(smp_ctl_clear_bit);
371 367
@@ -868,7 +864,8 @@ int setup_profiling_timer(unsigned int multiplier)
868} 864}
869 865
870#ifdef CONFIG_HOTPLUG_CPU 866#ifdef CONFIG_HOTPLUG_CPU
871static ssize_t cpu_configure_show(struct sys_device *dev, char *buf) 867static ssize_t cpu_configure_show(struct sys_device *dev,
868 struct sysdev_attribute *attr, char *buf)
872{ 869{
873 ssize_t count; 870 ssize_t count;
874 871
@@ -878,8 +875,9 @@ static ssize_t cpu_configure_show(struct sys_device *dev, char *buf)
878 return count; 875 return count;
879} 876}
880 877
881static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf, 878static ssize_t cpu_configure_store(struct sys_device *dev,
882 size_t count) 879 struct sysdev_attribute *attr,
880 const char *buf, size_t count)
883{ 881{
884 int cpu = dev->id; 882 int cpu = dev->id;
885 int val, rc; 883 int val, rc;
@@ -926,7 +924,8 @@ out:
926static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); 924static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
927#endif /* CONFIG_HOTPLUG_CPU */ 925#endif /* CONFIG_HOTPLUG_CPU */
928 926
929static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf) 927static ssize_t cpu_polarization_show(struct sys_device *dev,
928 struct sysdev_attribute *attr, char *buf)
930{ 929{
931 int cpu = dev->id; 930 int cpu = dev->id;
932 ssize_t count; 931 ssize_t count;
@@ -954,7 +953,8 @@ static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
954} 953}
955static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL); 954static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
956 955
957static ssize_t show_cpu_address(struct sys_device *dev, char *buf) 956static ssize_t show_cpu_address(struct sys_device *dev,
957 struct sysdev_attribute *attr, char *buf)
958{ 958{
959 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]); 959 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
960} 960}
@@ -974,7 +974,8 @@ static struct attribute_group cpu_common_attr_group = {
974 .attrs = cpu_common_attrs, 974 .attrs = cpu_common_attrs,
975}; 975};
976 976
977static ssize_t show_capability(struct sys_device *dev, char *buf) 977static ssize_t show_capability(struct sys_device *dev,
978 struct sysdev_attribute *attr, char *buf)
978{ 979{
979 unsigned int capability; 980 unsigned int capability;
980 int rc; 981 int rc;
@@ -986,7 +987,8 @@ static ssize_t show_capability(struct sys_device *dev, char *buf)
986} 987}
987static SYSDEV_ATTR(capability, 0444, show_capability, NULL); 988static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
988 989
989static ssize_t show_idle_count(struct sys_device *dev, char *buf) 990static ssize_t show_idle_count(struct sys_device *dev,
991 struct sysdev_attribute *attr, char *buf)
990{ 992{
991 struct s390_idle_data *idle; 993 struct s390_idle_data *idle;
992 unsigned long long idle_count; 994 unsigned long long idle_count;
@@ -999,7 +1001,8 @@ static ssize_t show_idle_count(struct sys_device *dev, char *buf)
999} 1001}
1000static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); 1002static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
1001 1003
1002static ssize_t show_idle_time(struct sys_device *dev, char *buf) 1004static ssize_t show_idle_time(struct sys_device *dev,
1005 struct sysdev_attribute *attr, char *buf)
1003{ 1006{
1004 struct s390_idle_data *idle; 1007 struct s390_idle_data *idle;
1005 unsigned long long new_time; 1008 unsigned long long new_time;
@@ -1116,7 +1119,9 @@ out:
1116 return rc; 1119 return rc;
1117} 1120}
1118 1121
1119static ssize_t __ref rescan_store(struct sys_device *dev, const char *buf, 1122static ssize_t __ref rescan_store(struct sys_device *dev,
1123 struct sysdev_attribute *attr,
1124 const char *buf,
1120 size_t count) 1125 size_t count)
1121{ 1126{
1122 int rc; 1127 int rc;
@@ -1127,7 +1132,9 @@ static ssize_t __ref rescan_store(struct sys_device *dev, const char *buf,
1127static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store); 1132static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
1128#endif /* CONFIG_HOTPLUG_CPU */ 1133#endif /* CONFIG_HOTPLUG_CPU */
1129 1134
1130static ssize_t dispatching_show(struct sys_device *dev, char *buf) 1135static ssize_t dispatching_show(struct sys_device *dev,
1136 struct sysdev_attribute *attr,
1137 char *buf)
1131{ 1138{
1132 ssize_t count; 1139 ssize_t count;
1133 1140
@@ -1137,8 +1144,9 @@ static ssize_t dispatching_show(struct sys_device *dev, char *buf)
1137 return count; 1144 return count;
1138} 1145}
1139 1146
1140static ssize_t dispatching_store(struct sys_device *dev, const char *buf, 1147static ssize_t dispatching_store(struct sys_device *dev,
1141 size_t count) 1148 struct sysdev_attribute *attr,
1149 const char *buf, size_t count)
1142{ 1150{
1143 int val, rc; 1151 int val, rc;
1144 char delim; 1152 char delim;
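
The unused nonatomic flag disappears from the whole call-function family, so every caller drops one argument: on_each_cpu(fn, info, 0, 1) becomes on_each_cpu(fn, info, 1), and likewise for smp_call_function(). A single-threaded userspace stand-in showing the reduced signature (the real helper IPIs the other cpus and, if wait is set, spins until they have run func):

#include <stdio.h>

typedef void (*smp_call_fn)(void *info);

/* Userspace stand-in for the reworked helper: the unused "nonatomic"
 * argument is gone and only "wait" remains. */
static int smp_call_function(smp_call_fn func, void *info, int wait)
{
        func(info);     /* pretend this ran on every other cpu */
        (void)wait;     /* nothing to wait for in a single thread */
        return 0;
}

static void hello(void *info)
{
        printf("called with \"%s\"\n", (const char *)info);
}

int main(void)
{
        /* old call: smp_call_function(hello, arg, 0, 1);  new call: */
        return smp_call_function(hello, "three-argument signature", 1);
}
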
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 85e46a5d0e08..8841919ef7e6 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -10,6 +10,7 @@
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/stacktrace.h> 11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13#include <linux/module.h>
13 14
14static unsigned long save_context_stack(struct stack_trace *trace, 15static unsigned long save_context_stack(struct stack_trace *trace,
15 unsigned long sp, 16 unsigned long sp,
@@ -81,6 +82,7 @@ void save_stack_trace(struct stack_trace *trace)
81 S390_lowcore.thread_info, 82 S390_lowcore.thread_info,
82 S390_lowcore.thread_info + THREAD_SIZE, 1); 83 S390_lowcore.thread_info + THREAD_SIZE, 1);
83} 84}
85EXPORT_SYMBOL_GPL(save_stack_trace);
84 86
85void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) 87void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
86{ 88{
@@ -93,3 +95,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
93 if (trace->nr_entries < trace->max_entries) 95 if (trace->nr_entries < trace->max_entries)
94 trace->entries[trace->nr_entries++] = ULONG_MAX; 96 trace->entries[trace->nr_entries++] = ULONG_MAX;
95} 97}
98EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
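
Exporting save_stack_trace() and save_stack_trace_tsk() makes them callable from modules. A module-side sketch of typical usage against the <linux/stacktrace.h> API of this era; this is a fragment meant to live inside a module, printing raw addresses to keep its dependencies minimal:

/* Illustrative module code: what the new exports permit. */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static unsigned long entries[16];

static void dump_current_stack(void)
{
        struct stack_trace trace = {
                .max_entries = ARRAY_SIZE(entries),
                .entries = entries,
        };
        unsigned int i;

        save_stack_trace(&trace);               /* now EXPORT_SYMBOL_GPL */
        for (i = 0; i < trace.nr_entries; i++) {
                if (entries[i] == ULONG_MAX)    /* terminating sentinel */
                        break;
                printk(KERN_DEBUG "trace: %lx\n", entries[i]);
        }
}
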
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 7aec676fefd5..ab70d9bd9261 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -3,7 +3,7 @@
3 * Time of day based timer functions. 3 * Time of day based timer functions.
4 * 4 *
5 * S390 version 5 * S390 version
6 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 * Copyright IBM Corp. 1999, 2008
7 * Author(s): Hartmut Penner (hp@de.ibm.com), 7 * Author(s): Hartmut Penner (hp@de.ibm.com),
8 * Martin Schwidefsky (schwidefsky@de.ibm.com), 8 * Martin Schwidefsky (schwidefsky@de.ibm.com),
9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) 9 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
@@ -31,6 +31,7 @@
31#include <linux/notifier.h> 31#include <linux/notifier.h>
32#include <linux/clocksource.h> 32#include <linux/clocksource.h>
33#include <linux/clockchips.h> 33#include <linux/clockchips.h>
34#include <linux/bootmem.h>
34#include <asm/uaccess.h> 35#include <asm/uaccess.h>
35#include <asm/delay.h> 36#include <asm/delay.h>
36#include <asm/s390_ext.h> 37#include <asm/s390_ext.h>
@@ -162,7 +163,7 @@ void init_cpu_timer(void)
162 /* Enable clock comparator timer interrupt. */ 163 /* Enable clock comparator timer interrupt. */
163 __ctl_set_bit(0,11); 164 __ctl_set_bit(0,11);
164 165
165 /* Always allow ETR external interrupts, even without an ETR. */ 166 /* Always allow the timing alert external interrupt. */
166 __ctl_set_bit(0, 4); 167 __ctl_set_bit(0, 4);
167} 168}
168 169
@@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code)
170{ 171{
171} 172}
172 173
174static void etr_timing_alert(struct etr_irq_parm *);
175static void stp_timing_alert(struct stp_irq_parm *);
176
177static void timing_alert_interrupt(__u16 code)
178{
179 if (S390_lowcore.ext_params & 0x00c40000)
180 etr_timing_alert((struct etr_irq_parm *)
181 &S390_lowcore.ext_params);
182 if (S390_lowcore.ext_params & 0x00038000)
183 stp_timing_alert((struct stp_irq_parm *)
184 &S390_lowcore.ext_params);
185}
186
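The two magic masks pick out the interruption-parameter bits that belong to each protocol, so the single external interrupt 0x1406 can be demultiplexed to both handlers. Spelled out with our own names (derived from the etr_irq_parm/stp_irq_parm bit layouts, not taken from the patch):

#define ETR_IRQ_PARM_MASK	0x00c40000	/* pc0 | pc1 | eai */
#define STP_IRQ_PARM_MASK	0x00038000	/* tsc | lac | tcpc */
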
173static void etr_reset(void); 187static void etr_reset(void);
174static void etr_ext_handler(__u16); 188static void stp_reset(void);
175 189
176/* 190/*
177 * Get the TOD clock running. 191 * Get the TOD clock running.
@@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void)
181 u64 time; 195 u64 time;
182 196
183 etr_reset(); 197 etr_reset();
198 stp_reset();
184 if (store_clock(&time) == 0) 199 if (store_clock(&time) == 0)
185 return time; 200 return time;
186 /* TOD clock not running. Set the clock to Unix Epoch. */ 201 /* TOD clock not running. Set the clock to Unix Epoch. */
@@ -231,8 +246,9 @@ void __init time_init(void)
231 if (clocksource_register(&clocksource_tod) != 0) 246 if (clocksource_register(&clocksource_tod) != 0)
232 panic("Could not register TOD clock source"); 247 panic("Could not register TOD clock source");
233 248
234 /* request the etr external interrupt */ 249 /* request the timing alert external interrupt */
235 if (register_early_external_interrupt(0x1406, etr_ext_handler, 250 if (register_early_external_interrupt(0x1406,
251 timing_alert_interrupt,
236 &ext_int_etr_cc) != 0) 252 &ext_int_etr_cc) != 0)
237 panic("Couldn't request external interrupt 0x1406"); 253 panic("Couldn't request external interrupt 0x1406");
238 254
@@ -245,10 +261,112 @@ void __init time_init(void)
245} 261}
246 262
247/* 263/*
264 * The time is "clock". old is what we think the time is.
265 * Adjust the value by a multiple of jiffies and add the delta to ntp.
266 * "delay" is an approximation how long the synchronization took. If
267 * the time correction is positive, then "delay" is subtracted from
268 * the time difference and only the remaining part is passed to ntp.
269 */
270static unsigned long long adjust_time(unsigned long long old,
271 unsigned long long clock,
272 unsigned long long delay)
273{
274 unsigned long long delta, ticks;
275 struct timex adjust;
276
277 if (clock > old) {
278 /* It is later than we thought. */
279 delta = ticks = clock - old;
280 delta = ticks = (delta < delay) ? 0 : delta - delay;
281 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
282 adjust.offset = ticks * (1000000 / HZ);
283 } else {
284 /* It is earlier than we thought. */
285 delta = ticks = old - clock;
286 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
287 delta = -delta;
288 adjust.offset = -ticks * (1000000 / HZ);
289 }
290 jiffies_timer_cc += delta;
291 if (adjust.offset != 0) {
292 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
293 adjust.offset);
294 adjust.modes = ADJ_OFFSET_SINGLESHOT;
295 do_adjtimex(&adjust);
296 }
297 return delta;
298}
299
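To make the adjust_time() arithmetic concrete, a worked example (our numbers; assumes HZ=100, so CLK_TICKS_PER_JIFFY = 10000 microseconds << 12 = 40960000 TOD units):

/*
 * old = 0, clock = 100000000 TOD units (~24.4 ms; 4096 units = 1 us), delay = 0:
 *   ticks  = 100000000 / 40960000 = 2          (do_div remainder 18080000)
 *   delta  = 100000000 - 18080000 = 81920000   (exactly two jiffies of TOD units)
 *   offset = 2 * (1000000 / HZ)   = 20000 us
 * jiffies_timer_cc advances by the two-jiffy delta, and the same 20 ms is fed
 * to do_adjtimex() as a single-shot offset.
 */
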
300static DEFINE_PER_CPU(atomic_t, clock_sync_word);
301static unsigned long clock_sync_flags;
302
303#define CLOCK_SYNC_HAS_ETR 0
304#define CLOCK_SYNC_HAS_STP 1
305#define CLOCK_SYNC_ETR 2
306#define CLOCK_SYNC_STP 3
307
308/*
309 * The synchronous get_clock function. It will write the current clock
310 * value to the clock pointer and return 0 if the clock is in sync with
311 * the external time source. If the clock mode is local it will return
312 * -ENOSYS, and -EAGAIN if the clock is not in sync with the external
313 * reference.
314 */
315int get_sync_clock(unsigned long long *clock)
316{
317 atomic_t *sw_ptr;
318 unsigned int sw0, sw1;
319
320 sw_ptr = &get_cpu_var(clock_sync_word);
321 sw0 = atomic_read(sw_ptr);
322 *clock = get_clock();
323 sw1 = atomic_read(sw_ptr);
324 put_cpu_var(clock_sync_word);
325 if (sw0 == sw1 && (sw0 & 0x80000000U))
326 /* Success: time is in sync. */
327 return 0;
328 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
329 !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
330 return -ENOSYS;
331 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
332 !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
333 return -EACCES;
334 return -EAGAIN;
335}
336EXPORT_SYMBOL(get_sync_clock);
337
338/*
339 * Make get_sync_clock return -EAGAIN.
340 */
341static void disable_sync_clock(void *dummy)
342{
343 atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
344 /*
345 * Clear the in-sync bit 2^31. All get_sync_clock calls will
346 * fail until the sync bit is turned back on. In addition
347 * increase the "sequence" counter to avoid the race of an
348 * etr event and the complete recovery against get_sync_clock.
349 */
350 atomic_clear_mask(0x80000000, sw_ptr);
351 atomic_inc(sw_ptr);
352}
353
354/*
355 * Make get_sync_clock return 0 again.
356 * Needs to be called from a context disabled for preemption.
357 */
358static void enable_sync_clock(void)
359{
360 atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
361 atomic_set_mask(0x80000000, sw_ptr);
362}
363
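The per-cpu clock_sync_word acts like a tiny seqcount: bit 2^31 is the in-sync flag and the remaining bits count disable events. get_sync_clock() reads the word before and after fetching the clock and trusts the value only if both reads match with the flag set; disable_sync_clock() both clears the flag and bumps the counter, so a racing reader can never see a stale in-sync indication. enable_sync_clock() only sets the flag, which is safe because the worst a race can do there is make one read fail conservatively. A typical caller shape (illustrative only, not from this patch):

	unsigned long long clock;

	if (get_sync_clock(&clock) != 0)
		clock = get_clock();	/* fall back to the unsynchronized TOD */
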
364/*
248 * External Time Reference (ETR) code. 365 * External Time Reference (ETR) code.
249 */ 366 */
250static int etr_port0_online; 367static int etr_port0_online;
251static int etr_port1_online; 368static int etr_port1_online;
369static int etr_steai_available;
252 370
253static int __init early_parse_etr(char *p) 371static int __init early_parse_etr(char *p)
254{ 372{
@@ -273,12 +391,6 @@ enum etr_event {
273 ETR_EVENT_UPDATE, 391 ETR_EVENT_UPDATE,
274}; 392};
275 393
276enum etr_flags {
277 ETR_FLAG_ENOSYS,
278 ETR_FLAG_EACCES,
279 ETR_FLAG_STEAI,
280};
281
282/* 394/*
283 * Valid bit combinations of the eacr register are (x = don't care): 395 * Valid bit combinations of the eacr register are (x = don't care):
284 * e0 e1 dp p0 p1 ea es sl 396 * e0 e1 dp p0 p1 ea es sl
@@ -305,74 +417,18 @@ enum etr_flags {
305 */ 417 */
306static struct etr_eacr etr_eacr; 418static struct etr_eacr etr_eacr;
307static u64 etr_tolec; /* time of last eacr update */ 419static u64 etr_tolec; /* time of last eacr update */
308static unsigned long etr_flags;
309static struct etr_aib etr_port0; 420static struct etr_aib etr_port0;
310static int etr_port0_uptodate; 421static int etr_port0_uptodate;
311static struct etr_aib etr_port1; 422static struct etr_aib etr_port1;
312static int etr_port1_uptodate; 423static int etr_port1_uptodate;
313static unsigned long etr_events; 424static unsigned long etr_events;
314static struct timer_list etr_timer; 425static struct timer_list etr_timer;
315static DEFINE_PER_CPU(atomic_t, etr_sync_word);
316 426
317static void etr_timeout(unsigned long dummy); 427static void etr_timeout(unsigned long dummy);
318static void etr_work_fn(struct work_struct *work); 428static void etr_work_fn(struct work_struct *work);
319static DECLARE_WORK(etr_work, etr_work_fn); 429static DECLARE_WORK(etr_work, etr_work_fn);
320 430
321/* 431/*
322 * The etr get_clock function. It will write the current clock value
323 * to the clock pointer and return 0 if the clock is in sync with the
324 * external time source. If the clock mode is local it will return
325 * -ENOSYS and -EAGAIN if the clock is not in sync with the external
326 * reference. This function is what ETR is all about..
327 */
328int get_sync_clock(unsigned long long *clock)
329{
330 atomic_t *sw_ptr;
331 unsigned int sw0, sw1;
332
333 sw_ptr = &get_cpu_var(etr_sync_word);
334 sw0 = atomic_read(sw_ptr);
335 *clock = get_clock();
336 sw1 = atomic_read(sw_ptr);
337 put_cpu_var(etr_sync_sync);
338 if (sw0 == sw1 && (sw0 & 0x80000000U))
339 /* Success: time is in sync. */
340 return 0;
341 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
342 return -ENOSYS;
343 if (test_bit(ETR_FLAG_EACCES, &etr_flags))
344 return -EACCES;
345 return -EAGAIN;
346}
347EXPORT_SYMBOL(get_sync_clock);
348
349/*
350 * Make get_sync_clock return -EAGAIN.
351 */
352static void etr_disable_sync_clock(void *dummy)
353{
354 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
355 /*
356 * Clear the in-sync bit 2^31. All get_sync_clock calls will
357 * fail until the sync bit is turned back on. In addition
358 * increase the "sequence" counter to avoid the race of an
359 * etr event and the complete recovery against get_sync_clock.
360 */
361 atomic_clear_mask(0x80000000, sw_ptr);
362 atomic_inc(sw_ptr);
363}
364
365/*
366 * Make get_sync_clock return 0 again.
367 * Needs to be called from a context disabled for preemption.
368 */
369static void etr_enable_sync_clock(void)
370{
371 atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
372 atomic_set_mask(0x80000000, sw_ptr);
373}
374
375/*
376 * Reset ETR attachment. 432 * Reset ETR attachment.
377 */ 433 */
378static void etr_reset(void) 434static void etr_reset(void)
@@ -381,15 +437,13 @@ static void etr_reset(void)
381 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, 437 .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
382 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, 438 .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
383 .es = 0, .sl = 0 }; 439 .es = 0, .sl = 0 };
384 if (etr_setr(&etr_eacr) == 0) 440 if (etr_setr(&etr_eacr) == 0) {
385 etr_tolec = get_clock(); 441 etr_tolec = get_clock();
386 else { 442 set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
387 set_bit(ETR_FLAG_ENOSYS, &etr_flags); 443 } else if (etr_port0_online || etr_port1_online) {
388 if (etr_port0_online || etr_port1_online) { 444 printk(KERN_WARNING "Running on non ETR capable "
389 printk(KERN_WARNING "Running on non ETR capable " 445 "machine, only local mode available.\n");
390 "machine, only local mode available.\n"); 446 etr_port0_online = etr_port1_online = 0;
391 etr_port0_online = etr_port1_online = 0;
392 }
393 } 447 }
394} 448}
395 449
@@ -397,14 +451,12 @@ static int __init etr_init(void)
397{ 451{
398 struct etr_aib aib; 452 struct etr_aib aib;
399 453
400 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 454 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
401 return 0; 455 return 0;
402 /* Check if this machine has the steai instruction. */ 456 /* Check if this machine has the steai instruction. */
403 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) 457 if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
404 set_bit(ETR_FLAG_STEAI, &etr_flags); 458 etr_steai_available = 1;
405 setup_timer(&etr_timer, etr_timeout, 0UL); 459 setup_timer(&etr_timer, etr_timeout, 0UL);
406 if (!etr_port0_online && !etr_port1_online)
407 set_bit(ETR_FLAG_EACCES, &etr_flags);
408 if (etr_port0_online) { 460 if (etr_port0_online) {
409 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 461 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
410 schedule_work(&etr_work); 462 schedule_work(&etr_work);
@@ -435,7 +487,8 @@ void etr_switch_to_local(void)
435{ 487{
436 if (!etr_eacr.sl) 488 if (!etr_eacr.sl)
437 return; 489 return;
438 etr_disable_sync_clock(NULL); 490 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
491 disable_sync_clock(NULL);
439 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); 492 set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
440 schedule_work(&etr_work); 493 schedule_work(&etr_work);
441} 494}
@@ -450,23 +503,21 @@ void etr_sync_check(void)
450{ 503{
451 if (!etr_eacr.es) 504 if (!etr_eacr.es)
452 return; 505 return;
453 etr_disable_sync_clock(NULL); 506 if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
507 disable_sync_clock(NULL);
454 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); 508 set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
455 schedule_work(&etr_work); 509 schedule_work(&etr_work);
456} 510}
457 511
458/* 512/*
459 * ETR external interrupt. There are two causes: 513 * ETR timing alert. There are two causes:
460 * 1) port state change, check the usability of the port 514 * 1) port state change, check the usability of the port
461 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the 515 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
462 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) 516 * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
463 * or ETR-data word 4 (edf4) has changed. 517 * or ETR-data word 4 (edf4) has changed.
464 */ 518 */
465static void etr_ext_handler(__u16 code) 519static void etr_timing_alert(struct etr_irq_parm *intparm)
466{ 520{
467 struct etr_interruption_parameter *intparm =
468 (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
469
470 if (intparm->pc0) 521 if (intparm->pc0)
471 /* ETR port 0 state change. */ 522 /* ETR port 0 state change. */
472 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); 523 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
@@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
591 return 1; 642 return 1;
592} 643}
593 644
594/* 645struct clock_sync_data {
595 * The time is "clock". old is what we think the time is.
596 * Adjust the value by a multiple of jiffies and add the delta to ntp.
597 * "delay" is an approximation how long the synchronization took. If
598 * the time correction is positive, then "delay" is subtracted from
599 * the time difference and only the remaining part is passed to ntp.
600 */
601static unsigned long long etr_adjust_time(unsigned long long old,
602 unsigned long long clock,
603 unsigned long long delay)
604{
605 unsigned long long delta, ticks;
606 struct timex adjust;
607
608 if (clock > old) {
609 /* It is later than we thought. */
610 delta = ticks = clock - old;
611 delta = ticks = (delta < delay) ? 0 : delta - delay;
612 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
613 adjust.offset = ticks * (1000000 / HZ);
614 } else {
615 /* It is earlier than we thought. */
616 delta = ticks = old - clock;
617 delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
618 delta = -delta;
619 adjust.offset = -ticks * (1000000 / HZ);
620 }
621 jiffies_timer_cc += delta;
622 if (adjust.offset != 0) {
623 printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
624 adjust.offset);
625 adjust.modes = ADJ_OFFSET_SINGLESHOT;
626 do_adjtimex(&adjust);
627 }
628 return delta;
629}
630
631static struct {
632 int in_sync; 646 int in_sync;
633 unsigned long long fixup_cc; 647 unsigned long long fixup_cc;
634} etr_sync; 648};
635 649
636static void etr_sync_cpu_start(void *dummy) 650static void clock_sync_cpu_start(void *dummy)
637{ 651{
638 etr_enable_sync_clock(); 652 struct clock_sync_data *sync = dummy;
653
654 enable_sync_clock();
639 /* 655 /*
640 * This looks like a busy wait loop but it isn't. etr_sync_cpus 656 * This looks like a busy wait loop but it isn't. etr_sync_cpus
641 * is called on all other cpus while the TOD clocks is stopped. 657 * is called on all other cpus while the TOD clocks is stopped.
642 * __udelay will stop the cpu on an enabled wait psw until the 658 * __udelay will stop the cpu on an enabled wait psw until the
643 * TOD is running again. 659 * TOD is running again.
644 */ 660 */
645 while (etr_sync.in_sync == 0) { 661 while (sync->in_sync == 0) {
646 __udelay(1); 662 __udelay(1);
647 /* 663 /*
648 * A different cpu changes *in_sync. Therefore use 664 * A different cpu changes *in_sync. Therefore use
@@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy)
650 */ 666 */
651 barrier(); 667 barrier();
652 } 668 }
653 if (etr_sync.in_sync != 1) 669 if (sync->in_sync != 1)
654 /* Didn't work. Clear per-cpu in sync bit again. */ 670 /* Didn't work. Clear per-cpu in sync bit again. */
655 etr_disable_sync_clock(NULL); 671 disable_sync_clock(NULL);
656 /* 672 /*
657 * This round of TOD syncing is done. Set the clock comparator 673 * This round of TOD syncing is done. Set the clock comparator
658 * to the next tick and let the processor continue. 674 * to the next tick and let the processor continue.
659 */ 675 */
660 fixup_clock_comparator(etr_sync.fixup_cc); 676 fixup_clock_comparator(sync->fixup_cc);
661} 677}
662 678
663static void etr_sync_cpu_end(void *dummy) 679static void clock_sync_cpu_end(void *dummy)
664{ 680{
665} 681}
666 682
@@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy)
672static int etr_sync_clock(struct etr_aib *aib, int port) 688static int etr_sync_clock(struct etr_aib *aib, int port)
673{ 689{
674 struct etr_aib *sync_port; 690 struct etr_aib *sync_port;
691 struct clock_sync_data etr_sync;
675 unsigned long long clock, old_clock, delay, delta; 692 unsigned long long clock, old_clock, delay, delta;
676 int follows; 693 int follows;
677 int rc; 694 int rc;
@@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
690 */ 707 */
691 memset(&etr_sync, 0, sizeof(etr_sync)); 708 memset(&etr_sync, 0, sizeof(etr_sync));
692 preempt_disable(); 709 preempt_disable();
693 smp_call_function(etr_sync_cpu_start, NULL, 0, 0); 710 smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
694 local_irq_disable(); 711 local_irq_disable();
695 etr_enable_sync_clock(); 712 enable_sync_clock();
696 713
697 /* Set clock to next OTE. */ 714 /* Set clock to next OTE. */
698 __ctl_set_bit(14, 21); 715 __ctl_set_bit(14, 21);
@@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
707 /* Adjust Linux timing variables. */ 724 /* Adjust Linux timing variables. */
708 delay = (unsigned long long) 725 delay = (unsigned long long)
709 (aib->edf2.etv - sync_port->edf2.etv) << 32; 726 (aib->edf2.etv - sync_port->edf2.etv) << 32;
710 delta = etr_adjust_time(old_clock, clock, delay); 727 delta = adjust_time(old_clock, clock, delay);
711 etr_sync.fixup_cc = delta; 728 etr_sync.fixup_cc = delta;
712 fixup_clock_comparator(delta); 729 fixup_clock_comparator(delta);
713 /* Verify that the clock is properly set. */ 730 /* Verify that the clock is properly set. */
714 if (!etr_aib_follows(sync_port, aib, port)) { 731 if (!etr_aib_follows(sync_port, aib, port)) {
715 /* Didn't work. */ 732 /* Didn't work. */
716 etr_disable_sync_clock(NULL); 733 disable_sync_clock(NULL);
717 etr_sync.in_sync = -EAGAIN; 734 etr_sync.in_sync = -EAGAIN;
718 rc = -EAGAIN; 735 rc = -EAGAIN;
719 } else { 736 } else {
@@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
724 /* Could not set the clock ?!? */ 741 /* Could not set the clock ?!? */
725 __ctl_clear_bit(0, 29); 742 __ctl_clear_bit(0, 29);
726 __ctl_clear_bit(14, 21); 743 __ctl_clear_bit(14, 21);
727 etr_disable_sync_clock(NULL); 744 disable_sync_clock(NULL);
728 etr_sync.in_sync = -EAGAIN; 745 etr_sync.in_sync = -EAGAIN;
729 rc = -EAGAIN; 746 rc = -EAGAIN;
730 } 747 }
731 local_irq_enable(); 748 local_irq_enable();
732 smp_call_function(etr_sync_cpu_end,NULL,0,0); 749 smp_call_function(clock_sync_cpu_end, NULL, 0);
733 preempt_enable(); 750 preempt_enable();
734 return rc; 751 return rc;
735} 752}
@@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
832 * Do not try to get the alternate port aib if the clock 849 * Do not try to get the alternate port aib if the clock
833 * is not in sync yet. 850 * is not in sync yet.
834 */ 851 */
835 if (!eacr.es) 852 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es)
836 return eacr; 853 return eacr;
837 854
838 /* 855 /*
@@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib,
840 * the other port immediately. If only stetr is available the 857 * the other port immediately. If only stetr is available the
841 * data-port bit toggle has to be used. 858 * data-port bit toggle has to be used.
842 */ 859 */
843 if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { 860 if (etr_steai_available) {
844 if (eacr.p0 && !etr_port0_uptodate) { 861 if (eacr.p0 && !etr_port0_uptodate) {
845 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); 862 etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
846 etr_port0_uptodate = 1; 863 etr_port0_uptodate = 1;
@@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work)
909 if (!eacr.ea) { 926 if (!eacr.ea) {
910 /* Both ports offline. Reset everything. */ 927 /* Both ports offline. Reset everything. */
911 eacr.dp = eacr.es = eacr.sl = 0; 928 eacr.dp = eacr.es = eacr.sl = 0;
912 on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); 929 on_each_cpu(disable_sync_clock, NULL, 1);
913 del_timer_sync(&etr_timer); 930 del_timer_sync(&etr_timer);
914 etr_update_eacr(eacr); 931 etr_update_eacr(eacr);
915 set_bit(ETR_FLAG_EACCES, &etr_flags); 932 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
916 return; 933 return;
917 } 934 }
918 935
@@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work)
953 eacr.e1 = 1; 970 eacr.e1 = 1;
954 sync_port = (etr_port0_uptodate && 971 sync_port = (etr_port0_uptodate &&
955 etr_port_valid(&etr_port0, 0)) ? 0 : -1; 972 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
956 clear_bit(ETR_FLAG_EACCES, &etr_flags);
957 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { 973 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
958 eacr.sl = 0; 974 eacr.sl = 0;
959 eacr.e0 = 0; 975 eacr.e0 = 0;
@@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work)
962 eacr.es = 0; 978 eacr.es = 0;
963 sync_port = (etr_port1_uptodate && 979 sync_port = (etr_port1_uptodate &&
964 etr_port_valid(&etr_port1, 1)) ? 1 : -1; 980 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
965 clear_bit(ETR_FLAG_EACCES, &etr_flags);
966 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { 981 } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
967 eacr.sl = 1; 982 eacr.sl = 1;
968 eacr.e0 = 1; 983 eacr.e0 = 1;
@@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work)
976 eacr.e1 = 1; 991 eacr.e1 = 1;
977 sync_port = (etr_port0_uptodate && 992 sync_port = (etr_port0_uptodate &&
978 etr_port_valid(&etr_port0, 0)) ? 0 : -1; 993 etr_port_valid(&etr_port0, 0)) ? 0 : -1;
979 clear_bit(ETR_FLAG_EACCES, &etr_flags);
980 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { 994 } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
981 eacr.sl = 1; 995 eacr.sl = 1;
982 eacr.e0 = 0; 996 eacr.e0 = 0;
@@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work)
985 eacr.es = 0; 999 eacr.es = 0;
986 sync_port = (etr_port1_uptodate && 1000 sync_port = (etr_port1_uptodate &&
987 etr_port_valid(&etr_port1, 1)) ? 1 : -1; 1001 etr_port_valid(&etr_port1, 1)) ? 1 : -1;
988 clear_bit(ETR_FLAG_EACCES, &etr_flags);
989 } else { 1002 } else {
990 /* Both ports not usable. */ 1003 /* Both ports not usable. */
991 eacr.es = eacr.sl = 0; 1004 eacr.es = eacr.sl = 0;
992 sync_port = -1; 1005 sync_port = -1;
993 set_bit(ETR_FLAG_EACCES, &etr_flags); 1006 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
994 } 1007 }
995 1008
1009 if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1010 eacr.es = 0;
1011
996 /* 1012 /*
997 * If the clock is in sync just update the eacr and return. 1013 * If the clock is in sync just update the eacr and return.
998 * If there is no valid sync port wait for a port update. 1014 * If there is no valid sync port wait for a port update.
999 */ 1015 */
1000 if (eacr.es || sync_port < 0) { 1016 if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) ||
1017 eacr.es || sync_port < 0) {
1001 etr_update_eacr(eacr); 1018 etr_update_eacr(eacr);
1002 etr_set_tolec_timeout(now); 1019 etr_set_tolec_timeout(now);
1003 return; 1020 return;
@@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work)
1018 * and set up a timer to try again after 0.5 seconds 1035 * and set up a timer to try again after 0.5 seconds
1019 */ 1036 */
1020 etr_update_eacr(eacr); 1037 etr_update_eacr(eacr);
1038 set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1021 if (now < etr_tolec + (1600000 << 12) || 1039 if (now < etr_tolec + (1600000 << 12) ||
1022 etr_sync_clock(&aib, sync_port) != 0) { 1040 etr_sync_clock(&aib, sync_port) != 0) {
1023 /* Sync failed. Try again in 1/2 second. */ 1041 /* Sync failed. Try again in 1/2 second. */
1024 eacr.es = 0; 1042 eacr.es = 0;
1025 etr_update_eacr(eacr); 1043 etr_update_eacr(eacr);
1044 clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1026 etr_set_sync_timeout(); 1045 etr_set_sync_timeout();
1027 } else 1046 } else
1028 etr_set_tolec_timeout(now); 1047 etr_set_tolec_timeout(now);
@@ -1081,7 +1100,9 @@ static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1081 return etr_port1_online ? &etr_port1 : NULL; 1100 return etr_port1_online ? &etr_port1 : NULL;
1082} 1101}
1083 1102
1084static ssize_t etr_online_show(struct sys_device *dev, char *buf) 1103static ssize_t etr_online_show(struct sys_device *dev,
1104 struct sysdev_attribute *attr,
1105 char *buf)
1085{ 1106{
1086 unsigned int online; 1107 unsigned int online;
1087 1108
@@ -1090,15 +1111,16 @@ static ssize_t etr_online_show(struct sys_device *dev, char *buf)
1090} 1111}
1091 1112
1092static ssize_t etr_online_store(struct sys_device *dev, 1113static ssize_t etr_online_store(struct sys_device *dev,
1093 const char *buf, size_t count) 1114 struct sysdev_attribute *attr,
1115 const char *buf, size_t count)
1094{ 1116{
1095 unsigned int value; 1117 unsigned int value;
1096 1118
1097 value = simple_strtoul(buf, NULL, 0); 1119 value = simple_strtoul(buf, NULL, 0);
1098 if (value != 0 && value != 1) 1120 if (value != 0 && value != 1)
1099 return -EINVAL; 1121 return -EINVAL;
1100 if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) 1122 if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
1101 return -ENOSYS; 1123 return -EOPNOTSUPP;
1102 if (dev == &etr_port0_dev) { 1124 if (dev == &etr_port0_dev) {
1103 if (etr_port0_online == value) 1125 if (etr_port0_online == value)
1104 return count; /* Nothing to do. */ 1126 return count; /* Nothing to do. */
@@ -1117,7 +1139,9 @@ static ssize_t etr_online_store(struct sys_device *dev,
1117 1139
1118static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store); 1140static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1119 1141
1120static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf) 1142static ssize_t etr_stepping_control_show(struct sys_device *dev,
1143 struct sysdev_attribute *attr,
1144 char *buf)
1121{ 1145{
1122 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ? 1146 return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1123 etr_eacr.e0 : etr_eacr.e1); 1147 etr_eacr.e0 : etr_eacr.e1);
@@ -1125,7 +1149,8 @@ static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
1125 1149
1126static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL); 1150static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1127 1151
1128static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf) 1152static ssize_t etr_mode_code_show(struct sys_device *dev,
1153 struct sysdev_attribute *attr, char *buf)
1129{ 1154{
1130 if (!etr_port0_online && !etr_port1_online) 1155 if (!etr_port0_online && !etr_port1_online)
1131 /* Status word is not uptodate if both ports are offline. */ 1156 /* Status word is not uptodate if both ports are offline. */
@@ -1136,7 +1161,8 @@ static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
1136 1161
1137static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL); 1162static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1138 1163
1139static ssize_t etr_untuned_show(struct sys_device *dev, char *buf) 1164static ssize_t etr_untuned_show(struct sys_device *dev,
1165 struct sysdev_attribute *attr, char *buf)
1140{ 1166{
1141 struct etr_aib *aib = etr_aib_from_dev(dev); 1167 struct etr_aib *aib = etr_aib_from_dev(dev);
1142 1168
@@ -1147,7 +1173,8 @@ static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
1147 1173
1148static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL); 1174static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1149 1175
1150static ssize_t etr_network_id_show(struct sys_device *dev, char *buf) 1176static ssize_t etr_network_id_show(struct sys_device *dev,
1177 struct sysdev_attribute *attr, char *buf)
1151{ 1178{
1152 struct etr_aib *aib = etr_aib_from_dev(dev); 1179 struct etr_aib *aib = etr_aib_from_dev(dev);
1153 1180
@@ -1158,7 +1185,8 @@ static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
1158 1185
1159static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL); 1186static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1160 1187
1161static ssize_t etr_id_show(struct sys_device *dev, char *buf) 1188static ssize_t etr_id_show(struct sys_device *dev,
1189 struct sysdev_attribute *attr, char *buf)
1162{ 1190{
1163 struct etr_aib *aib = etr_aib_from_dev(dev); 1191 struct etr_aib *aib = etr_aib_from_dev(dev);
1164 1192
@@ -1169,7 +1197,8 @@ static ssize_t etr_id_show(struct sys_device *dev, char *buf)
1169 1197
1170static SYSDEV_ATTR(id, 0400, etr_id_show, NULL); 1198static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1171 1199
1172static ssize_t etr_port_number_show(struct sys_device *dev, char *buf) 1200static ssize_t etr_port_number_show(struct sys_device *dev,
1201 struct sysdev_attribute *attr, char *buf)
1173{ 1202{
1174 struct etr_aib *aib = etr_aib_from_dev(dev); 1203 struct etr_aib *aib = etr_aib_from_dev(dev);
1175 1204
@@ -1180,7 +1209,8 @@ static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
1180 1209
1181static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL); 1210static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1182 1211
1183static ssize_t etr_coupled_show(struct sys_device *dev, char *buf) 1212static ssize_t etr_coupled_show(struct sys_device *dev,
1213 struct sysdev_attribute *attr, char *buf)
1184{ 1214{
1185 struct etr_aib *aib = etr_aib_from_dev(dev); 1215 struct etr_aib *aib = etr_aib_from_dev(dev);
1186 1216
@@ -1191,7 +1221,8 @@ static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
1191 1221
1192static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL); 1222static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1193 1223
1194static ssize_t etr_local_time_show(struct sys_device *dev, char *buf) 1224static ssize_t etr_local_time_show(struct sys_device *dev,
1225 struct sysdev_attribute *attr, char *buf)
1195{ 1226{
1196 struct etr_aib *aib = etr_aib_from_dev(dev); 1227 struct etr_aib *aib = etr_aib_from_dev(dev);
1197 1228
@@ -1202,7 +1233,8 @@ static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
1202 1233
1203static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL); 1234static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1204 1235
1205static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf) 1236static ssize_t etr_utc_offset_show(struct sys_device *dev,
1237 struct sysdev_attribute *attr, char *buf)
1206{ 1238{
1207 struct etr_aib *aib = etr_aib_from_dev(dev); 1239 struct etr_aib *aib = etr_aib_from_dev(dev);
1208 1240
@@ -1292,3 +1324,318 @@ out:
1292} 1324}
1293 1325
1294device_initcall(etr_init_sysfs); 1326device_initcall(etr_init_sysfs);
1327
1328/*
1329 * Server Time Protocol (STP) code.
1330 */
1331static int stp_online;
1332static struct stp_sstpi stp_info;
1333static void *stp_page;
1334
1335static void stp_work_fn(struct work_struct *work);
1336static DECLARE_WORK(stp_work, stp_work_fn);
1337
1338static int __init early_parse_stp(char *p)
1339{
1340 if (strncmp(p, "off", 3) == 0)
1341 stp_online = 0;
1342 else if (strncmp(p, "on", 2) == 0)
1343 stp_online = 1;
1344 return 0;
1345}
1346early_param("stp", early_parse_stp);
1347
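This mirrors the existing etr= early parameter: booting with stp=on arms STP synchronization before stp_init() runs, stp=off keeps it disabled (the default, since stp_online is zero-initialized), and any other value is ignored.
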
1348/*
1349 * Reset STP attachment.
1350 */
1351static void stp_reset(void)
1352{
1353 int rc;
1354
1355 stp_page = alloc_bootmem_pages(PAGE_SIZE);
1356 rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1357 if (rc == 1)
1358 set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
1359 else if (stp_online) {
1360 printk(KERN_WARNING "Running on non STP capable machine.\n");
1361 free_bootmem((unsigned long) stp_page, PAGE_SIZE);
1362 stp_page = NULL;
1363 stp_online = 0;
1364 }
1365}
1366
1367static int __init stp_init(void)
1368{
1369 if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
1370 schedule_work(&stp_work);
1371 return 0;
1372}
1373
1374arch_initcall(stp_init);
1375
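Note the split responsibilities: stp_reset() runs out of reset_tod_clock() during early time-base setup, before the buddy allocator is up, which is why the control page comes from alloc_bootmem_pages(); stp_init(), an arch_initcall, then only has to kick stp_work once workqueues exist.
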
1376/*
1377 * STP timing alert. There are three causes:
1378 * 1) timing status change
1379 * 2) link availability change
1380 * 3) time control parameter change
1381 * In all three cases we are only interested in the clock source state.
1382 * If a STP clock source is now available use it.
1383 */
1384static void stp_timing_alert(struct stp_irq_parm *intparm)
1385{
1386 if (intparm->tsc || intparm->lac || intparm->tcpc)
1387 schedule_work(&stp_work);
1388}
1389
1390/*
1391 * STP sync check machine check. This is called when the timing state
1392 * changes from the synchronized state to the unsynchronized state.
1393 * After an STP sync check the clock is not in sync. The machine check
1394 * is broadcast to all cpus at the same time.
1395 */
1396void stp_sync_check(void)
1397{
1398 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1399 return;
1400 disable_sync_clock(NULL);
1401 schedule_work(&stp_work);
1402}
1403
1404/*
1405 * STP island condition machine check. This is called when an attached
1406 * server attempts to communicate over an STP link and the servers
1407 * have matching CTN ids and each has a valid stratum-1 configuration,
1408 * but the two stratum-1 configurations do not match.
1409 */
1410void stp_island_check(void)
1411{
1412 if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
1413 return;
1414 disable_sync_clock(NULL);
1415 schedule_work(&stp_work);
1416}
1417
1418/*
1419 * STP work function. Check the STP state and take over the clock
1420 * synchronization if the STP clock source is usable.
1421 */
1422static void stp_work_fn(struct work_struct *work)
1423{
1424 struct clock_sync_data stp_sync;
1425 unsigned long long old_clock, delta;
1426 int rc;
1427
1428 if (!stp_online) {
1429 chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1430 return;
1431 }
1432
1433 rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
1434 if (rc)
1435 return;
1436
1437 rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
1438 if (rc || stp_info.c == 0)
1439 return;
1440
1441 /*
1442 * Catch all other cpus and make them wait until we have
1443 * successfully synced the clock. smp_call_function will
1444 * return after all other cpus are in clock_sync_cpu_start.
1445 */
1446 memset(&stp_sync, 0, sizeof(stp_sync));
1447 preempt_disable();
1448 smp_call_function(clock_sync_cpu_start, &stp_sync, 0);
1449 local_irq_disable();
1450 enable_sync_clock();
1451
1452 set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1453 if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
1454 schedule_work(&etr_work);
1455
1456 rc = 0;
1457 if (stp_info.todoff[0] || stp_info.todoff[1] ||
1458 stp_info.todoff[2] || stp_info.todoff[3] ||
1459 stp_info.tmd != 2) {
1460 old_clock = get_clock();
1461 rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
1462 if (rc == 0) {
1463 delta = adjust_time(old_clock, get_clock(), 0);
1464 fixup_clock_comparator(delta);
1465 rc = chsc_sstpi(stp_page, &stp_info,
1466 sizeof(struct stp_sstpi));
1467 if (rc == 0 && stp_info.tmd != 2)
1468 rc = -EAGAIN;
1469 }
1470 }
1471 if (rc) {
1472 disable_sync_clock(NULL);
1473 stp_sync.in_sync = -EAGAIN;
1474 clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1475 if (etr_port0_online || etr_port1_online)
1476 schedule_work(&etr_work);
1477 } else
1478 stp_sync.in_sync = 1;
1479
1480 local_irq_enable();
1481 smp_call_function(clock_sync_cpu_end, NULL, 0);
1482 preempt_enable();
1483}
1484
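The flag choreography here encodes a preference: a successful STP takeover sets CLOCK_SYNC_STP, and if ETR was the active source (the test_and_clear of CLOCK_SYNC_ETR) etr_work is scheduled so the ETR side can stand down; on failure the STP bit is cleared again and ETR gets a chance to resync if a port is online. Together with the CLOCK_SYNC_STP check in etr_work_fn(), which skips the clock sync step while STP owns the clock, STP wins whenever both sources are usable.
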
1485/*
1486 * STP class sysfs interface functions
1487 */
1488static struct sysdev_class stp_sysclass = {
1489 .name = "stp",
1490};
1491
1492static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf)
1493{
1494 if (!stp_online)
1495 return -ENODATA;
1496 return sprintf(buf, "%016llx\n",
1497 *(unsigned long long *) stp_info.ctnid);
1498}
1499
1500static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1501
1502static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf)
1503{
1504 if (!stp_online)
1505 return -ENODATA;
1506 return sprintf(buf, "%i\n", stp_info.ctn);
1507}
1508
1509static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1510
1511static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf)
1512{
1513 if (!stp_online || !(stp_info.vbits & 0x2000))
1514 return -ENODATA;
1515 return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
1516}
1517
1518static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1519
1520static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf)
1521{
1522 if (!stp_online || !(stp_info.vbits & 0x8000))
1523 return -ENODATA;
1524 return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
1525}
1526
1527static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1528
1529static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf)
1530{
1531 if (!stp_online)
1532 return -ENODATA;
1533 return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
1534}
1535
1536static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1537
1538static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf)
1539{
1540 if (!stp_online || !(stp_info.vbits & 0x0800))
1541 return -ENODATA;
1542 return sprintf(buf, "%i\n", (int) stp_info.tto);
1543}
1544
1545static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1546
1547static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf)
1548{
1549 if (!stp_online || !(stp_info.vbits & 0x4000))
1550 return -ENODATA;
1551 return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
1552}
1553
1554static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1555 stp_time_zone_offset_show, NULL);
1556
1557static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf)
1558{
1559 if (!stp_online)
1560 return -ENODATA;
1561 return sprintf(buf, "%i\n", stp_info.tmd);
1562}
1563
1564static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1565
1566static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf)
1567{
1568 if (!stp_online)
1569 return -ENODATA;
1570 return sprintf(buf, "%i\n", stp_info.tst);
1571}
1572
1573static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1574
1575static ssize_t stp_online_show(struct sysdev_class *class, char *buf)
1576{
1577 return sprintf(buf, "%i\n", stp_online);
1578}
1579
1580static ssize_t stp_online_store(struct sysdev_class *class,
1581 const char *buf, size_t count)
1582{
1583 unsigned int value;
1584
1585 value = simple_strtoul(buf, NULL, 0);
1586 if (value != 0 && value != 1)
1587 return -EINVAL;
1588 if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1589 return -EOPNOTSUPP;
1590 stp_online = value;
1591 schedule_work(&stp_work);
1592 return count;
1593}
1594
1595/*
1596 * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
1597 * stp/online but attr_online already exists in this file ..
1598 */
1599static struct sysdev_class_attribute attr_stp_online = {
1600 .attr = { .name = "online", .mode = 0600 },
1601 .show = stp_online_show,
1602 .store = stp_online_store,
1603};
1604
1605static struct sysdev_class_attribute *stp_attributes[] = {
1606 &attr_ctn_id,
1607 &attr_ctn_type,
1608 &attr_dst_offset,
1609 &attr_leap_seconds,
1610 &attr_stp_online,
1611 &attr_stratum,
1612 &attr_time_offset,
1613 &attr_time_zone_offset,
1614 &attr_timing_mode,
1615 &attr_timing_state,
1616 NULL
1617};
1618
1619static int __init stp_init_sysfs(void)
1620{
1621 struct sysdev_class_attribute **attr;
1622 int rc;
1623
1624 rc = sysdev_class_register(&stp_sysclass);
1625 if (rc)
1626 goto out;
1627 for (attr = stp_attributes; *attr; attr++) {
1628 rc = sysdev_class_create_file(&stp_sysclass, *attr);
1629 if (rc)
1630 goto out_unreg;
1631 }
1632 return 0;
1633out_unreg:
1634 for (; attr >= stp_attributes; attr--)
1635 sysdev_class_remove_file(&stp_sysclass, *attr);
1636 sysdev_class_unregister(&stp_sysclass);
1637out:
1638 return rc;
1639}
1640
1641device_initcall(stp_init_sysfs);
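
The registered class shows up under /sys/devices/system/stp/, so the new state is inspectable from user space: reading ctn_id, stratum or timing_mode, and writing 0 or 1 to online, which feeds stp_online_store() above and schedules stp_work. The read-only attributes return -ENODATA while STP is offline, and writes to online return -EOPNOTSUPP on machines without STP.
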
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 661a07217057..212d618b0095 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -313,8 +313,6 @@ void __init s390_init_cpu_topology(void)
313 machine_has_topology_irq = 1; 313 machine_has_topology_irq = 1;
314 314
315 tl_info = alloc_bootmem_pages(PAGE_SIZE); 315 tl_info = alloc_bootmem_pages(PAGE_SIZE);
316 if (!tl_info)
317 goto error;
318 info = tl_info; 316 info = tl_info;
319 stsi(info, 15, 1, 2); 317 stsi(info, 15, 1, 2);
320 318
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b4607155e8d0..76c1e60c92f3 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -40,7 +40,6 @@ SECTIONS
40 _etext = .; /* End of text section */ 40 _etext = .; /* End of text section */
41 41
42 NOTES :text :note 42 NOTES :text :note
43 BUG_TABLE :text
44 43
45 RODATA 44 RODATA
46 45
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index ca90ee3f930e..0fa5dc5d68e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -136,7 +136,7 @@ static inline void set_vtimer(__u64 expires)
136} 136}
137#endif 137#endif
138 138
139static void start_cpu_timer(void) 139void vtime_start_cpu_timer(void)
140{ 140{
141 struct vtimer_queue *vt_list; 141 struct vtimer_queue *vt_list;
142 142
@@ -150,7 +150,7 @@ static void start_cpu_timer(void)
150 set_vtimer(vt_list->idle); 150 set_vtimer(vt_list->idle);
151} 151}
152 152
153static void stop_cpu_timer(void) 153void vtime_stop_cpu_timer(void)
154{ 154{
155 struct vtimer_queue *vt_list; 155 struct vtimer_queue *vt_list;
156 156
@@ -318,8 +318,7 @@ static void internal_add_vtimer(struct vtimer_list *timer)
318 vt_list = &per_cpu(virt_cpu_timer, timer->cpu); 318 vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
319 spin_lock_irqsave(&vt_list->lock, flags); 319 spin_lock_irqsave(&vt_list->lock, flags);
320 320
321 if (timer->cpu != smp_processor_id()) 321 BUG_ON(timer->cpu != smp_processor_id());
322 printk("internal_add_vtimer: BUG, running on wrong CPU");
323 322
324 /* if list is empty we only have to set the timer */ 323 /* if list is empty we only have to set the timer */
325 if (list_empty(&vt_list->list)) { 324 if (list_empty(&vt_list->list)) {
@@ -353,25 +352,12 @@ static void internal_add_vtimer(struct vtimer_list *timer)
353 put_cpu(); 352 put_cpu();
354} 353}
355 354
356static inline int prepare_vtimer(struct vtimer_list *timer) 355static inline void prepare_vtimer(struct vtimer_list *timer)
357{ 356{
358 if (!timer->function) { 357 BUG_ON(!timer->function);
359 printk("add_virt_timer: uninitialized timer\n"); 358 BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
360 return -EINVAL; 359 BUG_ON(vtimer_pending(timer));
361 }
362
363 if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
364 printk("add_virt_timer: invalid timer expire value!\n");
365 return -EINVAL;
366 }
367
368 if (vtimer_pending(timer)) {
369 printk("add_virt_timer: timer pending\n");
370 return -EBUSY;
371 }
372
373 timer->cpu = get_cpu(); 360 timer->cpu = get_cpu();
374 return 0;
375} 361}
376 362
377/* 363/*
@@ -382,10 +368,7 @@ void add_virt_timer(void *new)
382 struct vtimer_list *timer; 368 struct vtimer_list *timer;
383 369
384 timer = (struct vtimer_list *)new; 370 timer = (struct vtimer_list *)new;
385 371 prepare_vtimer(timer);
386 if (prepare_vtimer(timer) < 0)
387 return;
388
389 timer->interval = 0; 372 timer->interval = 0;
390 internal_add_vtimer(timer); 373 internal_add_vtimer(timer);
391} 374}
@@ -399,10 +382,7 @@ void add_virt_timer_periodic(void *new)
399 struct vtimer_list *timer; 382 struct vtimer_list *timer;
400 383
401 timer = (struct vtimer_list *)new; 384 timer = (struct vtimer_list *)new;
402 385 prepare_vtimer(timer);
403 if (prepare_vtimer(timer) < 0)
404 return;
405
406 timer->interval = timer->expires; 386 timer->interval = timer->expires;
407 internal_add_vtimer(timer); 387 internal_add_vtimer(timer);
408} 388}
@@ -423,15 +403,8 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
423 unsigned long flags; 403 unsigned long flags;
424 int cpu; 404 int cpu;
425 405
426 if (!timer->function) { 406 BUG_ON(!timer->function);
427 printk("mod_virt_timer: uninitialized timer\n"); 407 BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
428 return -EINVAL;
429 }
430
431 if (!expires || expires > VTIMER_MAX_SLICE) {
432 printk("mod_virt_timer: invalid expire range\n");
433 return -EINVAL;
434 }
435 408
436 /* 409 /*
437 * This is a common optimization triggered by the 410 * This is a common optimization triggered by the
@@ -444,6 +417,9 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
444 cpu = get_cpu(); 417 cpu = get_cpu();
445 vt_list = &per_cpu(virt_cpu_timer, cpu); 418 vt_list = &per_cpu(virt_cpu_timer, cpu);
446 419
420 /* check if we run on the right CPU */
421 BUG_ON(timer->cpu != cpu);
422
447 /* disable interrupts before test if timer is pending */ 423 /* disable interrupts before test if timer is pending */
448 spin_lock_irqsave(&vt_list->lock, flags); 424 spin_lock_irqsave(&vt_list->lock, flags);
449 425
@@ -458,14 +434,6 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
458 return 0; 434 return 0;
459 } 435 }
460 436
461 /* check if we run on the right CPU */
462 if (timer->cpu != cpu) {
463 printk("mod_virt_timer: running on wrong CPU, check your code\n");
464 spin_unlock_irqrestore(&vt_list->lock, flags);
465 put_cpu();
466 return -EINVAL;
467 }
468
469 list_del_init(&timer->entry); 437 list_del_init(&timer->entry);
470 timer->expires = expires; 438 timer->expires = expires;
471 439
@@ -536,24 +504,6 @@ void init_cpu_vtimer(void)
536 504
537} 505}
538 506
539static int vtimer_idle_notify(struct notifier_block *self,
540 unsigned long action, void *hcpu)
541{
542 switch (action) {
543 case S390_CPU_IDLE:
544 stop_cpu_timer();
545 break;
546 case S390_CPU_NOT_IDLE:
547 start_cpu_timer();
548 break;
549 }
550 return NOTIFY_OK;
551}
552
553static struct notifier_block vtimer_idle_nb = {
554 .notifier_call = vtimer_idle_notify,
555};
556
557void __init vtime_init(void) 507void __init vtime_init(void)
558{ 508{
559 /* request the cpu timer external interrupt */ 509 /* request the cpu timer external interrupt */
@@ -561,9 +511,6 @@ void __init vtime_init(void)
561 &ext_int_info_timer) != 0) 511 &ext_int_info_timer) != 0)
562 panic("Couldn't request external interrupt 0x1005"); 512 panic("Couldn't request external interrupt 0x1005");
563 513
564 if (register_idle_notifier(&vtimer_idle_nb))
565 panic("Couldn't register idle notifier");
566
567 /* Enable cpu timer interrupts on the boot cpu. */ 514 /* Enable cpu timer interrupts on the boot cpu. */
568 init_cpu_vtimer(); 515 init_cpu_vtimer();
569} 516}
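
The vtime.c changes replace soft validation with hard asserts: an uninitialized, out-of-range or already-pending timer now triggers BUG_ON() instead of a printk, and since add_virt_timer() returns void, the old printk path silently dropped the timer anyway. Callers must satisfy the preconditions up front, roughly as follows (illustrative sketch; field names per struct vtimer_list in asm/timer.h, values are ours):

#include <asm/timer.h>

static struct vtimer_list my_timer;	/* hypothetical user */

static void my_timer_fn(unsigned long data)
{
	/* runs from the CPU-timer external interrupt */
}

static void my_timer_start(void)
{
	init_virt_timer(&my_timer);		/* clears function, unlinks entry */
	my_timer.function = my_timer_fn;	/* else: BUG_ON(!timer->function) */
	my_timer.data	  = 0;
	my_timer.expires  = 0x100000000ULL;	/* non-zero and <= VTIMER_MAX_SLICE */
	add_virt_timer(&my_timer);
}
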
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 84a7fed4cd4e..11230b0db957 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -31,7 +31,7 @@ static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
31} 31}
32 32
33static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu, 33static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
34 struct interrupt_info *inti) 34 struct kvm_s390_interrupt_info *inti)
35{ 35{
36 switch (inti->type) { 36 switch (inti->type) {
37 case KVM_S390_INT_EMERGENCY: 37 case KVM_S390_INT_EMERGENCY:
@@ -91,7 +91,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
91} 91}
92 92
93static void __set_intercept_indicator(struct kvm_vcpu *vcpu, 93static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
94 struct interrupt_info *inti) 94 struct kvm_s390_interrupt_info *inti)
95{ 95{
96 switch (inti->type) { 96 switch (inti->type) {
97 case KVM_S390_INT_EMERGENCY: 97 case KVM_S390_INT_EMERGENCY:
@@ -111,7 +111,7 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
111} 111}
112 112
113static void __do_deliver_interrupt(struct kvm_vcpu *vcpu, 113static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
114 struct interrupt_info *inti) 114 struct kvm_s390_interrupt_info *inti)
115{ 115{
116 const unsigned short table[] = { 2, 4, 4, 6 }; 116 const unsigned short table[] = { 2, 4, 4, 6 };
117 int rc, exception = 0; 117 int rc, exception = 0;
@@ -290,9 +290,9 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
290 290
291int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) 291int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
292{ 292{
293 struct local_interrupt *li = &vcpu->arch.local_int; 293 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
294 struct float_interrupt *fi = vcpu->arch.local_int.float_int; 294 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
295 struct interrupt_info *inti; 295 struct kvm_s390_interrupt_info *inti;
296 int rc = 0; 296 int rc = 0;
297 297
298 if (atomic_read(&li->active)) { 298 if (atomic_read(&li->active)) {
@@ -408,9 +408,9 @@ void kvm_s390_idle_wakeup(unsigned long data)
408 408
409void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) 409void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
410{ 410{
411 struct local_interrupt *li = &vcpu->arch.local_int; 411 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
412 struct float_interrupt *fi = vcpu->arch.local_int.float_int; 412 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
413 struct interrupt_info *n, *inti = NULL; 413 struct kvm_s390_interrupt_info *n, *inti = NULL;
414 int deliver; 414 int deliver;
415 415
416 __reset_intercept_indicators(vcpu); 416 __reset_intercept_indicators(vcpu);
@@ -465,8 +465,8 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
465 465
466int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) 466int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
467{ 467{
468 struct local_interrupt *li = &vcpu->arch.local_int; 468 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
469 struct interrupt_info *inti; 469 struct kvm_s390_interrupt_info *inti;
470 470
471 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 471 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
472 if (!inti) 472 if (!inti)
@@ -487,9 +487,9 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
487int kvm_s390_inject_vm(struct kvm *kvm, 487int kvm_s390_inject_vm(struct kvm *kvm,
488 struct kvm_s390_interrupt *s390int) 488 struct kvm_s390_interrupt *s390int)
489{ 489{
490 struct local_interrupt *li; 490 struct kvm_s390_local_interrupt *li;
491 struct float_interrupt *fi; 491 struct kvm_s390_float_interrupt *fi;
492 struct interrupt_info *inti; 492 struct kvm_s390_interrupt_info *inti;
493 int sigcpu; 493 int sigcpu;
494 494
495 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 495 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
@@ -544,8 +544,8 @@ int kvm_s390_inject_vm(struct kvm *kvm,
544int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, 544int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
545 struct kvm_s390_interrupt *s390int) 545 struct kvm_s390_interrupt *s390int)
546{ 546{
547 struct local_interrupt *li; 547 struct kvm_s390_local_interrupt *li;
548 struct interrupt_info *inti; 548 struct kvm_s390_interrupt_info *inti;
549 549
550 inti = kzalloc(sizeof(*inti), GFP_KERNEL); 550 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
551 if (!inti) 551 if (!inti)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6558b09ff579..1782cbcd2829 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -79,10 +79,6 @@ void kvm_arch_hardware_disable(void *garbage)
79{ 79{
80} 80}
81 81
82void decache_vcpus_on_cpu(int cpu)
83{
84}
85
86int kvm_arch_hardware_setup(void) 82int kvm_arch_hardware_setup(void)
87{ 83{
88 return 0; 84 return 0;
@@ -198,6 +194,7 @@ out_nokvm:
198void kvm_arch_destroy_vm(struct kvm *kvm) 194void kvm_arch_destroy_vm(struct kvm *kvm)
199{ 195{
200 debug_unregister(kvm->arch.dbf); 196 debug_unregister(kvm->arch.dbf);
197 kvm_free_physmem(kvm);
201 free_page((unsigned long)(kvm->arch.sca)); 198 free_page((unsigned long)(kvm->arch.sca));
202 kfree(kvm); 199 kfree(kvm);
203 module_put(THIS_MODULE); 200 module_put(THIS_MODULE);
@@ -250,11 +247,16 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
250 vcpu->arch.sie_block->gbea = 1; 247 vcpu->arch.sie_block->gbea = 1;
251} 248}
252 249
250/* The current code can have up to 256 pages for virtio */
251#define VIRTIODESCSPACE (256ul * 4096ul)
252
253int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 253int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
254{ 254{
255 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH); 255 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
256 vcpu->arch.sie_block->gmslm = 0xffffffffffUL; 256 vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
257 vcpu->arch.sie_block->gmsor = 0x000000000000; 257 vcpu->kvm->arch.guest_origin +
258 VIRTIODESCSPACE - 1ul;
259 vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
258 vcpu->arch.sie_block->ecb = 2; 260 vcpu->arch.sie_block->ecb = 2;
259 vcpu->arch.sie_block->eca = 0xC1002001U; 261 vcpu->arch.sie_block->eca = 0xC1002001U;
260 setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup, 262 setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
@@ -273,7 +275,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
273 if (!vcpu) 275 if (!vcpu)
274 goto out_nomem; 276 goto out_nomem;
275 277
276 vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL); 278 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
279 get_zeroed_page(GFP_KERNEL);
277 280
278 if (!vcpu->arch.sie_block) 281 if (!vcpu->arch.sie_block)
279 goto out_free_cpu; 282 goto out_free_cpu;
@@ -672,6 +675,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
672 return 0; 675 return 0;
673} 676}
674 677
678void kvm_arch_flush_shadow(struct kvm *kvm)
679{
680}
681
675gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) 682gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
676{ 683{
677 return gfn; 684 return gfn;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index c02286c6a931..2e2d2ffb6a07 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -199,7 +199,7 @@ out:
199 199
200static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) 200static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
201{ 201{
202 struct float_interrupt *fi = &vcpu->kvm->arch.float_int; 202 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
203 int cpus = 0; 203 int cpus = 0;
204 int n; 204 int n;
205 205
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 0a236acfb5f6..5a556114eaa5 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -45,7 +45,7 @@
45 45
46static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg) 46static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
47{ 47{
48 struct float_interrupt *fi = &vcpu->kvm->arch.float_int; 48 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
49 int rc; 49 int rc;
50 50
51 if (cpu_addr >= KVM_MAX_VCPUS) 51 if (cpu_addr >= KVM_MAX_VCPUS)
@@ -71,9 +71,9 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
71 71
72static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr) 72static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
73{ 73{
74 struct float_interrupt *fi = &vcpu->kvm->arch.float_int; 74 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
75 struct local_interrupt *li; 75 struct kvm_s390_local_interrupt *li;
76 struct interrupt_info *inti; 76 struct kvm_s390_interrupt_info *inti;
77 int rc; 77 int rc;
78 78
79 if (cpu_addr >= KVM_MAX_VCPUS) 79 if (cpu_addr >= KVM_MAX_VCPUS)
@@ -108,9 +108,9 @@ unlock:
108 108
109static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store) 109static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
110{ 110{
111 struct float_interrupt *fi = &vcpu->kvm->arch.float_int; 111 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
112 struct local_interrupt *li; 112 struct kvm_s390_local_interrupt *li;
113 struct interrupt_info *inti; 113 struct kvm_s390_interrupt_info *inti;
114 int rc; 114 int rc;
115 115
116 if (cpu_addr >= KVM_MAX_VCPUS) 116 if (cpu_addr >= KVM_MAX_VCPUS)
@@ -169,9 +169,9 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
169static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, 169static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
170 u64 *reg) 170 u64 *reg)
171{ 171{
172 struct float_interrupt *fi = &vcpu->kvm->arch.float_int; 172 struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
173 struct local_interrupt *li; 173 struct kvm_s390_local_interrupt *li;
174 struct interrupt_info *inti; 174 struct kvm_s390_interrupt_info *inti;
175 int rc; 175 int rc;
176 u8 tmp; 176 u8 tmp;
177 177
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 05598649b326..388cc7420055 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -202,3 +202,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
202 } 202 }
203} 203}
204#endif 204#endif
205
206#ifdef CONFIG_MEMORY_HOTPLUG
207int arch_add_memory(int nid, u64 start, u64 size)
208{
209 struct pglist_data *pgdat;
210 struct zone *zone;
211 int rc;
212
213 pgdat = NODE_DATA(nid);
214 zone = pgdat->node_zones + ZONE_NORMAL;
215 rc = vmem_add_mapping(start, size);
216 if (rc)
217 return rc;
218 rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size));
219 if (rc)
220 vmem_remove_mapping(start, size);
221 return rc;
222}
223#endif /* CONFIG_MEMORY_HOTPLUG */
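
arch_add_memory() is the s390 backend for the generic add_memory() path in mm/memory_hotplug.c: the new range is first mapped with vmem_add_mapping(), then handed to the sparsemem core via __add_pages(), and the mapping is rolled back if that fails. The pages always land in ZONE_NORMAL of the given node, which is node 0 in practice since s390 is not NUMA.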