author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 14:35:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 14:35:30 -0400
commit    ba0234ec35127fe21d373db53cbaf9fe20620cb6 (patch)
tree      a2cbef204482512ae9e723f2bf4d22051975ef45 /arch/s390
parent    537b60d17894b7c19a6060feae40299d7109d6e7 (diff)
parent    939e379e9e183ae6291ac7caa4a5e1dfadae4ccc (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (24 commits)
  [S390] drivers/s390/char: Use kmemdup
  [S390] drivers/s390/char: Use kstrdup
  [S390] debug: enable exception-trace debug facility
  [S390] s390_hypfs: Add new attributes
  [S390] qdio: remove API wrappers
  [S390] qdio: set correct bit in dsci
  [S390] qdio: dont convert timestamps to microseconds
  [S390] qdio: remove memset hack
  [S390] qdio: prevent starvation on PCI devices
  [S390] qdio: count number of qdio interrupts
  [S390] user space fault: report fault before calling do_exit
  [S390] topology: expose core identifier
  [S390] dasd: remove uid from devmap
  [S390] dasd: add dynamic pav toleration
  [S390] vdso: add missing vdso_install target
  [S390] vdso: remove redundant check for CONFIG_64BIT
  [S390] avoid default_llseek in s390 drivers
  [S390] vmcp: disallow modular build
  [S390] add breaking event address for user space
  [S390] virtualization aware cpu measurement
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/Kconfig                       7
-rw-r--r--   arch/s390/Makefile                      6
-rw-r--r--   arch/s390/hypfs/hypfs.h                 4
-rw-r--r--   arch/s390/hypfs/hypfs_diag.c          123
-rw-r--r--   arch/s390/hypfs/hypfs_vm.c             87
-rw-r--r--   arch/s390/hypfs/inode.c                42
-rw-r--r--   arch/s390/include/asm/cputime.h         9
-rw-r--r--   arch/s390/include/asm/lowcore.h        89
-rw-r--r--   arch/s390/include/asm/ptrace.h          3
-rw-r--r--   arch/s390/include/asm/qdio.h            2
-rw-r--r--   arch/s390/include/asm/setup.h           5
-rw-r--r--   arch/s390/include/asm/system.h          5
-rw-r--r--   arch/s390/include/asm/thread_info.h     1
-rw-r--r--   arch/s390/include/asm/timex.h           8
-rw-r--r--   arch/s390/include/asm/topology.h        2
-rw-r--r--   arch/s390/kernel/asm-offsets.c          6
-rw-r--r--   arch/s390/kernel/debug.c                1
-rw-r--r--   arch/s390/kernel/early.c                4
-rw-r--r--   arch/s390/kernel/entry.S              324
-rw-r--r--   arch/s390/kernel/entry64.S            617
-rw-r--r--   arch/s390/kernel/head.S                 4
-rw-r--r--   arch/s390/kernel/nmi.c                  3
-rw-r--r--   arch/s390/kernel/processor.c           37
-rw-r--r--   arch/s390/kernel/ptrace.c              68
-rw-r--r--   arch/s390/kernel/s390_ext.c             3
-rw-r--r--   arch/s390/kernel/setup.c               27
-rw-r--r--   arch/s390/kernel/signal.c               2
-rw-r--r--   arch/s390/kernel/topology.c             7
-rw-r--r--   arch/s390/kernel/traps.c               31
-rw-r--r--   arch/s390/kernel/vdso.c                 4
-rw-r--r--   arch/s390/kernel/vtime.c               15
-rw-r--r--   arch/s390/kvm/Kconfig                  11
-rw-r--r--   arch/s390/kvm/sie64a.S                 77
-rw-r--r--   arch/s390/mm/fault.c                   32
34 files changed, 969 insertions, 697 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0d8cd9bbe101..79d0ca086820 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -444,13 +444,6 @@ config FORCE_MAX_ZONEORDER
 	int
 	default "9"
 
-config PROCESS_DEBUG
-	bool "Show crashed user process info"
-	help
-	  Say Y to print all process fault locations to the console. This is
-	  a debugging option; you probably do not want to set it unless you
-	  are an S390 port maintainer.
-
 config PFAULT
 	bool "Pseudo page fault support"
 	help
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 0da10746e0e5..30c5f01f93b0 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -116,6 +116,12 @@ image bzImage: vmlinux
 zfcpdump:
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
+vdso_install:
+ifeq ($(CONFIG_64BIT),y)
+	$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
+endif
+	$(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
+
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
 
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index aea572009d60..fa487d4cc08b 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -11,6 +11,7 @@
 
 #include <linux/fs.h>
 #include <linux/types.h>
+#include <linux/debugfs.h>
 
 #define REG_FILE_MODE 0440
 #define UPDATE_FILE_MODE 0220
@@ -34,6 +35,9 @@ extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
 
 /* VM Hypervisor */
 extern int hypfs_vm_init(void);
+extern void hypfs_vm_exit(void);
 extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
 
+/* Directory for debugfs files */
+extern struct dentry *hypfs_dbfs_dir;
 #endif /* _HYPFS_H_ */
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 5b1acdba6495..1211bb1d2f24 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <asm/ebcdic.h>
 #include "hypfs.h"
 
@@ -22,6 +23,8 @@
 #define CPU_NAME_LEN	16	/* type name len of cpus in diag224 name table */
 #define TMP_SIZE	64	/* size of temporary buffers */
 
+#define DBFS_D204_HDR_VERSION	0
+
 /* diag 204 subcodes */
 enum diag204_sc {
 	SUBC_STIB4 = 4,
@@ -47,6 +50,8 @@ static void *diag204_buf; /* 4K aligned buffer for diag204 data */
 static void *diag204_buf_vmalloc;	/* vmalloc pointer for diag204 data */
 static int diag204_buf_pages;		/* number of pages for diag204 data */
 
+static struct dentry *dbfs_d204_file;
+
 /*
  * DIAG 204 data structures and member access functions.
  *
@@ -364,18 +369,21 @@ static void diag204_free_buffer(void)
 	} else {
 		free_pages((unsigned long) diag204_buf, 0);
 	}
-	diag204_buf_pages = 0;
 	diag204_buf = NULL;
 }
 
+static void *page_align_ptr(void *ptr)
+{
+	return (void *) PAGE_ALIGN((unsigned long) ptr);
+}
+
 static void *diag204_alloc_vbuf(int pages)
 {
 	/* The buffer has to be page aligned! */
 	diag204_buf_vmalloc = vmalloc(PAGE_SIZE * (pages + 1));
 	if (!diag204_buf_vmalloc)
 		return ERR_PTR(-ENOMEM);
-	diag204_buf = (void*)((unsigned long)diag204_buf_vmalloc
-			& ~0xfffUL) + 0x1000;
+	diag204_buf = page_align_ptr(diag204_buf_vmalloc);
 	diag204_buf_pages = pages;
 	return diag204_buf;
 }
@@ -468,17 +476,26 @@ fail_alloc:
 	return rc;
 }
 
+static int diag204_do_store(void *buf, int pages)
+{
+	int rc;
+
+	rc = diag204((unsigned long) diag204_store_sc |
+		     (unsigned long) diag204_info_type, pages, buf);
+	return rc < 0 ? -ENOSYS : 0;
+}
+
 static void *diag204_store(void)
 {
 	void *buf;
-	int pages;
+	int pages, rc;
 
 	buf = diag204_get_buffer(diag204_info_type, &pages);
 	if (IS_ERR(buf))
 		goto out;
-	if (diag204((unsigned long)diag204_store_sc |
-		    (unsigned long)diag204_info_type, pages, buf) < 0)
-		return ERR_PTR(-ENOSYS);
+	rc = diag204_do_store(buf, pages);
+	if (rc)
+		return ERR_PTR(rc);
 out:
 	return buf;
 }
@@ -526,6 +543,92 @@ static int diag224_idx2name(int index, char *name)
 	return 0;
 }
 
+struct dbfs_d204_hdr {
+	u64	len;		/* Length of d204 buffer without header */
+	u16	version;	/* Version of header */
+	u8	sc;		/* Used subcode */
+	char	reserved[53];
+} __attribute__ ((packed));
+
+struct dbfs_d204 {
+	struct dbfs_d204_hdr	hdr;	/* 64 byte header */
+	char			buf[];	/* d204 buffer */
+} __attribute__ ((packed));
+
+struct dbfs_d204_private {
+	struct dbfs_d204	*d204;	/* Aligned d204 data with header */
+	void			*base;	/* Base pointer (needed for vfree) */
+};
+
+static int dbfs_d204_open(struct inode *inode, struct file *file)
+{
+	struct dbfs_d204_private *data;
+	struct dbfs_d204 *d204;
+	int rc, buf_size;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	buf_size = PAGE_SIZE * (diag204_buf_pages + 1) + sizeof(d204->hdr);
+	data->base = vmalloc(buf_size);
+	if (!data->base) {
+		rc = -ENOMEM;
+		goto fail_kfree_data;
+	}
+	memset(data->base, 0, buf_size);
+	d204 = page_align_ptr(data->base + sizeof(d204->hdr))
+		- sizeof(d204->hdr);
+	rc = diag204_do_store(&d204->buf, diag204_buf_pages);
+	if (rc)
+		goto fail_vfree_base;
+	d204->hdr.version = DBFS_D204_HDR_VERSION;
+	d204->hdr.len = PAGE_SIZE * diag204_buf_pages;
+	d204->hdr.sc = diag204_store_sc;
+	data->d204 = d204;
+	file->private_data = data;
+	return nonseekable_open(inode, file);
+
+fail_vfree_base:
+	vfree(data->base);
+fail_kfree_data:
+	kfree(data);
+	return rc;
+}
+
+static int dbfs_d204_release(struct inode *inode, struct file *file)
+{
+	struct dbfs_d204_private *data = file->private_data;
+
+	vfree(data->base);
+	kfree(data);
+	return 0;
+}
+
+static ssize_t dbfs_d204_read(struct file *file, char __user *buf,
+			      size_t size, loff_t *ppos)
+{
+	struct dbfs_d204_private *data = file->private_data;
+
+	return simple_read_from_buffer(buf, size, ppos, data->d204,
+				       data->d204->hdr.len +
+				       sizeof(data->d204->hdr));
+}
+
+static const struct file_operations dbfs_d204_ops = {
+	.open		= dbfs_d204_open,
+	.read		= dbfs_d204_read,
+	.release	= dbfs_d204_release,
+};
+
+static int hypfs_dbfs_init(void)
+{
+	dbfs_d204_file = debugfs_create_file("diag_204", 0400, hypfs_dbfs_dir,
+					     NULL, &dbfs_d204_ops);
+	if (IS_ERR(dbfs_d204_file))
+		return PTR_ERR(dbfs_d204_file);
+	return 0;
+}
+
 __init int hypfs_diag_init(void)
 {
 	int rc;
@@ -540,11 +643,17 @@ __init int hypfs_diag_init(void)
 		pr_err("The hardware system does not provide all "
 		       "functions required by hypfs\n");
 	}
+	if (diag204_info_type == INFO_EXT) {
+		rc = hypfs_dbfs_init();
+		if (rc)
+			diag204_free_buffer();
+	}
 	return rc;
 }
 
 void hypfs_diag_exit(void)
 {
+	debugfs_remove(dbfs_d204_file);
 	diag224_delete_name_table();
 	diag204_free_buffer();
 }
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index f0b0d31f0b48..ee5ab1a578e7 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -10,14 +10,18 @@
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <asm/ebcdic.h>
+#include <asm/timex.h>
 #include "hypfs.h"
 
 #define NAME_LEN 8
+#define DBFS_D2FC_HDR_VERSION 0
 
 static char local_guest[] = "        ";
 static char all_guests[] = "*       ";
 static char *guest_query;
 
+static struct dentry *dbfs_d2fc_file;
+
 struct diag2fc_data {
 	__u32 version;
 	__u32 flags;
@@ -76,23 +80,26 @@ static int diag2fc(int size, char* query, void *addr)
 	return -residual_cnt;
 }
 
-static struct diag2fc_data *diag2fc_store(char *query, int *count)
+/*
+ * Allocate buffer for "query" and store diag 2fc at "offset"
+ */
+static void *diag2fc_store(char *query, unsigned int *count, int offset)
 {
+	void *data;
 	int size;
-	struct diag2fc_data *data;
 
 	do {
 		size = diag2fc(0, query, NULL);
 		if (size < 0)
 			return ERR_PTR(-EACCES);
-		data = vmalloc(size);
+		data = vmalloc(size + offset);
 		if (!data)
 			return ERR_PTR(-ENOMEM);
-		if (diag2fc(size, query, data) == 0)
+		if (diag2fc(size, query, data + offset) == 0)
 			break;
 		vfree(data);
 	} while (1);
-	*count = (size / sizeof(*data));
+	*count = (size / sizeof(struct diag2fc_data));
 
 	return data;
 }
@@ -168,9 +175,10 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
 {
 	struct dentry *dir, *file;
 	struct diag2fc_data *data;
-	int rc, i, count = 0;
+	unsigned int count = 0;
+	int rc, i;
 
-	data = diag2fc_store(guest_query, &count);
+	data = diag2fc_store(guest_query, &count, 0);
 	if (IS_ERR(data))
 		return PTR_ERR(data);
 
@@ -218,8 +226,61 @@ failed:
 	return rc;
 }
 
+struct dbfs_d2fc_hdr {
+	u64	len;		/* Length of d2fc buffer without header */
+	u16	version;	/* Version of header */
+	char	tod_ext[16];	/* TOD clock for d2fc */
+	u64	count;		/* Number of VM guests in d2fc buffer */
+	char	reserved[30];
+} __attribute__ ((packed));
+
+struct dbfs_d2fc {
+	struct dbfs_d2fc_hdr	hdr;	/* 64 byte header */
+	char			buf[];	/* d2fc buffer */
+} __attribute__ ((packed));
+
+static int dbfs_d2fc_open(struct inode *inode, struct file *file)
+{
+	struct dbfs_d2fc *data;
+	unsigned int count;
+
+	data = diag2fc_store(guest_query, &count, sizeof(data->hdr));
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+	get_clock_ext(data->hdr.tod_ext);
+	data->hdr.len = count * sizeof(struct diag2fc_data);
+	data->hdr.version = DBFS_D2FC_HDR_VERSION;
+	data->hdr.count = count;
+	memset(&data->hdr.reserved, 0, sizeof(data->hdr.reserved));
+	file->private_data = data;
+	return nonseekable_open(inode, file);
+}
+
+static int dbfs_d2fc_release(struct inode *inode, struct file *file)
+{
+	diag2fc_free(file->private_data);
+	return 0;
+}
+
+static ssize_t dbfs_d2fc_read(struct file *file, char __user *buf,
+			      size_t size, loff_t *ppos)
+{
+	struct dbfs_d2fc *data = file->private_data;
+
+	return simple_read_from_buffer(buf, size, ppos, data, data->hdr.len +
+				       sizeof(struct dbfs_d2fc_hdr));
+}
+
+static const struct file_operations dbfs_d2fc_ops = {
+	.open		= dbfs_d2fc_open,
+	.read		= dbfs_d2fc_read,
+	.release	= dbfs_d2fc_release,
+};
+
 int hypfs_vm_init(void)
 {
+	if (!MACHINE_IS_VM)
+		return 0;
 	if (diag2fc(0, all_guests, NULL) > 0)
 		guest_query = all_guests;
 	else if (diag2fc(0, local_guest, NULL) > 0)
@@ -227,5 +288,17 @@ int hypfs_vm_init(void)
 	else
 		return -EACCES;
 
+	dbfs_d2fc_file = debugfs_create_file("diag_2fc", 0400, hypfs_dbfs_dir,
+					     NULL, &dbfs_d2fc_ops);
+	if (IS_ERR(dbfs_d2fc_file))
+		return PTR_ERR(dbfs_d2fc_file);
+
 	return 0;
 }
+
+void hypfs_vm_exit(void)
+{
+	if (!MACHINE_IS_VM)
+		return;
+	debugfs_remove(dbfs_d2fc_file);
+}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c53f8ac825ca..6b120f073043 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -46,6 +46,8 @@ static const struct super_operations hypfs_s_ops;
 /* start of list of all dentries, which have to be deleted on update */
 static struct dentry *hypfs_last_dentry;
 
+struct dentry *hypfs_dbfs_dir;
+
 static void hypfs_update_update(struct super_block *sb)
 {
 	struct hypfs_sb_info *sb_info = sb->s_fs_info;
@@ -145,7 +147,7 @@ static int hypfs_open(struct inode *inode, struct file *filp)
 		}
 		mutex_unlock(&fs_info->lock);
 	}
-	return 0;
+	return nonseekable_open(inode, filp);
 }
 
 static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -468,20 +470,22 @@ static int __init hypfs_init(void)
 {
 	int rc;
 
-	if (MACHINE_IS_VM) {
-		if (hypfs_vm_init())
-			/* no diag 2fc, just exit */
-			return -ENODATA;
-	} else {
-		if (hypfs_diag_init()) {
-			rc = -ENODATA;
-			goto fail_diag;
-		}
+	hypfs_dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
+	if (IS_ERR(hypfs_dbfs_dir))
+		return PTR_ERR(hypfs_dbfs_dir);
+
+	if (hypfs_diag_init()) {
+		rc = -ENODATA;
+		goto fail_debugfs_remove;
+	}
+	if (hypfs_vm_init()) {
+		rc = -ENODATA;
+		goto fail_hypfs_diag_exit;
 	}
 	s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
 	if (!s390_kobj) {
 		rc = -ENOMEM;
-		goto fail_sysfs;
+		goto fail_hypfs_vm_exit;
 	}
 	rc = register_filesystem(&hypfs_type);
 	if (rc)
@@ -490,18 +494,22 @@ static int __init hypfs_init(void)
 
 fail_filesystem:
 	kobject_put(s390_kobj);
-fail_sysfs:
-	if (!MACHINE_IS_VM)
-		hypfs_diag_exit();
-fail_diag:
+fail_hypfs_vm_exit:
+	hypfs_vm_exit();
+fail_hypfs_diag_exit:
+	hypfs_diag_exit();
+fail_debugfs_remove:
+	debugfs_remove(hypfs_dbfs_dir);
+
 	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 	return rc;
 }
 
 static void __exit hypfs_exit(void)
 {
-	if (!MACHINE_IS_VM)
-		hypfs_diag_exit();
+	hypfs_diag_exit();
+	hypfs_vm_exit();
+	debugfs_remove(hypfs_dbfs_dir);
 	unregister_filesystem(&hypfs_type);
 	kobject_put(s390_kobj);
 }
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 258ba88b7b50..8b1a52a137c5 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -188,15 +188,16 @@ struct s390_idle_data {
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
-void vtime_start_cpu(void);
+void vtime_start_cpu(__u64 int_clock, __u64 enter_timer);
 cputime64_t s390_get_idle_time(int cpu);
 
 #define arch_idle_time(cpu) s390_get_idle_time(cpu)
 
-static inline void s390_idle_check(void)
+static inline void s390_idle_check(struct pt_regs *regs, __u64 int_clock,
+				   __u64 enter_timer)
 {
-	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
-		vtime_start_cpu();
+	if (regs->psw.mask & PSW_MASK_WAIT)
+		vtime_start_cpu(int_clock, enter_timer);
 }
 
 static inline int s390_nohz_delay(int cpu)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 05527c040b7a..0f97ef2d92ac 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -104,38 +104,39 @@ struct _lowcore {
 	/* CPU time accounting values */
 	__u64	sync_enter_timer;		/* 0x0250 */
 	__u64	async_enter_timer;		/* 0x0258 */
-	__u64	exit_timer;			/* 0x0260 */
-	__u64	user_timer;			/* 0x0268 */
-	__u64	system_timer;			/* 0x0270 */
-	__u64	steal_timer;			/* 0x0278 */
-	__u64	last_update_timer;		/* 0x0280 */
-	__u64	last_update_clock;		/* 0x0288 */
+	__u64	mcck_enter_timer;		/* 0x0260 */
+	__u64	exit_timer;			/* 0x0268 */
+	__u64	user_timer;			/* 0x0270 */
+	__u64	system_timer;			/* 0x0278 */
+	__u64	steal_timer;			/* 0x0280 */
+	__u64	last_update_timer;		/* 0x0288 */
+	__u64	last_update_clock;		/* 0x0290 */
 
 	/* Current process. */
-	__u32	current_task;			/* 0x0290 */
-	__u32	thread_info;			/* 0x0294 */
-	__u32	kernel_stack;			/* 0x0298 */
+	__u32	current_task;			/* 0x0298 */
+	__u32	thread_info;			/* 0x029c */
+	__u32	kernel_stack;			/* 0x02a0 */
 
 	/* Interrupt and panic stack. */
-	__u32	async_stack;			/* 0x029c */
-	__u32	panic_stack;			/* 0x02a0 */
+	__u32	async_stack;			/* 0x02a4 */
+	__u32	panic_stack;			/* 0x02a8 */
 
 	/* Address space pointer. */
-	__u32	kernel_asce;			/* 0x02a4 */
-	__u32	user_asce;			/* 0x02a8 */
-	__u32	user_exec_asce;			/* 0x02ac */
+	__u32	kernel_asce;			/* 0x02ac */
+	__u32	user_asce;			/* 0x02b0 */
+	__u32	user_exec_asce;			/* 0x02b4 */
 
 	/* SMP info area */
-	struct cpuid cpu_id;			/* 0x02b0 */
 	__u32	cpu_nr;				/* 0x02b8 */
 	__u32	softirq_pending;		/* 0x02bc */
 	__u32	percpu_offset;			/* 0x02c0 */
 	__u32	ext_call_fast;			/* 0x02c4 */
 	__u64	int_clock;			/* 0x02c8 */
-	__u64	clock_comparator;		/* 0x02d0 */
-	__u32	machine_flags;			/* 0x02d8 */
-	__u32	ftrace_func;			/* 0x02dc */
-	__u8	pad_0x02e0[0x0300-0x02e0];	/* 0x02e0 */
+	__u64	mcck_clock;			/* 0x02d0 */
+	__u64	clock_comparator;		/* 0x02d8 */
+	__u32	machine_flags;			/* 0x02e0 */
+	__u32	ftrace_func;			/* 0x02e4 */
+	__u8	pad_0x02e8[0x0300-0x02e8];	/* 0x02e8 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -189,14 +190,14 @@ struct _lowcore {
 	__u32	data_exc_code;			/* 0x0090 */
 	__u16	mon_class_num;			/* 0x0094 */
 	__u16	per_perc_atmid;			/* 0x0096 */
-	addr_t	per_address;			/* 0x0098 */
+	__u64	per_address;			/* 0x0098 */
 	__u8	exc_access_id;			/* 0x00a0 */
 	__u8	per_access_id;			/* 0x00a1 */
 	__u8	op_access_id;			/* 0x00a2 */
 	__u8	ar_access_id;			/* 0x00a3 */
 	__u8	pad_0x00a4[0x00a8-0x00a4];	/* 0x00a4 */
-	addr_t	trans_exc_code;			/* 0x00a8 */
-	addr_t	monitor_code;			/* 0x00b0 */
+	__u64	trans_exc_code;			/* 0x00a8 */
+	__u64	monitor_code;			/* 0x00b0 */
 	__u16	subchannel_id;			/* 0x00b8 */
 	__u16	subchannel_nr;			/* 0x00ba */
 	__u32	io_int_parm;			/* 0x00bc */
@@ -207,7 +208,7 @@ struct _lowcore {
 	__u32	mcck_interruption_code[2];	/* 0x00e8 */
 	__u8	pad_0x00f0[0x00f4-0x00f0];	/* 0x00f0 */
 	__u32	external_damage_code;		/* 0x00f4 */
-	addr_t	failing_storage_address;	/* 0x00f8 */
+	__u64	failing_storage_address;	/* 0x00f8 */
 	__u8	pad_0x0100[0x0110-0x0100];	/* 0x0100 */
 	__u64	breaking_event_addr;		/* 0x0110 */
 	__u8	pad_0x0118[0x0120-0x0118];	/* 0x0118 */
@@ -233,39 +234,41 @@ struct _lowcore {
 	/* CPU accounting and timing values. */
 	__u64	sync_enter_timer;		/* 0x02a0 */
 	__u64	async_enter_timer;		/* 0x02a8 */
-	__u64	exit_timer;			/* 0x02b0 */
-	__u64	user_timer;			/* 0x02b8 */
-	__u64	system_timer;			/* 0x02c0 */
-	__u64	steal_timer;			/* 0x02c8 */
-	__u64	last_update_timer;		/* 0x02d0 */
-	__u64	last_update_clock;		/* 0x02d8 */
+	__u64	mcck_enter_timer;		/* 0x02b0 */
+	__u64	exit_timer;			/* 0x02b8 */
+	__u64	user_timer;			/* 0x02c0 */
+	__u64	system_timer;			/* 0x02c8 */
+	__u64	steal_timer;			/* 0x02d0 */
+	__u64	last_update_timer;		/* 0x02d8 */
+	__u64	last_update_clock;		/* 0x02e0 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x02e0 */
-	__u64	thread_info;			/* 0x02e8 */
-	__u64	kernel_stack;			/* 0x02f0 */
+	__u64	current_task;			/* 0x02e8 */
+	__u64	thread_info;			/* 0x02f0 */
+	__u64	kernel_stack;			/* 0x02f8 */
 
 	/* Interrupt and panic stack. */
-	__u64	async_stack;			/* 0x02f8 */
-	__u64	panic_stack;			/* 0x0300 */
+	__u64	async_stack;			/* 0x0300 */
+	__u64	panic_stack;			/* 0x0308 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0308 */
-	__u64	user_asce;			/* 0x0310 */
-	__u64	user_exec_asce;			/* 0x0318 */
+	__u64	kernel_asce;			/* 0x0310 */
+	__u64	user_asce;			/* 0x0318 */
+	__u64	user_exec_asce;			/* 0x0320 */
 
 	/* SMP info area */
-	struct cpuid cpu_id;			/* 0x0320 */
 	__u32	cpu_nr;				/* 0x0328 */
 	__u32	softirq_pending;		/* 0x032c */
 	__u64	percpu_offset;			/* 0x0330 */
 	__u64	ext_call_fast;			/* 0x0338 */
 	__u64	int_clock;			/* 0x0340 */
-	__u64	clock_comparator;		/* 0x0348 */
-	__u64	vdso_per_cpu_data;		/* 0x0350 */
-	__u64	machine_flags;			/* 0x0358 */
-	__u64	ftrace_func;			/* 0x0360 */
-	__u8	pad_0x0368[0x0380-0x0368];	/* 0x0368 */
+	__u64	mcck_clock;			/* 0x0348 */
+	__u64	clock_comparator;		/* 0x0350 */
+	__u64	vdso_per_cpu_data;		/* 0x0358 */
+	__u64	machine_flags;			/* 0x0360 */
+	__u64	ftrace_func;			/* 0x0368 */
+	__u64	sie_hook;			/* 0x0370 */
+	__u64	cmf_hpp;			/* 0x0378 */
 
 	/* Interrupt response block. */
 	__u8	irb[64];			/* 0x0380 */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index fef9b33cdd59..e2c218dc68a6 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -328,8 +328,8 @@ struct pt_regs
 	psw_t psw;
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
-	unsigned short svcnr;
 	unsigned short ilc;
+	unsigned short svcnr;
 };
 #endif
 
@@ -436,6 +436,7 @@ typedef struct
 #define PTRACE_PEEKDATA_AREA	      0x5003
 #define PTRACE_POKETEXT_AREA	      0x5004
 #define PTRACE_POKEDATA_AREA	      0x5005
+#define PTRACE_GET_LAST_BREAK	      0x5006
 
 /*
  * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 9b04b1102bbc..0eaae6260274 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -368,14 +368,12 @@ struct qdio_initialize {
 #define QDIO_FLAG_SYNC_OUTPUT		0x02
 #define QDIO_FLAG_PCI_OUT		0x10
 
-extern int qdio_initialize(struct qdio_initialize *);
 extern int qdio_allocate(struct qdio_initialize *);
 extern int qdio_establish(struct qdio_initialize *);
 extern int qdio_activate(struct ccw_device *);
 
 extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 		   int q_nr, unsigned int bufnr, unsigned int count);
-extern int qdio_cleanup(struct ccw_device*, int);
 extern int qdio_shutdown(struct ccw_device*, int);
 extern int qdio_free(struct ccw_device *);
 extern int qdio_get_ssqd_desc(struct ccw_device *dev, struct qdio_ssqd_desc*);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 9ab6bd3a65d1..25e831d58e1e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -2,7 +2,7 @@
  * include/asm-s390/setup.h
  *
  * S390 version
- *  Copyright IBM Corp. 1999,2006
+ *  Copyright IBM Corp. 1999,2010
  */
 
 #ifndef _ASM_S390_SETUP_H
@@ -72,6 +72,7 @@ extern unsigned int user_mode;
 #define MACHINE_FLAG_HPAGE	(1UL << 10)
 #define MACHINE_FLAG_PFMF	(1UL << 11)
 #define MACHINE_FLAG_LPAR	(1UL << 12)
+#define MACHINE_FLAG_SPP	(1UL << 13)
 
 #define MACHINE_IS_VM		(S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM		(S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -88,6 +89,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_MVCOS	(0)
 #define MACHINE_HAS_HPAGE	(0)
 #define MACHINE_HAS_PFMF	(0)
+#define MACHINE_HAS_SPP		(0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE	(1)
 #define MACHINE_HAS_CSP		(1)
@@ -97,6 +99,7 @@ extern unsigned int user_mode;
 #define MACHINE_HAS_MVCOS	(S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
 #define MACHINE_HAS_HPAGE	(S390_lowcore.machine_flags & MACHINE_FLAG_HPAGE)
 #define MACHINE_HAS_PFMF	(S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
+#define MACHINE_HAS_SPP		(S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
 #endif /* __s390x__ */
 
 #define ZFCPDUMP_HSA_SIZE	(32UL<<20)
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 1741c1556a4e..cef66210c846 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -459,11 +459,6 @@ extern void (*_machine_power_off)(void);
 
 #define arch_align_stack(x) (x)
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-extern psw_t sysc_restore_trace_psw;
-extern psw_t io_restore_trace_psw;
-#endif
-
 static inline int tprot(unsigned long addr)
 {
 	int rc = -EFAULT;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 34f0873d6525..be3d3f91d86c 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -50,6 +50,7 @@ struct thread_info {
 	struct restart_block restart_block;
 	__u64 user_timer;
 	__u64 system_timer;
+	unsigned long last_break;	/* last breaking-event-address. */
 };
 
 /*
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index f174bdaa6b59..09d345a701dc 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -61,11 +61,15 @@ static inline unsigned long long get_clock (void)
 	return clk;
 }
 
+static inline void get_clock_ext(char *clk)
+{
+	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
+}
+
 static inline unsigned long long get_clock_xt(void)
 {
 	unsigned char clk[16];
-
-	asm volatile("stcke %0" : "=Q" (clk) : : "cc");
+	get_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
 
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 6e7211abd950..dc8a67297d0f 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -7,8 +7,10 @@
 
 const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
 
+extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
+#define topology_core_id(cpu)		(cpu_core_id[cpu])
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 
 int topology_set_cpu_management(int fc);
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index a09408952ed0..d9b490a2716e 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ int main(void)
 	DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
 	DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
 	DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
+	DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
 	BLANK();
 	DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
@@ -112,6 +113,7 @@ int main(void)
 	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
 	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
 	DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
+	DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
 	DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
 	DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
 	DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
@@ -126,10 +128,12 @@ int main(void)
 	DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
 	DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
 	DEFINE(__LC_USER_EXEC_ASCE, offsetof(struct _lowcore, user_exec_asce));
-	DEFINE(__LC_CPUID, offsetof(struct _lowcore, cpu_id));
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
+	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
 	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
+	DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 	DEFINE(__LC_IRB, offsetof(struct _lowcore, irb));
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
 	DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 0168472b2fdf..98192261491d 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -655,6 +655,7 @@ found:
 	p_info->act_entry_offset = 0;
 	file->private_data = p_info;
 	debug_info_get(debug_info);
+	nonseekable_open(inode, file);
 out:
 	mutex_unlock(&debug_mutex);
 	return rc;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2d92c2cf92d7..c00856ad4e5a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -356,6 +356,7 @@ static __init void detect_machine_facilities(void)
 {
 #ifdef CONFIG_64BIT
 	unsigned int facilities;
+	unsigned long long facility_bits;
 
 	facilities = stfl();
 	if (facilities & (1 << 28))
@@ -364,6 +365,9 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
 	if (facilities & (1 << 4))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
+	if ((stfle(&facility_bits, 1) > 0) &&
+	    (facility_bits & (1ULL << (63 - 40))))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
 #endif
 }
 
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6af7045280a8..d5e3e6007447 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -73,21 +73,24 @@ STACK_SIZE = 1 << STACK_SHIFT
 	basr	%r14,%r1
 	.endm
 
-	.macro	TRACE_IRQS_CHECK
-	basr	%r2,%r0
+	.macro	TRACE_IRQS_CHECK_ON
 	tm	SP_PSW(%r15),0x03	# irqs enabled?
-	jz	0f
-	l	%r1,BASED(.Ltrace_irq_on_caller)
-	basr	%r14,%r1
-	j	1f
-0:	l	%r1,BASED(.Ltrace_irq_off_caller)
-	basr	%r14,%r1
-1:
+	bz	BASED(0f)
+	TRACE_IRQS_ON
+0:
+	.endm
+
+	.macro	TRACE_IRQS_CHECK_OFF
+	tm	SP_PSW(%r15),0x03	# irqs enabled?
+	bz	BASED(0f)
+	TRACE_IRQS_OFF
+0:
 	.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
-#define TRACE_IRQS_CHECK
+#define TRACE_IRQS_CHECK_ON
+#define TRACE_IRQS_CHECK_OFF
 #endif
 
 #ifdef CONFIG_LOCKDEP
@@ -177,9 +180,9 @@ STACK_SIZE = 1 << STACK_SHIFT
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
 	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	icm	%r12,3,__LC_SVC_ILC
+	icm	%r12,12,__LC_SVC_ILC
 	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
-	st	%r12,SP_SVCNR(%r15)
+	st	%r12,SP_ILC(%r15)
 	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
 	la	%r12,0
 	st	%r12,__SF_BACKCHAIN(%r15)	# clear back chain
@@ -273,66 +276,45 @@ sysc_do_restart:
 	st	%r2,SP_R2(%r15)   # store return value (change R2 on stack)
 
 sysc_return:
+	LOCKDEP_SYS_EXIT
+sysc_tif:
 	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)  # there is work to do (signals etc.)
 sysc_restore:
-#ifdef CONFIG_TRACE_IRQFLAGS
-	la	%r1,BASED(sysc_restore_trace_psw_addr)
-	l	%r1,0(%r1)
-	lpsw	0(%r1)
-sysc_restore_trace:
-	TRACE_IRQS_CHECK
-	LOCKDEP_SYS_EXIT
-#endif
-sysc_leave:
 	RESTORE_ALL __LC_RETURN_PSW,1
 sysc_done:
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-sysc_restore_trace_psw_addr:
-	.long sysc_restore_trace_psw
-
-	.section .data,"aw",@progbits
-	.align	8
-	.globl	sysc_restore_trace_psw
-sysc_restore_trace_psw:
-	.long	0, sysc_restore_trace + 0x80000000
-	.previous
-#endif
-
-#
-# recheck if there is more work to do
 #
-sysc_work_loop:
-	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
-	bz	BASED(sysc_restore)	# there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm	SP_PSW+1(%r15),0x01	# returning to user ?
 	bno	BASED(sysc_restore)
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_tif:
 	tm	__TI_flags+3(%r9),_TIF_MCCK_PENDING
 	bo	BASED(sysc_mcck_pending)
 	tm	__TI_flags+3(%r9),_TIF_NEED_RESCHED
 	bo	BASED(sysc_reschedule)
 	tm	__TI_flags+3(%r9),_TIF_SIGPENDING
-	bnz	BASED(sysc_sigpending)
+	bo	BASED(sysc_sigpending)
 	tm	__TI_flags+3(%r9),_TIF_NOTIFY_RESUME
-	bnz	BASED(sysc_notify_resume)
+	bo	BASED(sysc_notify_resume)
 	tm	__TI_flags+3(%r9),_TIF_RESTART_SVC
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_restore)
-sysc_work_done:
+	b	BASED(sysc_return)	# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
 sysc_reschedule:
 	l	%r1,BASED(.Lschedule)
-	la	%r14,BASED(sysc_work_loop)
+	la	%r14,BASED(sysc_return)
 	br	%r1			# call scheduler
 
 #
@@ -340,7 +322,7 @@ sysc_reschedule:
 #
 sysc_mcck_pending:
 	l	%r1,BASED(.Ls390_handle_mcck)
-	la	%r14,BASED(sysc_work_loop)
+	la	%r14,BASED(sysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
 #
@@ -355,7 +337,7 @@ sysc_sigpending:
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_work_loop)
+	b	BASED(sysc_return)
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
@@ -363,7 +345,7 @@ sysc_sigpending:
 sysc_notify_resume:
 	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_notify_resume)
-	la	%r14,BASED(sysc_work_loop)
+	la	%r14,BASED(sysc_return)
 	br	%r1			# call do_notify_resume
 
 
@@ -458,11 +440,13 @@ kernel_execve:
 	br	%r14
 	# execve succeeded.
 0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
+	TRACE_IRQS_OFF
 	l	%r15,__LC_KERNEL_STACK	# load ksp
 	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	l	%r9,__LC_THREAD_INFO
 	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	TRACE_IRQS_ON
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
 	l	%r1,BASED(.Lexecve_tail)
 	basr	%r14,%r1
@@ -499,8 +483,8 @@ pgm_check_handler:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime:
+	TRACE_IRQS_CHECK_OFF
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	TRACE_IRQS_OFF
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3
@@ -509,8 +493,10 @@ pgm_do_call:
 	sll	%r8,2
 	l	%r7,0(%r8,%r7)		# load address of handler routine
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	la	%r14,BASED(sysc_return)
-	br	%r7			# branch to interrupt-handler
+	basr	%r14,%r7		# branch to interrupt-handler
+pgm_exit:
+	TRACE_IRQS_CHECK_ON
+	b	BASED(sysc_return)
 
 #
 # handle per exception
@@ -537,19 +523,19 @@ pgm_per_std:
 	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
 pgm_no_vtime2:
+	TRACE_IRQS_CHECK_OFF
 	l	%r9,__LC_THREAD_INFO	# load pointer to thread_info struct
-	TRACE_IRQS_OFF
 	l	%r1,__TI_task(%r9)
+	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
+	bz	BASED(kernel_per)
 	mvc	__THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
 	mvc	__THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
-	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
-	bz	BASED(kernel_per)
 	l	%r3,__LC_PGM_ILC	# load program interruption code
 	la	%r8,0x7f
 	nr	%r8,%r3			# clear per-event-bit and ilc
-	be	BASED(sysc_return)	# only per or per+check ?
+	be	BASED(pgm_exit)		# only per or per+check ?
 	b	BASED(pgm_do_call)
 
 #
@@ -570,8 +556,8 @@ pgm_svcper:
 	mvc	__THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
 	oi	__TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
 	TRACE_IRQS_ON
-	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
 	b	BASED(sysc_do_svc)
 
 #
@@ -582,8 +568,8 @@ kernel_per:
 	mvi	SP_SVCNR+1(%r15),0xff
 	la	%r2,SP_PTREGS(%r15)	# address of register-save area
 	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	la	%r14,BASED(sysc_restore)# load adr. of system return
-	br	%r1			# branch to do_single_step
+	basr	%r14,%r1		# branch to do_single_step
+	b	BASED(pgm_exit)
 
 /*
  * IO interrupt handler routine
@@ -602,134 +588,126 @@ io_int_handler:
602 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 588 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
603 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 589 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
604io_no_vtime: 590io_no_vtime:
605 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
606 TRACE_IRQS_OFF 591 TRACE_IRQS_OFF
592 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
607 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ 593 l %r1,BASED(.Ldo_IRQ) # load address of do_IRQ
608 la %r2,SP_PTREGS(%r15) # address of register-save area 594 la %r2,SP_PTREGS(%r15) # address of register-save area
609 basr %r14,%r1 # branch to standard irq handler 595 basr %r14,%r1 # branch to standard irq handler
610io_return: 596io_return:
597 LOCKDEP_SYS_EXIT
598 TRACE_IRQS_ON
599io_tif:
611 tm __TI_flags+3(%r9),_TIF_WORK_INT 600 tm __TI_flags+3(%r9),_TIF_WORK_INT
612 bnz BASED(io_work) # there is work to do (signals etc.) 601 bnz BASED(io_work) # there is work to do (signals etc.)
613io_restore: 602io_restore:
614#ifdef CONFIG_TRACE_IRQFLAGS
615 la %r1,BASED(io_restore_trace_psw_addr)
616 l %r1,0(%r1)
617 lpsw 0(%r1)
618io_restore_trace:
619 TRACE_IRQS_CHECK
620 LOCKDEP_SYS_EXIT
621#endif
622io_leave:
623 RESTORE_ALL __LC_RETURN_PSW,0 603 RESTORE_ALL __LC_RETURN_PSW,0
624io_done: 604io_done:
625 605
626#ifdef CONFIG_TRACE_IRQFLAGS
627io_restore_trace_psw_addr:
628 .long io_restore_trace_psw
629
630 .section .data,"aw",@progbits
631 .align 8
632 .globl io_restore_trace_psw
633io_restore_trace_psw:
634 .long 0, io_restore_trace + 0x80000000
635 .previous
636#endif
637
638# 606#
639# switch to kernel stack, then check the TIF bits 607# There is work todo, find out in which context we have been interrupted:
608# 1) if we return to user space we can do all _TIF_WORK_INT work
609# 2) if we return to kernel code and preemptive scheduling is enabled check
610# the preemption counter and if it is zero call preempt_schedule_irq
611# Before any work can be done, a switch to the kernel stack is required.
640# 612#
641io_work: 613io_work:
642 tm SP_PSW+1(%r15),0x01 # returning to user ? 614 tm SP_PSW+1(%r15),0x01 # returning to user ?
643#ifndef CONFIG_PREEMPT 615 bo BASED(io_work_user) # yes -> do resched & signal
644 bno BASED(io_restore) # no-> skip resched & signal 616#ifdef CONFIG_PREEMPT
645#else
646 bnz BASED(io_work_user) # no -> check for preemptive scheduling
647 # check for preemptive scheduling 617 # check for preemptive scheduling
648 icm %r0,15,__TI_precount(%r9) 618 icm %r0,15,__TI_precount(%r9)
649 bnz BASED(io_restore) # preemption disabled 619 bnz BASED(io_restore) # preemption disabled
620 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
621 bno BASED(io_restore)
622 # switch to kernel stack
650 l %r1,SP_R15(%r15) 623 l %r1,SP_R15(%r15)
651 s %r1,BASED(.Lc_spsize) 624 s %r1,BASED(.Lc_spsize)
652 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 625 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
653 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 626 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
654 lr %r15,%r1 627 lr %r15,%r1
655io_resume_loop: 628 # TRACE_IRQS_ON already done at io_return, call
656 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 629 # TRACE_IRQS_OFF to keep things symmetrical
657 bno BASED(io_restore) 630 TRACE_IRQS_OFF
658 l %r1,BASED(.Lpreempt_schedule_irq) 631 l %r1,BASED(.Lpreempt_schedule_irq)
659 la %r14,BASED(io_resume_loop) 632 basr %r14,%r1 # call preempt_schedule_irq
660 br %r1 # call schedule 633 b BASED(io_return)
634#else
635 b BASED(io_restore)
661#endif 636#endif
662 637
638#
639# Need to do work before returning to userspace, switch to kernel stack
640#
663io_work_user: 641io_work_user:
664 l %r1,__LC_KERNEL_STACK 642 l %r1,__LC_KERNEL_STACK
665 s %r1,BASED(.Lc_spsize) 643 s %r1,BASED(.Lc_spsize)
666 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 644 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
667 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain 645 xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
668 lr %r15,%r1 646 lr %r15,%r1
647
669# 648#
670# One of the work bits is on. Find out which one. 649# One of the work bits is on. Find out which one.
671# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED 650# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
672# and _TIF_MCCK_PENDING 651# and _TIF_MCCK_PENDING
673# 652#
674io_work_loop: 653io_work_tif:
675 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING 654 tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
676 bo BASED(io_mcck_pending) 655 bo BASED(io_mcck_pending)
677 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED 656 tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
678 bo BASED(io_reschedule) 657 bo BASED(io_reschedule)
679 tm __TI_flags+3(%r9),_TIF_SIGPENDING 658 tm __TI_flags+3(%r9),_TIF_SIGPENDING
680 bnz BASED(io_sigpending) 659 bo BASED(io_sigpending)
681 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME 660 tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
682 bnz BASED(io_notify_resume) 661 bo BASED(io_notify_resume)
683 b BASED(io_restore) 662 b BASED(io_return) # beware of critical section cleanup
684io_work_done:
685 663
686# 664#
687# _TIF_MCCK_PENDING is set, call handler 665# _TIF_MCCK_PENDING is set, call handler
688# 666#
689io_mcck_pending: 667io_mcck_pending:
668 # TRACE_IRQS_ON already done at io_return
690 l %r1,BASED(.Ls390_handle_mcck) 669 l %r1,BASED(.Ls390_handle_mcck)
691 basr %r14,%r1 # TIF bit will be cleared by handler 670 basr %r14,%r1 # TIF bit will be cleared by handler
692 b BASED(io_work_loop) 671 TRACE_IRQS_OFF
672 b BASED(io_return)
693 673
694# 674#
695# _TIF_NEED_RESCHED is set, call schedule 675# _TIF_NEED_RESCHED is set, call schedule
696# 676#
697io_reschedule: 677io_reschedule:
698 TRACE_IRQS_ON 678 # TRACE_IRQS_ON already done at io_return
699 l %r1,BASED(.Lschedule) 679 l %r1,BASED(.Lschedule)
700 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 680 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
701 basr %r14,%r1 # call scheduler 681 basr %r14,%r1 # call scheduler
702 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 682 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
703 TRACE_IRQS_OFF 683 TRACE_IRQS_OFF
704 tm __TI_flags+3(%r9),_TIF_WORK_INT 684 b BASED(io_return)
705 bz BASED(io_restore) # there is no work to do
706 b BASED(io_work_loop)
707 685
708# 686#
709# _TIF_SIGPENDING is set, call do_signal 687# _TIF_SIGPENDING is set, call do_signal
710# 688#
711io_sigpending: 689io_sigpending:
712 TRACE_IRQS_ON 690 # TRACE_IRQS_ON already done at io_return
713 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 691 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
714 la %r2,SP_PTREGS(%r15) # load pt_regs 692 la %r2,SP_PTREGS(%r15) # load pt_regs
715 l %r1,BASED(.Ldo_signal) 693 l %r1,BASED(.Ldo_signal)
716 basr %r14,%r1 # call do_signal 694 basr %r14,%r1 # call do_signal
717 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 695 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
718 TRACE_IRQS_OFF 696 TRACE_IRQS_OFF
719 b BASED(io_work_loop) 697 b BASED(io_return)
720 698
721# 699#
722# _TIF_NOTIFY_RESUME is set, call do_notify_resume 700# _TIF_NOTIFY_RESUME is set, call do_notify_resume
723# 701#
724io_notify_resume: 702io_notify_resume:
725 TRACE_IRQS_ON 703 # TRACE_IRQS_ON already done at io_return
726 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 704 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
727 la %r2,SP_PTREGS(%r15) # load pt_regs 705 la %r2,SP_PTREGS(%r15) # load pt_regs
728 l %r1,BASED(.Ldo_notify_resume) 706 l %r1,BASED(.Ldo_notify_resume)
729 basr %r14,%r1 # call do_notify_resume 707 basr %r14,%r1 # call do_notify_resume
730 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 708 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
731 TRACE_IRQS_OFF 709 TRACE_IRQS_OFF
732 b BASED(io_work_loop) 710 b BASED(io_return)
733 711
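
With io_work_loop gone, each TIF handler in the hunk above branches straight back to io_return, which re-tests the work flags before the registers are restored; the critical-section cleanup tables shrink accordingly to an io_tif..io_restore window plus the restore sequence itself. A rough C sketch of the dispatch (bit values and the thread_info layout are illustrative, not the kernel's real definitions):

#include <stdint.h>

#define TIF_MCCK_PENDING   (1u << 0)	/* placeholder bit positions */
#define TIF_NEED_RESCHED   (1u << 1)
#define TIF_SIGPENDING     (1u << 2)
#define TIF_NOTIFY_RESUME  (1u << 3)

struct thread_info_sketch { uint32_t flags; };

static void handle_mcck_pending(void) { }	/* stand-ins for the real handlers */
static void do_reschedule(void) { }
static void handle_sigpending(void) { }
static void handle_notify_resume(void) { }

/* One pass over the work bits; every handler returns through io_return,
 * which re-checks the flags; that is what "b BASED(io_return)" after each
 * call achieves. */
void io_work_tif_sketch(struct thread_info_sketch *ti)
{
	if (ti->flags & TIF_MCCK_PENDING)
		handle_mcck_pending();		/* handler clears the TIF bit */
	else if (ti->flags & TIF_NEED_RESCHED)
		do_reschedule();
	else if (ti->flags & TIF_SIGPENDING)
		handle_sigpending();
	else if (ti->flags & TIF_NOTIFY_RESUME)
		handle_notify_resume();
	/* else: nothing left, fall back to io_return and restore */
}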
734/* 712/*
735 * External interrupt handler routine 713 * External interrupt handler routine
@@ -764,15 +742,14 @@ __critical_end:
764 742
765 .globl mcck_int_handler 743 .globl mcck_int_handler
766mcck_int_handler: 744mcck_int_handler:
767 stck __LC_INT_CLOCK 745 stck __LC_MCCK_CLOCK
768 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 746 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
769 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 747 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
770 SAVE_ALL_BASE __LC_SAVE_AREA+32 748 SAVE_ALL_BASE __LC_SAVE_AREA+32
771 la %r12,__LC_MCK_OLD_PSW 749 la %r12,__LC_MCK_OLD_PSW
772 tm __LC_MCCK_CODE,0x80 # system damage? 750 tm __LC_MCCK_CODE,0x80 # system damage?
773 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid 751 bo BASED(mcck_int_main) # yes -> rest of mcck code invalid
774 mvc __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER 752 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
775 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
776 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 753 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
777 bo BASED(1f) 754 bo BASED(1f)
778 la %r14,__LC_SYNC_ENTER_TIMER 755 la %r14,__LC_SYNC_ENTER_TIMER
@@ -786,7 +763,7 @@ mcck_int_handler:
786 bl BASED(0f) 763 bl BASED(0f)
787 la %r14,__LC_LAST_UPDATE_TIMER 764 la %r14,__LC_LAST_UPDATE_TIMER
7880: spt 0(%r14) 7650: spt 0(%r14)
789 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 766 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7901: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7671: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
791 bno BASED(mcck_int_main) # no -> skip cleanup critical 768 bno BASED(mcck_int_main) # no -> skip cleanup critical
792 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 769 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -808,9 +785,9 @@ mcck_int_main:
808 bno BASED(mcck_no_vtime) # no -> skip cleanup critical 785 bno BASED(mcck_no_vtime) # no -> skip cleanup critical
809 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 786 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
810 bz BASED(mcck_no_vtime) 787 bz BASED(mcck_no_vtime)
811 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 788 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
812 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 789 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
813 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 790 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
814mcck_no_vtime: 791mcck_no_vtime:
815 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct 792 l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
816 la %r2,SP_PTREGS(%r15) # load pt_regs 793 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -833,7 +810,6 @@ mcck_no_vtime:
833mcck_return: 810mcck_return:
834 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW 811 mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
835 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 812 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
836 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
837 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 813 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
838 bno BASED(0f) 814 bno BASED(0f)
839 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 815 lm %r0,%r15,SP_R0(%r15) # load gprs 0-15
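
The machine-check path now keeps its own __LC_MCCK_CLOCK and __LC_MCCK_ENTER_TIMER fields instead of temporarily borrowing __LC_ASYNC_ENTER_TIMER, which is why the extra save and restore of the async enter timer around the handler disappears (the same change shows up again in entry64.S below). When the machine check reports the stored CPU timer as invalid, the handler falls back to the most recent snapshot it still has; the CPU timer counts down, so that is the smallest of the saved values. A hedged sketch, with field names standing in for the lowcore offsets used above:

#include <stdint.h>

/* Placeholder stand-ins for the lowcore timer snapshots referenced above. */
struct lowcore_timers_sketch {
	uint64_t sync_enter_timer;
	uint64_t async_enter_timer;
	uint64_t exit_timer;
	uint64_t last_update_timer;
};

/* Pick the snapshot taken last (smallest remaining value); the assembly
 * reloads the CPU timer from it (spt) and records it as the machine-check
 * enter time. */
uint64_t pick_mcck_enter_timer(const struct lowcore_timers_sketch *lc)
{
	uint64_t t = lc->sync_enter_timer;

	if (lc->async_enter_timer < t)
		t = lc->async_enter_timer;
	if (lc->exit_timer < t)
		t = lc->exit_timer;
	if (lc->last_update_timer < t)
		t = lc->last_update_timer;
	return t;
}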
@@ -917,18 +893,14 @@ stack_overflow:
917 893
918cleanup_table_system_call: 894cleanup_table_system_call:
919 .long system_call + 0x80000000, sysc_do_svc + 0x80000000 895 .long system_call + 0x80000000, sysc_do_svc + 0x80000000
920cleanup_table_sysc_return: 896cleanup_table_sysc_tif:
921 .long sysc_return + 0x80000000, sysc_leave + 0x80000000 897 .long sysc_tif + 0x80000000, sysc_restore + 0x80000000
922cleanup_table_sysc_leave: 898cleanup_table_sysc_restore:
923 .long sysc_leave + 0x80000000, sysc_done + 0x80000000 899 .long sysc_restore + 0x80000000, sysc_done + 0x80000000
924cleanup_table_sysc_work_loop: 900cleanup_table_io_tif:
925 .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000 901 .long io_tif + 0x80000000, io_restore + 0x80000000
926cleanup_table_io_return: 902cleanup_table_io_restore:
927 .long io_return + 0x80000000, io_leave + 0x80000000 903 .long io_restore + 0x80000000, io_done + 0x80000000
928cleanup_table_io_leave:
929 .long io_leave + 0x80000000, io_done + 0x80000000
930cleanup_table_io_work_loop:
931 .long io_work_loop + 0x80000000, io_work_done + 0x80000000
932 904
933cleanup_critical: 905cleanup_critical:
934 clc 4(4,%r12),BASED(cleanup_table_system_call) 906 clc 4(4,%r12),BASED(cleanup_table_system_call)
@@ -936,49 +908,40 @@ cleanup_critical:
936 clc 4(4,%r12),BASED(cleanup_table_system_call+4) 908 clc 4(4,%r12),BASED(cleanup_table_system_call+4)
937 bl BASED(cleanup_system_call) 909 bl BASED(cleanup_system_call)
9380: 9100:
939 clc 4(4,%r12),BASED(cleanup_table_sysc_return) 911 clc 4(4,%r12),BASED(cleanup_table_sysc_tif)
940 bl BASED(0f)
941 clc 4(4,%r12),BASED(cleanup_table_sysc_return+4)
942 bl BASED(cleanup_sysc_return)
9430:
944 clc 4(4,%r12),BASED(cleanup_table_sysc_leave)
945 bl BASED(0f)
946 clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
947 bl BASED(cleanup_sysc_leave)
9480:
949 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
950 bl BASED(0f) 912 bl BASED(0f)
951 clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4) 913 clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4)
952 bl BASED(cleanup_sysc_return) 914 bl BASED(cleanup_sysc_tif)
9530: 9150:
954 clc 4(4,%r12),BASED(cleanup_table_io_return) 916 clc 4(4,%r12),BASED(cleanup_table_sysc_restore)
955 bl BASED(0f) 917 bl BASED(0f)
956 clc 4(4,%r12),BASED(cleanup_table_io_return+4) 918 clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4)
957 bl BASED(cleanup_io_return) 919 bl BASED(cleanup_sysc_restore)
9580: 9200:
959 clc 4(4,%r12),BASED(cleanup_table_io_leave) 921 clc 4(4,%r12),BASED(cleanup_table_io_tif)
960 bl BASED(0f) 922 bl BASED(0f)
961 clc 4(4,%r12),BASED(cleanup_table_io_leave+4) 923 clc 4(4,%r12),BASED(cleanup_table_io_tif+4)
962 bl BASED(cleanup_io_leave) 924 bl BASED(cleanup_io_tif)
9630: 9250:
964 clc 4(4,%r12),BASED(cleanup_table_io_work_loop) 926 clc 4(4,%r12),BASED(cleanup_table_io_restore)
965 bl BASED(0f) 927 bl BASED(0f)
966 clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4) 928 clc 4(4,%r12),BASED(cleanup_table_io_restore+4)
967 bl BASED(cleanup_io_work_loop) 929 bl BASED(cleanup_io_restore)
9680: 9300:
969 br %r14 931 br %r14
970 932
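
cleanup_critical is unchanged in spirit, just driven by a shorter table: each entry is a [start, end) pair of code addresses, and when the interrupted PSW address falls inside a window the matching fixup routine runs. A minimal sketch of the lookup (the struct and any table contents are invented for illustration):

#include <stddef.h>

struct critical_range_sketch {
	unsigned long start;	/* e.g. sysc_tif                 */
	unsigned long end;	/* e.g. sysc_restore (exclusive) */
	void (*cleanup)(void);	/* e.g. cleanup_sysc_tif         */
};

/* Returns 1 if a fixup was run for the interrupted address. */
int cleanup_critical_sketch(const struct critical_range_sketch *tab,
			    size_t entries, unsigned long psw_addr)
{
	for (size_t i = 0; i < entries; i++) {
		if (psw_addr >= tab[i].start && psw_addr < tab[i].end) {
			tab[i].cleanup();
			return 1;
		}
	}
	return 0;
}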
971cleanup_system_call: 933cleanup_system_call:
972 mvc __LC_RETURN_PSW(8),0(%r12) 934 mvc __LC_RETURN_PSW(8),0(%r12)
973 c %r12,BASED(.Lmck_old_psw)
974 be BASED(0f)
975 la %r12,__LC_SAVE_AREA+16
976 b BASED(1f)
9770: la %r12,__LC_SAVE_AREA+32
9781:
979 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) 935 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
980 bh BASED(0f) 936 bh BASED(0f)
937 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
938 c %r12,BASED(.Lmck_old_psw)
939 be BASED(0f)
981 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 940 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9410: c %r12,BASED(.Lmck_old_psw)
942 la %r12,__LC_SAVE_AREA+32
943 be BASED(0f)
944 la %r12,__LC_SAVE_AREA+16
9820: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) 9450: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
983 bhe BASED(cleanup_vtime) 946 bhe BASED(cleanup_vtime)
984 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) 947 clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
@@ -1011,61 +974,54 @@ cleanup_system_call_insn:
1011 .long sysc_stime + 0x80000000 974 .long sysc_stime + 0x80000000
1012 .long sysc_update + 0x80000000 975 .long sysc_update + 0x80000000
1013 976
1014cleanup_sysc_return: 977cleanup_sysc_tif:
1015 mvc __LC_RETURN_PSW(4),0(%r12) 978 mvc __LC_RETURN_PSW(4),0(%r12)
1016 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return) 979 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif)
1017 la %r12,__LC_RETURN_PSW 980 la %r12,__LC_RETURN_PSW
1018 br %r14 981 br %r14
1019 982
1020cleanup_sysc_leave: 983cleanup_sysc_restore:
1021 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn) 984 clc 4(4,%r12),BASED(cleanup_sysc_restore_insn)
1022 be BASED(2f) 985 be BASED(2f)
986 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
987 c %r12,BASED(.Lmck_old_psw)
988 be BASED(0f)
1023 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 989 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1024 clc 4(4,%r12),BASED(cleanup_sysc_leave_insn+4) 9900: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4)
1025 be BASED(2f) 991 be BASED(2f)
1026 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 992 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1027 c %r12,BASED(.Lmck_old_psw) 993 c %r12,BASED(.Lmck_old_psw)
1028 bne BASED(0f) 994 la %r12,__LC_SAVE_AREA+32
1029 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) 995 be BASED(1f)
1030 b BASED(1f) 996 la %r12,__LC_SAVE_AREA+16
10310: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15) 9971: mvc 0(16,%r12),SP_R12(%r15)
10321: lm %r0,%r11,SP_R0(%r15) 998 lm %r0,%r11,SP_R0(%r15)
1033 l %r15,SP_R15(%r15) 999 l %r15,SP_R15(%r15)
10342: la %r12,__LC_RETURN_PSW 10002: la %r12,__LC_RETURN_PSW
1035 br %r14 1001 br %r14
1036cleanup_sysc_leave_insn: 1002cleanup_sysc_restore_insn:
1037 .long sysc_done - 4 + 0x80000000 1003 .long sysc_done - 4 + 0x80000000
1038 .long sysc_done - 8 + 0x80000000 1004 .long sysc_done - 8 + 0x80000000
1039 1005
1040cleanup_io_return: 1006cleanup_io_tif:
1041 mvc __LC_RETURN_PSW(4),0(%r12) 1007 mvc __LC_RETURN_PSW(4),0(%r12)
1042 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_return) 1008 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif)
1043 la %r12,__LC_RETURN_PSW 1009 la %r12,__LC_RETURN_PSW
1044 br %r14 1010 br %r14
1045 1011
1046cleanup_io_work_loop: 1012cleanup_io_restore:
1047 mvc __LC_RETURN_PSW(4),0(%r12) 1013 clc 4(4,%r12),BASED(cleanup_io_restore_insn)
1048 mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop) 1014 be BASED(1f)
1049 la %r12,__LC_RETURN_PSW 1015 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1050 br %r14 1016 clc 4(4,%r12),BASED(cleanup_io_restore_insn+4)
1051 1017 be BASED(1f)
1052cleanup_io_leave:
1053 clc 4(4,%r12),BASED(cleanup_io_leave_insn)
1054 be BASED(2f)
1055 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1056 clc 4(4,%r12),BASED(cleanup_io_leave_insn+4)
1057 be BASED(2f)
1058 mvc __LC_RETURN_PSW(8),SP_PSW(%r15) 1018 mvc __LC_RETURN_PSW(8),SP_PSW(%r15)
1059 c %r12,BASED(.Lmck_old_psw)
1060 bne BASED(0f)
1061 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) 1019 mvc __LC_SAVE_AREA+32(16),SP_R12(%r15)
1062 b BASED(1f) 1020 lm %r0,%r11,SP_R0(%r15)
10630: mvc __LC_SAVE_AREA+16(16),SP_R12(%r15)
10641: lm %r0,%r11,SP_R0(%r15)
1065 l %r15,SP_R15(%r15) 1021 l %r15,SP_R15(%r15)
10662: la %r12,__LC_RETURN_PSW 10221: la %r12,__LC_RETURN_PSW
1067 br %r14 1023 br %r14
1068cleanup_io_leave_insn: 1024cleanup_io_restore_insn:
1069 .long io_done - 4 + 0x80000000 1025 .long io_done - 4 + 0x80000000
1070 .long io_done - 8 + 0x80000000 1026 .long io_done - 8 + 0x80000000
1071 1027
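The renamed cleanup_sysc_restore and cleanup_io_restore above share one pattern: how much of the final restore sequence is replayed depends on how far the interrupted PSW had advanced, and the re-stamped exit timer as well as the stashed %r12-%r15 come from or go to the save area of whichever context interrupted (+16 for I/O or external, +32 for machine check on the 31-bit side). The sketch below models that decision; the two "done - 4/8" checkpoints are read off the cleanup_*_restore_insn tables, and the helper names are placeholders, not an instruction-level account:

#include <stdbool.h>

static void replay_exit_timer(void) { }	/* re-stamp __LC_EXIT_TIMER from the
					 * enter time of the interrupting
					 * context */
static void replay_psw_and_gprs(bool mcck) { (void)mcck; }

/* psw_addr: where the restore sequence was interrupted;
 * done:     the address of sysc_done or io_done. */
void cleanup_restore_sketch(unsigned long psw_addr, unsigned long done,
			    bool interrupted_by_mcck)
{
	if (psw_addr == done - 4)	/* at the final instruction: nothing left */
		return;
	replay_exit_timer();
	if (psw_addr == done - 8)	/* only the exit-timer stamp was missed */
		return;
	/* Earlier in the window: redo the return-PSW copy and the register
	 * reload; %r12-%r15 are parked in the interrupting context's save
	 * area so its exit path restores them. */
	replay_psw_and_gprs(interrupted_by_mcck);
}
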
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 52106d53271c..178d92536d90 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/entry64.S 2 * arch/s390/kernel/entry64.S
3 * S390 low-level entry points. 3 * S390 low-level entry points.
4 * 4 *
5 * Copyright (C) IBM Corp. 1999,2006 5 * Copyright (C) IBM Corp. 1999,2010
6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), 6 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
7 * Hartmut Penner (hp@de.ibm.com), 7 * Hartmut Penner (hp@de.ibm.com),
8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 8 * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -59,30 +59,45 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
59 59
60#define BASED(name) name-system_call(%r13) 60#define BASED(name) name-system_call(%r13)
61 61
62 .macro HANDLE_SIE_INTERCEPT
63#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
64 lg %r3,__LC_SIE_HOOK
65 ltgr %r3,%r3
66 jz 0f
67 basr %r14,%r3
68 0:
69#endif
70 .endm
71
62#ifdef CONFIG_TRACE_IRQFLAGS 72#ifdef CONFIG_TRACE_IRQFLAGS
63 .macro TRACE_IRQS_ON 73 .macro TRACE_IRQS_ON
64 basr %r2,%r0 74 basr %r2,%r0
65 brasl %r14,trace_hardirqs_on_caller 75 brasl %r14,trace_hardirqs_on_caller
66 .endm 76 .endm
67 77
68 .macro TRACE_IRQS_OFF 78 .macro TRACE_IRQS_OFF
69 basr %r2,%r0 79 basr %r2,%r0
70 brasl %r14,trace_hardirqs_off_caller 80 brasl %r14,trace_hardirqs_off_caller
71 .endm 81 .endm
72 82
73 .macro TRACE_IRQS_CHECK 83 .macro TRACE_IRQS_CHECK_ON
74 basr %r2,%r0
75 tm SP_PSW(%r15),0x03 # irqs enabled? 84 tm SP_PSW(%r15),0x03 # irqs enabled?
76 jz 0f 85 jz 0f
77 brasl %r14,trace_hardirqs_on_caller 86 TRACE_IRQS_ON
78 j 1f 870:
790: brasl %r14,trace_hardirqs_off_caller 88 .endm
801: 89
90 .macro TRACE_IRQS_CHECK_OFF
91 tm SP_PSW(%r15),0x03 # irqs enabled?
92 jz 0f
93 TRACE_IRQS_OFF
940:
81 .endm 95 .endm
82#else 96#else
83#define TRACE_IRQS_ON 97#define TRACE_IRQS_ON
84#define TRACE_IRQS_OFF 98#define TRACE_IRQS_OFF
85#define TRACE_IRQS_CHECK 99#define TRACE_IRQS_CHECK_ON
100#define TRACE_IRQS_CHECK_OFF
86#endif 101#endif
87 102
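
Two macro changes prepare the reworked entry paths: TRACE_IRQS_CHECK is split into _ON and _OFF variants so the program-check handler can mirror the interrupt state of the context it interrupted, and the SIE intercept test that used to sit inline in io_work becomes HANDLE_SIE_INTERCEPT, which calls a hook installed in the lowcore only when one is present. The hook amounts to the following (sie_hook is an illustrative name for the __LC_SIE_HOOK slot):

typedef void (*sie_hook_fn)(void);

struct lowcore_hook_sketch {
	sie_hook_fn sie_hook;	/* stand-in for the __LC_SIE_HOOK field */
};

/* HANDLE_SIE_INTERCEPT: do nothing unless KVM installed a hook. */
void handle_sie_intercept_sketch(struct lowcore_hook_sketch *lc)
{
	if (lc->sie_hook)	/* "lg %r3,__LC_SIE_HOOK; ltgr %r3,%r3; jz 0f" */
		lc->sie_hook();	/* "basr %r14,%r3" */
}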
88#ifdef CONFIG_LOCKDEP 103#ifdef CONFIG_LOCKDEP
@@ -111,31 +126,35 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
111 * R15 - kernel stack pointer 126 * R15 - kernel stack pointer
112 */ 127 */
113 128
114 .macro SAVE_ALL_BASE savearea
115 stmg %r12,%r15,\savearea
116 larl %r13,system_call
117 .endm
118
119 .macro SAVE_ALL_SVC psworg,savearea 129 .macro SAVE_ALL_SVC psworg,savearea
120 la %r12,\psworg 130 stmg %r11,%r15,\savearea
121 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp 131 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
132 aghi %r15,-SP_SIZE # make room for registers & psw
133 lg %r11,__LC_LAST_BREAK
122 .endm 134 .endm
123 135
124 .macro SAVE_ALL_SYNC psworg,savearea 136 .macro SAVE_ALL_PGM psworg,savearea
125 la %r12,\psworg 137 stmg %r11,%r15,\savearea
126 tm \psworg+1,0x01 # test problem state bit 138 tm \psworg+1,0x01 # test problem state bit
127 jz 2f # skip stack setup save
128 lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
129#ifdef CONFIG_CHECK_STACK 139#ifdef CONFIG_CHECK_STACK
130 j 3f 140 jnz 1f
1312: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 141 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
132 jz stack_overflow 142 jnz 2f
1333: 143 la %r12,\psworg
144 j stack_overflow
145#else
146 jz 2f
134#endif 147#endif
1352: 1481: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
1492: aghi %r15,-SP_SIZE # make room for registers & psw
150 larl %r13,system_call
151 lg %r11,__LC_LAST_BREAK
136 .endm 152 .endm
137 153
138 .macro SAVE_ALL_ASYNC psworg,savearea 154 .macro SAVE_ALL_ASYNC psworg,savearea
155 stmg %r11,%r15,\savearea
156 larl %r13,system_call
157 lg %r11,__LC_LAST_BREAK
139 la %r12,\psworg 158 la %r12,\psworg
140 tm \psworg+1,0x01 # test problem state bit 159 tm \psworg+1,0x01 # test problem state bit
141 jnz 1f # from user -> load kernel stack 160 jnz 1f # from user -> load kernel stack
@@ -149,27 +168,23 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
1490: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? 1680: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ?
150 slgr %r14,%r15 169 slgr %r14,%r15
151 srag %r14,%r14,STACK_SHIFT 170 srag %r14,%r14,STACK_SHIFT
152 jz 2f
1531: lg %r15,__LC_ASYNC_STACK # load async stack
154#ifdef CONFIG_CHECK_STACK 171#ifdef CONFIG_CHECK_STACK
155 j 3f 172 jnz 1f
1562: tml %r15,STACK_SIZE - CONFIG_STACK_GUARD 173 tml %r15,STACK_SIZE - CONFIG_STACK_GUARD
157 jz stack_overflow 174 jnz 2f
1583: 175 j stack_overflow
176#else
177 jz 2f
159#endif 178#endif
1602: 1791: lg %r15,__LC_ASYNC_STACK # load async stack
1802: aghi %r15,-SP_SIZE # make room for registers & psw
161 .endm 181 .endm
162 182
163 .macro CREATE_STACK_FRAME psworg,savearea 183 .macro CREATE_STACK_FRAME savearea
164 aghi %r15,-SP_SIZE # make room for registers & psw 184 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
165 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
166 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 185 stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
167 icm %r12,3,__LC_SVC_ILC 186 mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack
168 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 187 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
169 st %r12,SP_SVCNR(%r15)
170 mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
171 la %r12,0
172 stg %r12,__SF_BACKCHAIN(%r15)
173 .endm 188 .endm
174 189
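The save-area layout behind these macros changes as well: each entry path now stashes %r11-%r15 (five 64-bit registers, 40 bytes) instead of %r12-%r15 (32 bytes), which is why the asynchronous slot moves to __LC_SAVE_AREA+40, the machine-check slot to +80, and CREATE_STACK_FRAME stores %r0-%r10 plus a single 40-byte move of the saved block. The offsets are plain arithmetic; the macro names below are only for illustration:

#include <assert.h>

#define SAVED_GPRS	5			/* %r11 .. %r15          */
#define SAVE_SLOT_BYTES	(SAVED_GPRS * 8)	/* 40 bytes per context  */

#define SLOT_SVC	(0 * SAVE_SLOT_BYTES)	/* __LC_SAVE_AREA        */
#define SLOT_ASYNC	(1 * SAVE_SLOT_BYTES)	/* __LC_SAVE_AREA+40     */
#define SLOT_MCCK	(2 * SAVE_SLOT_BYTES)	/* __LC_SAVE_AREA+80     */

int main(void)
{
	assert(SLOT_ASYNC == 40 && SLOT_MCCK == 80);
	return 0;
}
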
175 .macro RESTORE_ALL psworg,sync 190 .macro RESTORE_ALL psworg,sync
@@ -185,6 +200,13 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
185 lpswe \psworg # back to caller 200 lpswe \psworg # back to caller
186 .endm 201 .endm
187 202
203 .macro LAST_BREAK
204 srag %r10,%r11,23
205 jz 0f
206 stg %r11,__TI_last_break(%r12)
2070:
208 .endm
209
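LAST_BREAK backs the new breaking-event-address reporting for user space: the address captured in __LC_LAST_BREAK (loaded into %r11 by the SAVE_ALL macros) is copied into the thread_info only when it looks meaningful, i.e. when the value shifted right by 23 bits is non-zero. A sketch with an illustrative field name:

#include <stdint.h>

struct thread_info_sketch {
	uint64_t last_break;	/* stand-in for the __TI_last_break slot */
};

/* LAST_BREAK: "srag %r10,%r11,23; jz 0f; stg %r11,__TI_last_break(%r12)" */
void record_last_break_sketch(struct thread_info_sketch *ti, uint64_t addr)
{
	if (addr >> 23)		/* ignore addresses below 8 MB */
		ti->last_break = addr;
}
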
188/* 210/*
189 * Scheduler resume function, called by switch_to 211 * Scheduler resume function, called by switch_to
190 * gpr2 = (task_struct *) prev 212 * gpr2 = (task_struct *) prev
@@ -230,143 +252,129 @@ __critical_start:
230system_call: 252system_call:
231 stpt __LC_SYNC_ENTER_TIMER 253 stpt __LC_SYNC_ENTER_TIMER
232sysc_saveall: 254sysc_saveall:
233 SAVE_ALL_BASE __LC_SAVE_AREA
234 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 255 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
235 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 256 CREATE_STACK_FRAME __LC_SAVE_AREA
236 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 257 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
258 mvc SP_ILC(4,%r15),__LC_SVC_ILC
259 stg %r7,SP_ARGS(%r15)
260 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
237sysc_vtime: 261sysc_vtime:
238 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 262 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
239sysc_stime: 263sysc_stime:
240 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 264 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
241sysc_update: 265sysc_update:
242 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 266 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
267 LAST_BREAK
243sysc_do_svc: 268sysc_do_svc:
244 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 269 llgh %r7,SP_SVCNR(%r15)
245 ltgr %r7,%r7 # test for svc 0 270 slag %r7,%r7,2 # shift and test for svc 0
246 jnz sysc_nr_ok 271 jnz sysc_nr_ok
247 # svc 0: system call number in %r1 272 # svc 0: system call number in %r1
248 cl %r1,BASED(.Lnr_syscalls) 273 llgfr %r1,%r1 # clear high word in r1
274 cghi %r1,NR_syscalls
249 jnl sysc_nr_ok 275 jnl sysc_nr_ok
250 lgfr %r7,%r1 # clear high word in r1 276 sth %r1,SP_SVCNR(%r15)
277 slag %r7,%r1,2 # shift and test for svc 0
251sysc_nr_ok: 278sysc_nr_ok:
252 mvc SP_ARGS(8,%r15),SP_R7(%r15)
253sysc_do_restart:
254 sth %r7,SP_SVCNR(%r15)
255 sllg %r7,%r7,2 # svc number * 4
256 larl %r10,sys_call_table 279 larl %r10,sys_call_table
257#ifdef CONFIG_COMPAT 280#ifdef CONFIG_COMPAT
258 tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ? 281 tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ?
259 jno sysc_noemu 282 jno sysc_noemu
260 larl %r10,sys_call_table_emu # use 31 bit emulation system calls 283 larl %r10,sys_call_table_emu # use 31 bit emulation system calls
261sysc_noemu: 284sysc_noemu:
262#endif 285#endif
263 tm __TI_flags+6(%r9),_TIF_SYSCALL 286 tm __TI_flags+6(%r12),_TIF_SYSCALL
264 lgf %r8,0(%r7,%r10) # load address of system call routine 287 lgf %r8,0(%r7,%r10) # load address of system call routine
265 jnz sysc_tracesys 288 jnz sysc_tracesys
266 basr %r14,%r8 # call sys_xxxx 289 basr %r14,%r8 # call sys_xxxx
267 stg %r2,SP_R2(%r15) # store return value (change R2 on stack) 290 stg %r2,SP_R2(%r15) # store return value (change R2 on stack)
268 291
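sysc_do_svc now takes the system call number from SP_SVCNR on the stack rather than from a register prepared earlier, and the svc 0 convention is resolved in place: svc 0 means the real number was passed in %r1, and slag both scales the number to a 4-byte table index (the dispatcher loads handlers with lgf from 4-byte entries) and doubles as the test for zero. Roughly, with NR_SYSCALLS as a placeholder value:

#include <stdint.h>

#define NR_SYSCALLS_SKETCH 332	/* placeholder, not the real count */

/* Resolve the effective system call number the way sysc_do_svc does. */
unsigned int resolve_svc_number(unsigned int svc_nr, uint64_t gpr1)
{
	if (svc_nr != 0)			/* ordinary "svc n" */
		return svc_nr;
	/* svc 0: number passed in %r1; "llgfr %r1,%r1" clears bits 0-31 */
	if ((uint32_t)gpr1 < NR_SYSCALLS_SKETCH)
		return (uint32_t)gpr1;
	return 0;	/* out of range: index 0 is kept, as after "jnl sysc_nr_ok" */
}
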
269sysc_return: 292sysc_return:
270 tm __TI_flags+7(%r9),_TIF_WORK_SVC 293 LOCKDEP_SYS_EXIT
294sysc_tif:
295 tm __TI_flags+7(%r12),_TIF_WORK_SVC
271 jnz sysc_work # there is work to do (signals etc.) 296 jnz sysc_work # there is work to do (signals etc.)
272sysc_restore: 297sysc_restore:
273#ifdef CONFIG_TRACE_IRQFLAGS
274 larl %r1,sysc_restore_trace_psw
275 lpswe 0(%r1)
276sysc_restore_trace:
277 TRACE_IRQS_CHECK
278 LOCKDEP_SYS_EXIT
279#endif
280sysc_leave:
281 RESTORE_ALL __LC_RETURN_PSW,1 298 RESTORE_ALL __LC_RETURN_PSW,1
282sysc_done: 299sysc_done:
283 300
284#ifdef CONFIG_TRACE_IRQFLAGS
285 .section .data,"aw",@progbits
286 .align 8
287 .globl sysc_restore_trace_psw
288sysc_restore_trace_psw:
289 .quad 0, sysc_restore_trace
290 .previous
291#endif
292
293#
294# recheck if there is more work to do
295# 301#
296sysc_work_loop: 302# There is work to do, but first we need to check if we return to userspace.
297 tm __TI_flags+7(%r9),_TIF_WORK_SVC
298 jz sysc_restore # there is no work to do
299#
300# One of the work bits is on. Find out which one.
301# 303#
302sysc_work: 304sysc_work:
303 tm SP_PSW+1(%r15),0x01 # returning to user ? 305 tm SP_PSW+1(%r15),0x01 # returning to user ?
304 jno sysc_restore 306 jno sysc_restore
305 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 307
308#
309# One of the work bits is on. Find out which one.
310#
311sysc_work_tif:
312 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
306 jo sysc_mcck_pending 313 jo sysc_mcck_pending
307 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 314 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
308 jo sysc_reschedule 315 jo sysc_reschedule
309 tm __TI_flags+7(%r9),_TIF_SIGPENDING 316 tm __TI_flags+7(%r12),_TIF_SIGPENDING
310 jnz sysc_sigpending 317 jo sysc_sigpending
311 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 318 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
312 jnz sysc_notify_resume 319 jo sysc_notify_resume
313 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 320 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
314 jo sysc_restart 321 jo sysc_restart
315 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 322 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
316 jo sysc_singlestep 323 jo sysc_singlestep
317 j sysc_restore 324 j sysc_return # beware of critical section cleanup
318sysc_work_done:
319 325
320# 326#
321# _TIF_NEED_RESCHED is set, call schedule 327# _TIF_NEED_RESCHED is set, call schedule
322# 328#
323sysc_reschedule: 329sysc_reschedule:
324 larl %r14,sysc_work_loop 330 larl %r14,sysc_return
325 jg schedule # return point is sysc_return 331 jg schedule # return point is sysc_return
326 332
327# 333#
328# _TIF_MCCK_PENDING is set, call handler 334# _TIF_MCCK_PENDING is set, call handler
329# 335#
330sysc_mcck_pending: 336sysc_mcck_pending:
331 larl %r14,sysc_work_loop 337 larl %r14,sysc_return
332 jg s390_handle_mcck # TIF bit will be cleared by handler 338 jg s390_handle_mcck # TIF bit will be cleared by handler
333 339
334# 340#
335# _TIF_SIGPENDING is set, call do_signal 341# _TIF_SIGPENDING is set, call do_signal
336# 342#
337sysc_sigpending: 343sysc_sigpending:
338 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 344 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
339 la %r2,SP_PTREGS(%r15) # load pt_regs 345 la %r2,SP_PTREGS(%r15) # load pt_regs
340 brasl %r14,do_signal # call do_signal 346 brasl %r14,do_signal # call do_signal
341 tm __TI_flags+7(%r9),_TIF_RESTART_SVC 347 tm __TI_flags+7(%r12),_TIF_RESTART_SVC
342 jo sysc_restart 348 jo sysc_restart
343 tm __TI_flags+7(%r9),_TIF_SINGLE_STEP 349 tm __TI_flags+7(%r12),_TIF_SINGLE_STEP
344 jo sysc_singlestep 350 jo sysc_singlestep
345 j sysc_work_loop 351 j sysc_return
346 352
347# 353#
348# _TIF_NOTIFY_RESUME is set, call do_notify_resume 354# _TIF_NOTIFY_RESUME is set, call do_notify_resume
349# 355#
350sysc_notify_resume: 356sysc_notify_resume:
351 la %r2,SP_PTREGS(%r15) # load pt_regs 357 la %r2,SP_PTREGS(%r15) # load pt_regs
352 larl %r14,sysc_work_loop 358 larl %r14,sysc_return
353 jg do_notify_resume # call do_notify_resume 359 jg do_notify_resume # call do_notify_resume
354 360
355# 361#
356# _TIF_RESTART_SVC is set, set up registers and restart svc 362# _TIF_RESTART_SVC is set, set up registers and restart svc
357# 363#
358sysc_restart: 364sysc_restart:
359 ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC 365 ni __TI_flags+7(%r12),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
360 lg %r7,SP_R2(%r15) # load new svc number 366 lg %r7,SP_R2(%r15) # load new svc number
361 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument 367 mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
362 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 368 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
363 j sysc_do_restart # restart svc 369 sth %r7,SP_SVCNR(%r15)
370 slag %r7,%r7,2
371 j sysc_nr_ok # restart svc
364 372
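sysc_restart no longer needs a separate sysc_do_restart label: it stores the replacement number into SP_SVCNR itself, rescales it and jumps back to sysc_nr_ok. The convention is that signal handling left the new system call number in the return-value slot while SP_ORIG_R2 still holds the original first argument. A sketch with illustrative pt_regs field names:

struct pt_regs_sketch {
	unsigned long gprs[16];		/* gprs[2]: return value / first argument */
	unsigned long orig_gpr2;	/* first argument as saved at entry       */
	unsigned short svcnr;		/* system call number in the stack frame  */
};

/* Returns the scaled table index the dispatcher re-enters with. */
unsigned long restart_syscall_sketch(struct pt_regs_sketch *regs)
{
	unsigned long nr = regs->gprs[2];	/* "lg %r7,SP_R2(%r15)"     */

	regs->gprs[2] = regs->orig_gpr2;	/* "mvc SP_R2,SP_ORIG_R2"   */
	regs->svcnr = (unsigned short)nr;	/* "sth %r7,SP_SVCNR(%r15)" */
	return nr << 2;				/* "slag %r7,%r7,2"         */
}
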
365# 373#
366# _TIF_SINGLE_STEP is set, call do_single_step 374# _TIF_SINGLE_STEP is set, call do_single_step
367# 375#
368sysc_singlestep: 376sysc_singlestep:
369 ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 377 ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
370 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 378 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
371 la %r2,SP_PTREGS(%r15) # address of register-save area 379 la %r2,SP_PTREGS(%r15) # address of register-save area
372 larl %r14,sysc_return # load adr. of system return 380 larl %r14,sysc_return # load adr. of system return
@@ -379,8 +387,8 @@ sysc_singlestep:
379sysc_tracesys: 387sysc_tracesys:
380 la %r2,SP_PTREGS(%r15) # load pt_regs 388 la %r2,SP_PTREGS(%r15) # load pt_regs
381 la %r3,0 389 la %r3,0
382 srl %r7,2 390 llgh %r0,SP_SVCNR(%r15)
383 stg %r7,SP_R2(%r15) 391 stg %r0,SP_R2(%r15)
384 brasl %r14,do_syscall_trace_enter 392 brasl %r14,do_syscall_trace_enter
385 lghi %r0,NR_syscalls 393 lghi %r0,NR_syscalls
386 clgr %r0,%r2 394 clgr %r0,%r2
@@ -393,7 +401,7 @@ sysc_tracego:
393 basr %r14,%r8 # call sys_xxx 401 basr %r14,%r8 # call sys_xxx
394 stg %r2,SP_R2(%r15) # store return value 402 stg %r2,SP_R2(%r15) # store return value
395sysc_tracenogo: 403sysc_tracenogo:
396 tm __TI_flags+6(%r9),_TIF_SYSCALL 404 tm __TI_flags+6(%r12),_TIF_SYSCALL
397 jz sysc_return 405 jz sysc_return
398 la %r2,SP_PTREGS(%r15) # load pt_regs 406 la %r2,SP_PTREGS(%r15) # load pt_regs
399 larl %r14,sysc_return # return point is sysc_return 407 larl %r14,sysc_return # return point is sysc_return
@@ -405,7 +413,7 @@ sysc_tracenogo:
405 .globl ret_from_fork 413 .globl ret_from_fork
406ret_from_fork: 414ret_from_fork:
407 lg %r13,__LC_SVC_NEW_PSW+8 415 lg %r13,__LC_SVC_NEW_PSW+8
408 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 416 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
409 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 417 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
410 jo 0f 418 jo 0f
411 stg %r15,SP_R15(%r15) # store stack pointer for new kthread 419 stg %r15,SP_R15(%r15) # store stack pointer for new kthread
@@ -435,12 +443,14 @@ kernel_execve:
435 br %r14 443 br %r14
436 # execve succeeded. 444 # execve succeeded.
4370: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts 4450: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts
446# TRACE_IRQS_OFF
438 lg %r15,__LC_KERNEL_STACK # load ksp 447 lg %r15,__LC_KERNEL_STACK # load ksp
439 aghi %r15,-SP_SIZE # make room for registers & psw 448 aghi %r15,-SP_SIZE # make room for registers & psw
440 lg %r13,__LC_SVC_NEW_PSW+8 449 lg %r13,__LC_SVC_NEW_PSW+8
441 lg %r9,__LC_THREAD_INFO
442 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs 450 mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs
451 lg %r12,__LC_THREAD_INFO
443 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 452 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
453# TRACE_IRQS_ON
444 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 454 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
445 brasl %r14,execve_tail 455 brasl %r14,execve_tail
446 j sysc_return 456 j sysc_return
@@ -465,20 +475,23 @@ pgm_check_handler:
465 * for LPSW?). 475 * for LPSW?).
466 */ 476 */
467 stpt __LC_SYNC_ENTER_TIMER 477 stpt __LC_SYNC_ENTER_TIMER
468 SAVE_ALL_BASE __LC_SAVE_AREA
469 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception 478 tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
470 jnz pgm_per # got per exception -> special case 479 jnz pgm_per # got per exception -> special case
471 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 480 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
472 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 481 CREATE_STACK_FRAME __LC_SAVE_AREA
482 xc SP_ILC(4,%r15),SP_ILC(%r15)
483 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
484 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
473 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 485 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
474 jz pgm_no_vtime 486 jz pgm_no_vtime
475 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 487 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
476 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 488 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
477 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 489 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
490 LAST_BREAK
478pgm_no_vtime: 491pgm_no_vtime:
479 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 492 HANDLE_SIE_INTERCEPT
480 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 493 TRACE_IRQS_CHECK_OFF
481 TRACE_IRQS_OFF 494 stg %r11,SP_ARGS(%r15)
482 lgf %r3,__LC_PGM_ILC # load program interruption code 495 lgf %r3,__LC_PGM_ILC # load program interruption code
483 lghi %r8,0x7f 496 lghi %r8,0x7f
484 ngr %r8,%r3 497 ngr %r8,%r3
@@ -487,8 +500,10 @@ pgm_do_call:
487 larl %r1,pgm_check_table 500 larl %r1,pgm_check_table
488 lg %r1,0(%r8,%r1) # load address of handler routine 501 lg %r1,0(%r8,%r1) # load address of handler routine
489 la %r2,SP_PTREGS(%r15) # address of register-save area 502 la %r2,SP_PTREGS(%r15) # address of register-save area
490 larl %r14,sysc_return 503 basr %r14,%r1 # branch to interrupt-handler
491 br %r1 # branch to interrupt-handler 504pgm_exit:
505 TRACE_IRQS_CHECK_ON
506 j sysc_return
492 507
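pgm_do_call now calls the handler with basr and falls through into pgm_exit, which runs TRACE_IRQS_CHECK_ON before rejoining sysc_return, instead of faking the return address in %r14. The dispatch itself is unchanged: the low seven bits of the program interruption code index pgm_check_table, the PER bit (0x80) having been peeled off beforehand. A sketch, with the handler signature and table name as assumptions:

struct pt_regs_sketch;	/* opaque for this sketch */
typedef void (*pgm_handler_fn)(struct pt_regs_sketch *regs, long pgm_int_code);

#define PGM_HANDLERS 128
extern pgm_handler_fn pgm_check_table_sketch[PGM_HANDLERS];

void dispatch_program_check(struct pt_regs_sketch *regs, long ilc)
{
	pgm_handler_fn handler = pgm_check_table_sketch[ilc & 0x7f];

	handler(regs, ilc);	/* "basr %r14,%r1", then fall into pgm_exit */
}
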
493# 508#
494# handle per exception 509# handle per exception
@@ -500,55 +515,60 @@ pgm_per:
500 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW 515 clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW
501 je pgm_svcper 516 je pgm_svcper
502# no interesting special case, ignore PER event 517# no interesting special case, ignore PER event
503 lmg %r12,%r15,__LC_SAVE_AREA
504 lpswe __LC_PGM_OLD_PSW 518 lpswe __LC_PGM_OLD_PSW
505 519
506# 520#
507# Normal per exception 521# Normal per exception
508# 522#
509pgm_per_std: 523pgm_per_std:
510 SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA 524 SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
511 CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA 525 CREATE_STACK_FRAME __LC_SAVE_AREA
526 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
527 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
512 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 528 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
513 jz pgm_no_vtime2 529 jz pgm_no_vtime2
514 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 530 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
515 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 531 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
516 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 532 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
533 LAST_BREAK
517pgm_no_vtime2: 534pgm_no_vtime2:
518 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 535 HANDLE_SIE_INTERCEPT
519 TRACE_IRQS_OFF 536 TRACE_IRQS_CHECK_OFF
520 lg %r1,__TI_task(%r9) 537 lg %r1,__TI_task(%r12)
521 tm SP_PSW+1(%r15),0x01 # kernel per event ? 538 tm SP_PSW+1(%r15),0x01 # kernel per event ?
522 jz kernel_per 539 jz kernel_per
523 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 540 mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
524 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 541 mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
525 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 542 mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
526 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 543 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
527 lgf %r3,__LC_PGM_ILC # load program interruption code 544 lgf %r3,__LC_PGM_ILC # load program interruption code
528 lghi %r8,0x7f 545 lghi %r8,0x7f
529 ngr %r8,%r3 # clear per-event-bit and ilc 546 ngr %r8,%r3 # clear per-event-bit and ilc
530 je sysc_return 547 je pgm_exit
531 j pgm_do_call 548 j pgm_do_call
532 549
533# 550#
534# it was a single stepped SVC that is causing all the trouble 551# it was a single stepped SVC that is causing all the trouble
535# 552#
536pgm_svcper: 553pgm_svcper:
537 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 554 SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
538 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 555 CREATE_STACK_FRAME __LC_SAVE_AREA
556 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
557 mvc SP_ILC(4,%r15),__LC_SVC_ILC
558 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
539 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 559 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
540 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 560 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
541 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 561 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
542 llgh %r7,__LC_SVC_INT_CODE # get svc number from lowcore 562 LAST_BREAK
543 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 563 TRACE_IRQS_OFF
544 lg %r8,__TI_task(%r9) 564 lg %r8,__TI_task(%r12)
545 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 565 mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID
546 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 566 mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS
547 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 567 mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
548 oi __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 568 oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
549 TRACE_IRQS_ON 569 TRACE_IRQS_ON
550 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
551 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 570 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
571 lmg %r2,%r6,SP_R2(%r15) # load svc arguments
552 j sysc_do_svc 572 j sysc_do_svc
553 573
554# 574#
@@ -557,8 +577,8 @@ pgm_svcper:
557kernel_per: 577kernel_per:
558 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 578 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
559 la %r2,SP_PTREGS(%r15) # address of register-save area 579 la %r2,SP_PTREGS(%r15) # address of register-save area
560 larl %r14,sysc_restore # load adr. of system ret, no work 580 brasl %r14,do_single_step
561 jg do_single_step # branch to do_single_step 581 j pgm_exit
562 582
563/* 583/*
564 * IO interrupt handler routine 584 * IO interrupt handler routine
@@ -567,162 +587,133 @@ kernel_per:
567io_int_handler: 587io_int_handler:
568 stck __LC_INT_CLOCK 588 stck __LC_INT_CLOCK
569 stpt __LC_ASYNC_ENTER_TIMER 589 stpt __LC_ASYNC_ENTER_TIMER
570 SAVE_ALL_BASE __LC_SAVE_AREA+32 590 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
571 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 591 CREATE_STACK_FRAME __LC_SAVE_AREA+40
572 CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32 592 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
593 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
573 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 594 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
574 jz io_no_vtime 595 jz io_no_vtime
575 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 596 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
576 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 597 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
577 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 598 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
599 LAST_BREAK
578io_no_vtime: 600io_no_vtime:
579 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 601 HANDLE_SIE_INTERCEPT
580 TRACE_IRQS_OFF 602 TRACE_IRQS_OFF
581 la %r2,SP_PTREGS(%r15) # address of register-save area 603 la %r2,SP_PTREGS(%r15) # address of register-save area
582 brasl %r14,do_IRQ # call standard irq handler 604 brasl %r14,do_IRQ # call standard irq handler
583io_return: 605io_return:
584 tm __TI_flags+7(%r9),_TIF_WORK_INT 606 LOCKDEP_SYS_EXIT
607 TRACE_IRQS_ON
608io_tif:
609 tm __TI_flags+7(%r12),_TIF_WORK_INT
585 jnz io_work # there is work to do (signals etc.) 610 jnz io_work # there is work to do (signals etc.)
586io_restore: 611io_restore:
587#ifdef CONFIG_TRACE_IRQFLAGS
588 larl %r1,io_restore_trace_psw
589 lpswe 0(%r1)
590io_restore_trace:
591 TRACE_IRQS_CHECK
592 LOCKDEP_SYS_EXIT
593#endif
594io_leave:
595 RESTORE_ALL __LC_RETURN_PSW,0 612 RESTORE_ALL __LC_RETURN_PSW,0
596io_done: 613io_done:
597 614
598#ifdef CONFIG_TRACE_IRQFLAGS
599 .section .data,"aw",@progbits
600 .align 8
601 .globl io_restore_trace_psw
602io_restore_trace_psw:
603 .quad 0, io_restore_trace
604 .previous
605#endif
606
607# 615#
608# There is work to do, we need to check if we return to userspace, then 616# There is work to do, find out in which context we have been interrupted:
609# check, if we are in SIE, if yes leave it 617# 1) if we return to user space we can do all _TIF_WORK_INT work
618# 2) if we return to kernel code and kvm is enabled check if we need to
619# modify the psw to leave SIE
620# 3) if we return to kernel code and preemptive scheduling is enabled check
621# the preemption counter and if it is zero call preempt_schedule_irq
622# Before any work can be done, a switch to the kernel stack is required.
610# 623#
611io_work: 624io_work:
612 tm SP_PSW+1(%r15),0x01 # returning to user ? 625 tm SP_PSW+1(%r15),0x01 # returning to user ?
613#ifndef CONFIG_PREEMPT 626 jo io_work_user # yes -> do resched & signal
614#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 627#ifdef CONFIG_PREEMPT
615 jnz io_work_user # yes -> no need to check for SIE
616 la %r1, BASED(sie_opcode) # we return to kernel here
617 lg %r2, SP_PSW+8(%r15)
618 clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
619 jne io_restore # no-> return to kernel
620 lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
621 aghi %r1, 4
622 stg %r1, SP_PSW+8(%r15)
623 j io_restore # return to kernel
624#else
625 jno io_restore # no-> skip resched & signal
626#endif
627#else
628 jnz io_work_user # yes -> do resched & signal
629#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
630 la %r1, BASED(sie_opcode)
631 lg %r2, SP_PSW+8(%r15)
632 clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
633 jne 0f # no -> leave PSW alone
634 lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
635 aghi %r1, 4
636 stg %r1, SP_PSW+8(%r15)
6370:
638#endif
639 # check for preemptive scheduling 628 # check for preemptive scheduling
640 icm %r0,15,__TI_precount(%r9) 629 icm %r0,15,__TI_precount(%r12)
641 jnz io_restore # preemption is disabled 630 jnz io_restore # preemption is disabled
631 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
632 jno io_restore
642 # switch to kernel stack 633 # switch to kernel stack
643 lg %r1,SP_R15(%r15) 634 lg %r1,SP_R15(%r15)
644 aghi %r1,-SP_SIZE 635 aghi %r1,-SP_SIZE
645 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 636 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
646 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 637 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
647 lgr %r15,%r1 638 lgr %r15,%r1
648io_resume_loop: 639 # TRACE_IRQS_ON already done at io_return, call
649 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 640 # TRACE_IRQS_OFF to keep things symmetrical
650 jno io_restore 641 TRACE_IRQS_OFF
651 larl %r14,io_resume_loop 642 brasl %r14,preempt_schedule_irq
652 jg preempt_schedule_irq 643 j io_return
644#else
645 j io_restore
653#endif 646#endif
654 647
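This is case 3) of the comment above: with CONFIG_PREEMPT the exit path only bothers to switch stacks and call preempt_schedule_irq when the preemption counter is zero and TIF_NEED_RESCHED is set, and it comes back through io_return so the work bits are re-evaluated. Reduced to C, the gate looks roughly like this (field and flag names are illustrative):

struct thread_info_sketch {
	int preempt_count;		/* __TI_precount */
	unsigned long flags;		/* __TI_flags    */
};

#define TIF_NEED_RESCHED_SKETCH (1ul << 3)	/* placeholder bit */

/* Non-zero: switch to the kernel stack and call preempt_schedule_irq
 * before restoring the interrupted context. */
int irq_exit_should_preempt(const struct thread_info_sketch *ti)
{
	if (ti->preempt_count)				/* "icm ...; jnz io_restore" */
		return 0;
	if (!(ti->flags & TIF_NEED_RESCHED_SKETCH))	/* "tm ...; jno io_restore"  */
		return 0;
	return 1;
}
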
648#
649# Need to do work before returning to userspace, switch to kernel stack
650#
655io_work_user: 651io_work_user:
656 lg %r1,__LC_KERNEL_STACK 652 lg %r1,__LC_KERNEL_STACK
657 aghi %r1,-SP_SIZE 653 aghi %r1,-SP_SIZE
658 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) 654 mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
659 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 655 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
660 lgr %r15,%r1 656 lgr %r15,%r1
657
661# 658#
662# One of the work bits is on. Find out which one. 659# One of the work bits is on. Find out which one.
663# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED 660# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
664# and _TIF_MCCK_PENDING 661# and _TIF_MCCK_PENDING
665# 662#
666io_work_loop: 663io_work_tif:
667 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 664 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
668 jo io_mcck_pending 665 jo io_mcck_pending
669 tm __TI_flags+7(%r9),_TIF_NEED_RESCHED 666 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
670 jo io_reschedule 667 jo io_reschedule
671 tm __TI_flags+7(%r9),_TIF_SIGPENDING 668 tm __TI_flags+7(%r12),_TIF_SIGPENDING
672 jnz io_sigpending 669 jo io_sigpending
673 tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME 670 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
674 jnz io_notify_resume 671 jo io_notify_resume
675 j io_restore 672 j io_return # beware of critical section cleanup
676io_work_done:
677
678#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
679sie_opcode:
680 .long 0xb2140000
681#endif
682 673
683# 674#
684# _TIF_MCCK_PENDING is set, call handler 675# _TIF_MCCK_PENDING is set, call handler
685# 676#
686io_mcck_pending: 677io_mcck_pending:
678 # TRACE_IRQS_ON already done at io_return
687 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 679 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
688 j io_work_loop 680 TRACE_IRQS_OFF
681 j io_return
689 682
690# 683#
691# _TIF_NEED_RESCHED is set, call schedule 684# _TIF_NEED_RESCHED is set, call schedule
692# 685#
693io_reschedule: 686io_reschedule:
694 TRACE_IRQS_ON 687 # TRACE_IRQS_ON already done at io_return
695 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 688 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
696 brasl %r14,schedule # call scheduler 689 brasl %r14,schedule # call scheduler
697 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 690 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
698 TRACE_IRQS_OFF 691 TRACE_IRQS_OFF
699 tm __TI_flags+7(%r9),_TIF_WORK_INT 692 j io_return
700 jz io_restore # there is no work to do
701 j io_work_loop
702 693
703# 694#
704# _TIF_SIGPENDING is set, call do_signal 695# _TIF_SIGPENDING is set, call do_signal
705# 696#
706io_sigpending: 697io_sigpending:
707 TRACE_IRQS_ON 698 # TRACE_IRQS_ON already done at io_return
708 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 699 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
709 la %r2,SP_PTREGS(%r15) # load pt_regs 700 la %r2,SP_PTREGS(%r15) # load pt_regs
710 brasl %r14,do_signal # call do_signal 701 brasl %r14,do_signal # call do_signal
711 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 702 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
712 TRACE_IRQS_OFF 703 TRACE_IRQS_OFF
713 j io_work_loop 704 j io_return
714 705
715# 706#
716# _TIF_NOTIFY_RESUME is set, call do_notify_resume 707# _TIF_NOTIFY_RESUME is set, call do_notify_resume
717# 708#
718io_notify_resume: 709io_notify_resume:
719 TRACE_IRQS_ON 710 # TRACE_IRQS_ON already done at io_return
720 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 711 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
721 la %r2,SP_PTREGS(%r15) # load pt_regs 712 la %r2,SP_PTREGS(%r15) # load pt_regs
722 brasl %r14,do_notify_resume # call do_notify_resume 713 brasl %r14,do_notify_resume # call do_notify_resume
723 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts 714 stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
724 TRACE_IRQS_OFF 715 TRACE_IRQS_OFF
725 j io_work_loop 716 j io_return
726 717
727/* 718/*
728 * External interrupt handler routine 719 * External interrupt handler routine
@@ -731,16 +722,18 @@ io_notify_resume:
731ext_int_handler: 722ext_int_handler:
732 stck __LC_INT_CLOCK 723 stck __LC_INT_CLOCK
733 stpt __LC_ASYNC_ENTER_TIMER 724 stpt __LC_ASYNC_ENTER_TIMER
734 SAVE_ALL_BASE __LC_SAVE_AREA+32 725 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
735 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 726 CREATE_STACK_FRAME __LC_SAVE_AREA+40
736 CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32 727 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
728 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
737 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 729 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
738 jz ext_no_vtime 730 jz ext_no_vtime
739 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 731 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
740 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 732 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
741 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 733 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
734 LAST_BREAK
742ext_no_vtime: 735ext_no_vtime:
743 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct 736 HANDLE_SIE_INTERCEPT
744 TRACE_IRQS_OFF 737 TRACE_IRQS_OFF
745 la %r2,SP_PTREGS(%r15) # address of register-save area 738 la %r2,SP_PTREGS(%r15) # address of register-save area
746 llgh %r3,__LC_EXT_INT_CODE # get interruption code 739 llgh %r3,__LC_EXT_INT_CODE # get interruption code
@@ -754,17 +747,18 @@ __critical_end:
754 */ 747 */
755 .globl mcck_int_handler 748 .globl mcck_int_handler
756mcck_int_handler: 749mcck_int_handler:
757 stck __LC_INT_CLOCK 750 stck __LC_MCCK_CLOCK
758 la %r1,4095 # revalidate r1 751 la %r1,4095 # revalidate r1
759 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 752 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
760 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs 753 lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
761 SAVE_ALL_BASE __LC_SAVE_AREA+64 754 stmg %r11,%r15,__LC_SAVE_AREA+80
755 larl %r13,system_call
756 lg %r11,__LC_LAST_BREAK
762 la %r12,__LC_MCK_OLD_PSW 757 la %r12,__LC_MCK_OLD_PSW
763 tm __LC_MCCK_CODE,0x80 # system damage? 758 tm __LC_MCCK_CODE,0x80 # system damage?
764 jo mcck_int_main # yes -> rest of mcck code invalid 759 jo mcck_int_main # yes -> rest of mcck code invalid
765 la %r14,4095 760 la %r14,4095
766 mvc __LC_SAVE_AREA+104(8),__LC_ASYNC_ENTER_TIMER 761 mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
767 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14)
768 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 762 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
769 jo 1f 763 jo 1f
770 la %r14,__LC_SYNC_ENTER_TIMER 764 la %r14,__LC_SYNC_ENTER_TIMER
@@ -778,7 +772,7 @@ mcck_int_handler:
778 jl 0f 772 jl 0f
779 la %r14,__LC_LAST_UPDATE_TIMER 773 la %r14,__LC_LAST_UPDATE_TIMER
7800: spt 0(%r14) 7740: spt 0(%r14)
781 mvc __LC_ASYNC_ENTER_TIMER(8),0(%r14) 775 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7821: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7761: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
783 jno mcck_int_main # no -> skip cleanup critical 777 jno mcck_int_main # no -> skip cleanup critical
784 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit 778 tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
@@ -794,16 +788,19 @@ mcck_int_main:
794 srag %r14,%r14,PAGE_SHIFT 788 srag %r14,%r14,PAGE_SHIFT
795 jz 0f 789 jz 0f
796 lg %r15,__LC_PANIC_STACK # load panic stack 790 lg %r15,__LC_PANIC_STACK # load panic stack
7970: CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+64 7910: aghi %r15,-SP_SIZE # make room for registers & psw
792 CREATE_STACK_FRAME __LC_SAVE_AREA+80
793 mvc SP_PSW(16,%r15),0(%r12)
794 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
798 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 795 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
799 jno mcck_no_vtime # no -> no timer update 796 jno mcck_no_vtime # no -> no timer update
800 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 797 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
801 jz mcck_no_vtime 798 jz mcck_no_vtime
802 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 799 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
803 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 800 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
804 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 801 mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER
802 LAST_BREAK
805mcck_no_vtime: 803mcck_no_vtime:
806 lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
807 la %r2,SP_PTREGS(%r15) # load pt_regs 804 la %r2,SP_PTREGS(%r15) # load pt_regs
808 brasl %r14,s390_do_machine_check 805 brasl %r14,s390_do_machine_check
809 tm SP_PSW+1(%r15),0x01 # returning to user ? 806 tm SP_PSW+1(%r15),0x01 # returning to user ?
@@ -814,8 +811,9 @@ mcck_no_vtime:
814 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain 811 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
815 lgr %r15,%r1 812 lgr %r15,%r1
816 stosm __SF_EMPTY(%r15),0x04 # turn dat on 813 stosm __SF_EMPTY(%r15),0x04 # turn dat on
817 tm __TI_flags+7(%r9),_TIF_MCCK_PENDING 814 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
818 jno mcck_return 815 jno mcck_return
816 HANDLE_SIE_INTERCEPT
819 TRACE_IRQS_OFF 817 TRACE_IRQS_OFF
820 brasl %r14,s390_handle_mcck 818 brasl %r14,s390_handle_mcck
821 TRACE_IRQS_ON 819 TRACE_IRQS_ON
@@ -823,11 +821,11 @@ mcck_return:
823 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW 821 mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW
824 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit 822 ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
825 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 823 lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15
826 mvc __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+104
827 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 824 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
828 jno 0f 825 jno 0f
829 stpt __LC_EXIT_TIMER 826 stpt __LC_EXIT_TIMER
8300: lpswe __LC_RETURN_MCCK_PSW # back to caller 8270: lpswe __LC_RETURN_MCCK_PSW # back to caller
828mcck_done:
831 829
832/* 830/*
833 * Restart interruption handler, kick starter for additional CPUs 831 * Restart interruption handler, kick starter for additional CPUs
@@ -883,14 +881,14 @@ stack_overflow:
883 lg %r15,__LC_PANIC_STACK # change to panic stack 881 lg %r15,__LC_PANIC_STACK # change to panic stack
884 aghi %r15,-SP_SIZE 882 aghi %r15,-SP_SIZE
885 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 883 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
886 stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack 884 stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack
887 la %r1,__LC_SAVE_AREA 885 la %r1,__LC_SAVE_AREA
888 chi %r12,__LC_SVC_OLD_PSW 886 chi %r12,__LC_SVC_OLD_PSW
889 je 0f 887 je 0f
890 chi %r12,__LC_PGM_OLD_PSW 888 chi %r12,__LC_PGM_OLD_PSW
891 je 0f 889 je 0f
892 la %r1,__LC_SAVE_AREA+32 890 la %r1,__LC_SAVE_AREA+40
8930: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack 8910: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack
894 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK 892 mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
895 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain 893 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
896 la %r2,SP_PTREGS(%r15) # load pt_regs 894 la %r2,SP_PTREGS(%r15) # load pt_regs
@@ -899,18 +897,14 @@ stack_overflow:
899 897
900cleanup_table_system_call: 898cleanup_table_system_call:
901 .quad system_call, sysc_do_svc 899 .quad system_call, sysc_do_svc
902cleanup_table_sysc_return: 900cleanup_table_sysc_tif:
903 .quad sysc_return, sysc_leave 901 .quad sysc_tif, sysc_restore
904cleanup_table_sysc_leave: 902cleanup_table_sysc_restore:
905 .quad sysc_leave, sysc_done 903 .quad sysc_restore, sysc_done
906cleanup_table_sysc_work_loop: 904cleanup_table_io_tif:
907 .quad sysc_work_loop, sysc_work_done 905 .quad io_tif, io_restore
908cleanup_table_io_return: 906cleanup_table_io_restore:
909 .quad io_return, io_leave 907 .quad io_restore, io_done
910cleanup_table_io_leave:
911 .quad io_leave, io_done
912cleanup_table_io_work_loop:
913 .quad io_work_loop, io_work_done
914 908
915cleanup_critical: 909cleanup_critical:
916 clc 8(8,%r12),BASED(cleanup_table_system_call) 910 clc 8(8,%r12),BASED(cleanup_table_system_call)
@@ -918,61 +912,54 @@ cleanup_critical:
918 clc 8(8,%r12),BASED(cleanup_table_system_call+8) 912 clc 8(8,%r12),BASED(cleanup_table_system_call+8)
919 jl cleanup_system_call 913 jl cleanup_system_call
9200: 9140:
921 clc 8(8,%r12),BASED(cleanup_table_sysc_return) 915 clc 8(8,%r12),BASED(cleanup_table_sysc_tif)
922 jl 0f
923 clc 8(8,%r12),BASED(cleanup_table_sysc_return+8)
924 jl cleanup_sysc_return
9250:
926 clc 8(8,%r12),BASED(cleanup_table_sysc_leave)
927 jl 0f 916 jl 0f
928 clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8) 917 clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8)
929 jl cleanup_sysc_leave 918 jl cleanup_sysc_tif
9300: 9190:
931 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop) 920 clc 8(8,%r12),BASED(cleanup_table_sysc_restore)
932 jl 0f 921 jl 0f
933 clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8) 922 clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8)
934 jl cleanup_sysc_return 923 jl cleanup_sysc_restore
9350: 9240:
936 clc 8(8,%r12),BASED(cleanup_table_io_return) 925 clc 8(8,%r12),BASED(cleanup_table_io_tif)
937 jl 0f 926 jl 0f
938 clc 8(8,%r12),BASED(cleanup_table_io_return+8) 927 clc 8(8,%r12),BASED(cleanup_table_io_tif+8)
939 jl cleanup_io_return 928 jl cleanup_io_tif
9400: 9290:
941 clc 8(8,%r12),BASED(cleanup_table_io_leave) 930 clc 8(8,%r12),BASED(cleanup_table_io_restore)
942 jl 0f 931 jl 0f
943 clc 8(8,%r12),BASED(cleanup_table_io_leave+8) 932 clc 8(8,%r12),BASED(cleanup_table_io_restore+8)
944 jl cleanup_io_leave 933 jl cleanup_io_restore
9450:
946 clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
947 jl 0f
948 clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
949 jl cleanup_io_work_loop
9500: 9340:
951 br %r14 935 br %r14
952 936
953cleanup_system_call: 937cleanup_system_call:
954 mvc __LC_RETURN_PSW(16),0(%r12) 938 mvc __LC_RETURN_PSW(16),0(%r12)
955 cghi %r12,__LC_MCK_OLD_PSW
956 je 0f
957 la %r12,__LC_SAVE_AREA+32
958 j 1f
9590: la %r12,__LC_SAVE_AREA+64
9601:
961 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) 939 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8)
962 jh 0f 940 jh 0f
941 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
942 cghi %r12,__LC_MCK_OLD_PSW
943 je 0f
963 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 944 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
9450: cghi %r12,__LC_MCK_OLD_PSW
946 la %r12,__LC_SAVE_AREA+80
947 je 0f
948 la %r12,__LC_SAVE_AREA+40
9640: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) 9490: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16)
965 jhe cleanup_vtime 950 jhe cleanup_vtime
966 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) 951 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn)
967 jh 0f 952 jh 0f
968 mvc __LC_SAVE_AREA(32),0(%r12) 953 mvc __LC_SAVE_AREA(40),0(%r12)
9690: stg %r13,8(%r12) 9540: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp
970 stg %r12,__LC_SAVE_AREA+96 # argh 955 aghi %r15,-SP_SIZE # make room for registers & psw
971 SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 956 stg %r15,32(%r12)
972 CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA 957 stg %r11,0(%r12)
973 lg %r12,__LC_SAVE_AREA+96 # argh 958 CREATE_STACK_FRAME __LC_SAVE_AREA
974 stg %r15,24(%r12) 959 mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW
975 llgh %r7,__LC_SVC_INT_CODE 960 mvc SP_ILC(4,%r15),__LC_SVC_ILC
961 stg %r7,SP_ARGS(%r15)
962 mvc 8(8,%r12),__LC_THREAD_INFO
976cleanup_vtime: 963cleanup_vtime:
977 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) 964 clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24)
978 jhe cleanup_stime 965 jhe cleanup_stime
@@ -983,7 +970,11 @@ cleanup_stime:
983 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 970 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
984cleanup_update: 971cleanup_update:
985 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 972 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
986 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) 973 srag %r12,%r11,23
974 lg %r12,__LC_THREAD_INFO
975 jz 0f
976 stg %r11,__TI_last_break(%r12)
9770: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8)
987 la %r12,__LC_RETURN_PSW 978 la %r12,__LC_RETURN_PSW
988 br %r14 979 br %r14
989cleanup_system_call_insn: 980cleanup_system_call_insn:
@@ -993,61 +984,54 @@ cleanup_system_call_insn:
993 .quad sysc_stime 984 .quad sysc_stime
994 .quad sysc_update 985 .quad sysc_update
995 986
996cleanup_sysc_return: 987cleanup_sysc_tif:
997 mvc __LC_RETURN_PSW(8),0(%r12) 988 mvc __LC_RETURN_PSW(8),0(%r12)
998 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_return) 989 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif)
999 la %r12,__LC_RETURN_PSW 990 la %r12,__LC_RETURN_PSW
1000 br %r14 991 br %r14
1001 992
1002cleanup_sysc_leave: 993cleanup_sysc_restore:
1003 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn) 994 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn)
1004 je 3f 995 je 2f
1005 clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8) 996 clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8)
1006 jhe 0f 997 jhe 0f
998 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
999 cghi %r12,__LC_MCK_OLD_PSW
1000 je 0f
1007 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1001 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
10080: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 10020: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
1009 cghi %r12,__LC_MCK_OLD_PSW 1003 cghi %r12,__LC_MCK_OLD_PSW
1010 jne 1f 1004 la %r12,__LC_SAVE_AREA+80
1011 mvc __LC_SAVE_AREA+64(32),SP_R12(%r15) 1005 je 1f
1012 j 2f 1006 la %r12,__LC_SAVE_AREA+40
10131: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15) 10071: mvc 0(40,%r12),SP_R11(%r15)
10142: lmg %r0,%r11,SP_R0(%r15) 1008 lmg %r0,%r10,SP_R0(%r15)
1015 lg %r15,SP_R15(%r15) 1009 lg %r15,SP_R15(%r15)
10163: la %r12,__LC_RETURN_PSW 10102: la %r12,__LC_RETURN_PSW
1017 br %r14 1011 br %r14
1018cleanup_sysc_leave_insn: 1012cleanup_sysc_restore_insn:
1019 .quad sysc_done - 4 1013 .quad sysc_done - 4
1020 .quad sysc_done - 16 1014 .quad sysc_done - 16
1021 1015
1022cleanup_io_return: 1016cleanup_io_tif:
1023 mvc __LC_RETURN_PSW(8),0(%r12)
1024 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_return)
1025 la %r12,__LC_RETURN_PSW
1026 br %r14
1027
1028cleanup_io_work_loop:
1029 mvc __LC_RETURN_PSW(8),0(%r12) 1017 mvc __LC_RETURN_PSW(8),0(%r12)
1030 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop) 1018 mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif)
1031 la %r12,__LC_RETURN_PSW 1019 la %r12,__LC_RETURN_PSW
1032 br %r14 1020 br %r14
1033 1021
1034cleanup_io_leave: 1022cleanup_io_restore:
1035 clc 8(8,%r12),BASED(cleanup_io_leave_insn) 1023 clc 8(8,%r12),BASED(cleanup_io_restore_insn)
1036 je 3f 1024 je 1f
1037 clc 8(8,%r12),BASED(cleanup_io_leave_insn+8) 1025 clc 8(8,%r12),BASED(cleanup_io_restore_insn+8)
1038 jhe 0f 1026 jhe 0f
1039 mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER 1027 mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
10400: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) 10280: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
1041 cghi %r12,__LC_MCK_OLD_PSW 1029 mvc __LC_SAVE_AREA+80(40),SP_R11(%r15)
1042 jne 1f 1030 lmg %r0,%r10,SP_R0(%r15)
1043 mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
1044 j 2f
10451: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
10462: lmg %r0,%r11,SP_R0(%r15)
1047 lg %r15,SP_R15(%r15) 1031 lg %r15,SP_R15(%r15)
10483: la %r12,__LC_RETURN_PSW 10321: la %r12,__LC_RETURN_PSW
1049 br %r14 1033 br %r14
1050cleanup_io_leave_insn: 1034cleanup_io_restore_insn:
1051 .quad io_done - 4 1035 .quad io_done - 4
1052 .quad io_done - 16 1036 .quad io_done - 16
1053 1037
@@ -1055,13 +1039,6 @@ cleanup_io_leave_insn:
1055 * Integer constants 1039 * Integer constants
1056 */ 1040 */
1057 .align 4 1041 .align 4
1058.Lconst:
1059.Lnr_syscalls: .long NR_syscalls
1060.L0x0130: .short 0x130
1061.L0x0140: .short 0x140
1062.L0x0150: .short 0x150
1063.L0x0160: .short 0x160
1064.L0x0170: .short 0x170
1065.Lcritical_start: 1042.Lcritical_start:
1066 .quad __critical_start 1043 .quad __critical_start
1067.Lcritical_end: 1044.Lcritical_end:
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 9d1f76702d47..51838ad42d56 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -328,8 +328,8 @@ iplstart:
328# 328#
329# reset files in VM reader 329# reset files in VM reader
330# 330#
331 stidp __LC_CPUID # store cpuid 331 stidp __LC_SAVE_AREA # store cpuid
332 tm __LC_CPUID,0xff # running VM ? 332 tm __LC_SAVE_AREA,0xff # running VM ?
333 bno .Lnoreset 333 bno .Lnoreset
334 la %r2,.Lreset 334 la %r2,.Lreset
335 lhi %r3,26 335 lhi %r3,26
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 015e27da40eb..ac151399ef34 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -255,7 +255,8 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
255 int umode; 255 int umode;
256 256
257 nmi_enter(); 257 nmi_enter();
258 s390_idle_check(); 258 s390_idle_check(regs, S390_lowcore.mcck_clock,
259 S390_lowcore.mcck_enter_timer);
259 260
260 mci = (struct mci *) &S390_lowcore.mcck_interruption_code; 261 mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
261 mcck = &__get_cpu_var(cpu_mcck); 262 mcck = &__get_cpu_var(cpu_mcck);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 0729f36c2fe3..ecb2d02b02e4 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -18,24 +18,42 @@
18#include <asm/lowcore.h> 18#include <asm/lowcore.h>
19#include <asm/param.h> 19#include <asm/param.h>
20 20
21static DEFINE_PER_CPU(struct cpuid, cpu_id);
22
23/*
24 * cpu_init - initializes state that is per-CPU.
25 */
26void __cpuinit cpu_init(void)
27{
28 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
29
30 get_cpu_id(id);
31 atomic_inc(&init_mm.mm_count);
32 current->active_mm = &init_mm;
33 BUG_ON(current->mm);
34 enter_lazy_tlb(&init_mm, current);
35}
36
37/*
38 * print_cpu_info - print basic information about a cpu
39 */
21void __cpuinit print_cpu_info(void) 40void __cpuinit print_cpu_info(void)
22{ 41{
42 struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
43
23 pr_info("Processor %d started, address %d, identification %06X\n", 44 pr_info("Processor %d started, address %d, identification %06X\n",
24 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, 45 S390_lowcore.cpu_nr, S390_lowcore.cpu_addr, id->ident);
25 S390_lowcore.cpu_id.ident);
26} 46}
27 47
28/* 48/*
29 * show_cpuinfo - Get information on one CPU for use by procfs. 49 * show_cpuinfo - Get information on one CPU for use by procfs.
30 */ 50 */
31
32static int show_cpuinfo(struct seq_file *m, void *v) 51static int show_cpuinfo(struct seq_file *m, void *v)
33{ 52{
34 static const char *hwcap_str[10] = { 53 static const char *hwcap_str[10] = {
35 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp", 54 "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
36 "edat", "etf3eh", "highgprs" 55 "edat", "etf3eh", "highgprs"
37 }; 56 };
38 struct _lowcore *lc;
39 unsigned long n = (unsigned long) v - 1; 57 unsigned long n = (unsigned long) v - 1;
40 int i; 58 int i;
41 59
@@ -55,19 +73,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
55 } 73 }
56 74
57 if (cpu_online(n)) { 75 if (cpu_online(n)) {
58#ifdef CONFIG_SMP 76 struct cpuid *id = &per_cpu(cpu_id, n);
59 lc = (smp_processor_id() == n) ?
60 &S390_lowcore : lowcore_ptr[n];
61#else
62 lc = &S390_lowcore;
63#endif
64 seq_printf(m, "processor %li: " 77 seq_printf(m, "processor %li: "
65 "version = %02X, " 78 "version = %02X, "
66 "identification = %06X, " 79 "identification = %06X, "
67 "machine = %04X\n", 80 "machine = %04X\n",
68 n, lc->cpu_id.version, 81 n, id->version, id->ident, id->machine);
69 lc->cpu_id.ident,
70 lc->cpu_id.machine);
71 } 82 }
72 preempt_enable(); 83 preempt_enable();
73 return 0; 84 return 0;
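
show_cpuinfo() now pulls version, identification and machine from the per-cpu cpu_id copy instead of chasing remote lowcore pointers; the /proc/cpuinfo lines it emits keep their format. A minimal sketch of a user-space consumer of those lines (illustrative only, nothing below is part of the patch):

/* Illustrative /proc/cpuinfo reader: print the per-processor lines
 * produced by show_cpuinfo() ("processor N: version = ..., ..."). */
#include <stdio.h>
#include <string.h>

static void dump_processor_lines(void)
{
	FILE *f = fopen("/proc/cpuinfo", "r");
	char line[256];

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "processor ", 10))
			fputs(line, stdout);
	fclose(f);
}
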
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 9f654da4cecc..83339d33c4b1 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -57,6 +57,7 @@
57enum s390_regset { 57enum s390_regset {
58 REGSET_GENERAL, 58 REGSET_GENERAL,
59 REGSET_FP, 59 REGSET_FP,
60 REGSET_LAST_BREAK,
60 REGSET_GENERAL_EXTENDED, 61 REGSET_GENERAL_EXTENDED,
61}; 62};
62 63
@@ -381,6 +382,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
381 copied += sizeof(unsigned long); 382 copied += sizeof(unsigned long);
382 } 383 }
383 return 0; 384 return 0;
385 case PTRACE_GET_LAST_BREAK:
386 put_user(task_thread_info(child)->last_break,
387 (unsigned long __user *) data);
388 return 0;
384 default: 389 default:
385 /* Removing high order bit from addr (only for 31 bit). */ 390 /* Removing high order bit from addr (only for 31 bit). */
386 addr &= PSW_ADDR_INSN; 391 addr &= PSW_ADDR_INSN;
@@ -633,6 +638,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
633 copied += sizeof(unsigned int); 638 copied += sizeof(unsigned int);
634 } 639 }
635 return 0; 640 return 0;
641 case PTRACE_GET_LAST_BREAK:
642 put_user(task_thread_info(child)->last_break,
643 (unsigned int __user *) data);
644 return 0;
636 } 645 }
637 return compat_ptrace_request(child, request, addr, data); 646 return compat_ptrace_request(child, request, addr, data);
638} 647}
@@ -797,6 +806,28 @@ static int s390_fpregs_set(struct task_struct *target,
797 return rc; 806 return rc;
798} 807}
799 808
809#ifdef CONFIG_64BIT
810
811static int s390_last_break_get(struct task_struct *target,
812 const struct user_regset *regset,
813 unsigned int pos, unsigned int count,
814 void *kbuf, void __user *ubuf)
815{
816 if (count > 0) {
817 if (kbuf) {
818 unsigned long *k = kbuf;
819 *k = task_thread_info(target)->last_break;
820 } else {
821 unsigned long __user *u = ubuf;
822 if (__put_user(task_thread_info(target)->last_break, u))
823 return -EFAULT;
824 }
825 }
826 return 0;
827}
828
829#endif
830
800static const struct user_regset s390_regsets[] = { 831static const struct user_regset s390_regsets[] = {
801 [REGSET_GENERAL] = { 832 [REGSET_GENERAL] = {
802 .core_note_type = NT_PRSTATUS, 833 .core_note_type = NT_PRSTATUS,
@@ -814,6 +845,15 @@ static const struct user_regset s390_regsets[] = {
814 .get = s390_fpregs_get, 845 .get = s390_fpregs_get,
815 .set = s390_fpregs_set, 846 .set = s390_fpregs_set,
816 }, 847 },
848#ifdef CONFIG_64BIT
849 [REGSET_LAST_BREAK] = {
850 .core_note_type = NT_S390_LAST_BREAK,
851 .n = 1,
852 .size = sizeof(long),
853 .align = sizeof(long),
854 .get = s390_last_break_get,
855 },
856#endif
817}; 857};
818 858
819static const struct user_regset_view user_s390_view = { 859static const struct user_regset_view user_s390_view = {
@@ -948,6 +988,27 @@ static int s390_compat_regs_high_set(struct task_struct *target,
948 return rc; 988 return rc;
949} 989}
950 990
991static int s390_compat_last_break_get(struct task_struct *target,
992 const struct user_regset *regset,
993 unsigned int pos, unsigned int count,
994 void *kbuf, void __user *ubuf)
995{
996 compat_ulong_t last_break;
997
998 if (count > 0) {
999 last_break = task_thread_info(target)->last_break;
1000 if (kbuf) {
1001 unsigned long *k = kbuf;
1002 *k = last_break;
1003 } else {
1004 unsigned long __user *u = ubuf;
1005 if (__put_user(last_break, u))
1006 return -EFAULT;
1007 }
1008 }
1009 return 0;
1010}
1011
951static const struct user_regset s390_compat_regsets[] = { 1012static const struct user_regset s390_compat_regsets[] = {
952 [REGSET_GENERAL] = { 1013 [REGSET_GENERAL] = {
953 .core_note_type = NT_PRSTATUS, 1014 .core_note_type = NT_PRSTATUS,
@@ -965,6 +1026,13 @@ static const struct user_regset s390_compat_regsets[] = {
965 .get = s390_fpregs_get, 1026 .get = s390_fpregs_get,
966 .set = s390_fpregs_set, 1027 .set = s390_fpregs_set,
967 }, 1028 },
1029 [REGSET_LAST_BREAK] = {
1030 .core_note_type = NT_S390_LAST_BREAK,
1031 .n = 1,
1032 .size = sizeof(long),
1033 .align = sizeof(long),
1034 .get = s390_compat_last_break_get,
1035 },
968 [REGSET_GENERAL_EXTENDED] = { 1036 [REGSET_GENERAL_EXTENDED] = {
969 .core_note_type = NT_S390_HIGH_GPRS, 1037 .core_note_type = NT_S390_HIGH_GPRS,
970 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t), 1038 .n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
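
The new PTRACE_GET_LAST_BREAK request copies the tracee's last breaking-event address to the location passed as the data argument, and the same value is exported to debuggers through the NT_S390_LAST_BREAK regset added above. A hedged tracer sketch (the request constant is assumed to come from the updated asm/ptrace.h; attach/stop handling is omitted):

/* Hypothetical tracer snippet: read the last breaking-event address of a
 * stopped tracee.  Assumes PTRACE_GET_LAST_BREAK is visible through the
 * updated <asm/ptrace.h>; the kernel put_user()s the value to 'data'. */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static int print_last_break(pid_t pid)
{
	unsigned long last_break;

	if (ptrace(PTRACE_GET_LAST_BREAK, pid, 0L, &last_break) == -1) {
		perror("PTRACE_GET_LAST_BREAK");
		return -1;
	}
	printf("pid %d: last breaking-event address 0x%lx\n",
	       (int) pid, last_break);
	return 0;
}
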
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index 59618bcd99b7..9ce641b5291f 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -120,7 +120,8 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned short code)
120 struct pt_regs *old_regs; 120 struct pt_regs *old_regs;
121 121
122 old_regs = set_irq_regs(regs); 122 old_regs = set_irq_regs(regs);
123 s390_idle_check(); 123 s390_idle_check(regs, S390_lowcore.int_clock,
124 S390_lowcore.async_enter_timer);
124 irq_enter(); 125 irq_enter();
125 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) 126 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
126 /* Serve timer interrupts first. */ 127 /* Serve timer interrupts first. */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 91625f759ccd..7d893248d265 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -2,7 +2,7 @@
2 * arch/s390/kernel/setup.c 2 * arch/s390/kernel/setup.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Copyright (C) IBM Corp. 1999,2010
6 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 8 *
@@ -113,22 +113,6 @@ static struct resource data_resource = {
113}; 113};
114 114
115/* 115/*
116 * cpu_init() initializes state that is per-CPU.
117 */
118void __cpuinit cpu_init(void)
119{
120 /*
121 * Store processor id in lowcore (used e.g. in timer_interrupt)
122 */
123 get_cpu_id(&S390_lowcore.cpu_id);
124
125 atomic_inc(&init_mm.mm_count);
126 current->active_mm = &init_mm;
127 BUG_ON(current->mm);
128 enter_lazy_tlb(&init_mm, current);
129}
130
131/*
132 * condev= and conmode= setup parameter. 116 * condev= and conmode= setup parameter.
133 */ 117 */
134 118
@@ -385,10 +369,6 @@ static void setup_addressing_mode(void)
385 pr_info("Address spaces switched, " 369 pr_info("Address spaces switched, "
386 "mvcos not available\n"); 370 "mvcos not available\n");
387 } 371 }
388#ifdef CONFIG_TRACE_IRQFLAGS
389 sysc_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
390 io_restore_trace_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
391#endif
392} 372}
393 373
394static void __init 374static void __init
@@ -421,6 +401,7 @@ setup_lowcore(void)
421 lc->io_new_psw.mask = psw_kernel_bits; 401 lc->io_new_psw.mask = psw_kernel_bits;
422 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; 402 lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
423 lc->clock_comparator = -1ULL; 403 lc->clock_comparator = -1ULL;
404 lc->cmf_hpp = -1ULL;
424 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE; 405 lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
425 lc->async_stack = (unsigned long) 406 lc->async_stack = (unsigned long)
426 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE; 407 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
@@ -695,6 +676,7 @@ static void __init setup_hwcaps(void)
695 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 }; 676 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
696 unsigned long long facility_list_extended; 677 unsigned long long facility_list_extended;
697 unsigned int facility_list; 678 unsigned int facility_list;
679 struct cpuid cpu_id;
698 int i; 680 int i;
699 681
700 facility_list = stfl(); 682 facility_list = stfl();
@@ -756,7 +738,8 @@ static void __init setup_hwcaps(void)
756 */ 738 */
757 elf_hwcap |= HWCAP_S390_HIGH_GPRS; 739 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
758 740
759 switch (S390_lowcore.cpu_id.machine) { 741 get_cpu_id(&cpu_id);
742 switch (cpu_id.machine) {
760 case 0x9672: 743 case 0x9672:
761#if !defined(CONFIG_64BIT) 744#if !defined(CONFIG_64BIT)
762 default: /* Use "g5" as default for 31 bit kernels. */ 745 default: /* Use "g5" as default for 31 bit kernels. */
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6289945562b0..ee7ac8b11782 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -313,6 +313,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
313 To avoid breaking binary compatibility, they are passed as args. */ 313 To avoid breaking binary compatibility, they are passed as args. */
314 regs->gprs[4] = current->thread.trap_no; 314 regs->gprs[4] = current->thread.trap_no;
315 regs->gprs[5] = current->thread.prot_addr; 315 regs->gprs[5] = current->thread.prot_addr;
316 regs->gprs[6] = task_thread_info(current)->last_break;
316 317
317 /* Place signal number on stack to allow backtrace from handler. */ 318 /* Place signal number on stack to allow backtrace from handler. */
318 if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) 319 if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
@@ -376,6 +377,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
376 regs->gprs[2] = map_signal(sig); 377 regs->gprs[2] = map_signal(sig);
377 regs->gprs[3] = (unsigned long) &frame->info; 378 regs->gprs[3] = (unsigned long) &frame->info;
378 regs->gprs[4] = (unsigned long) &frame->uc; 379 regs->gprs[4] = (unsigned long) &frame->uc;
380 regs->gprs[5] = task_thread_info(current)->last_break;
379 return 0; 381 return 0;
380 382
381give_sigsegv: 383give_sigsegv:

diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 247b4c2d1e51..bcef00766a64 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -37,7 +37,8 @@ struct tl_cpu {
37}; 37};
38 38
39struct tl_container { 39struct tl_container {
40 unsigned char reserved[8]; 40 unsigned char reserved[7];
41 unsigned char id;
41}; 42};
42 43
43union tl_entry { 44union tl_entry {
@@ -58,6 +59,7 @@ struct tl_info {
58 59
59struct core_info { 60struct core_info {
60 struct core_info *next; 61 struct core_info *next;
62 unsigned char id;
61 cpumask_t mask; 63 cpumask_t mask;
62}; 64};
63 65
@@ -73,6 +75,7 @@ static DECLARE_WORK(topology_work, topology_work_fn);
73static DEFINE_SPINLOCK(topology_lock); 75static DEFINE_SPINLOCK(topology_lock);
74 76
75cpumask_t cpu_core_map[NR_CPUS]; 77cpumask_t cpu_core_map[NR_CPUS];
78unsigned char cpu_core_id[NR_CPUS];
76 79
77static cpumask_t cpu_coregroup_map(unsigned int cpu) 80static cpumask_t cpu_coregroup_map(unsigned int cpu)
78{ 81{
@@ -116,6 +119,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
116 for_each_present_cpu(lcpu) { 119 for_each_present_cpu(lcpu) {
117 if (cpu_logical_map(lcpu) == rcpu) { 120 if (cpu_logical_map(lcpu) == rcpu) {
118 cpu_set(lcpu, core->mask); 121 cpu_set(lcpu, core->mask);
122 cpu_core_id[lcpu] = core->id;
119 smp_cpu_polarization[lcpu] = tl_cpu->pp; 123 smp_cpu_polarization[lcpu] = tl_cpu->pp;
120 } 124 }
121 } 125 }
@@ -158,6 +162,7 @@ static void tl_to_cores(struct tl_info *info)
158 break; 162 break;
159 case 1: 163 case 1:
160 core = core->next; 164 core = core->next;
165 core->id = tle->container.id;
161 break; 166 break;
162 case 0: 167 case 0:
163 add_cpus_to_core(&tle->cpu, core); 168 add_cpus_to_core(&tle->cpu, core);
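
With the container id recorded in cpu_core_id[], the core identifier can be handed to the generic topology code and surface under the usual sysfs topology attributes. A hedged example reader; the /sys/devices/system/cpu/cpuN/topology/core_id path is the conventional location and an assumption here, not something defined in this hunk:

/* Illustrative sysfs reader for the exposed core identifier.  The path is
 * the conventional CPU topology location and is assumed, not defined here. */
#include <stdio.h>

static int read_core_id(int cpu)
{
	char path[96];
	int core_id = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &core_id) != 1)
		core_id = -1;
	fclose(f);
	return core_id;
}
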
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 6e7ad63854c0..5d8f0f3d0250 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -46,13 +46,7 @@
46 46
47pgm_check_handler_t *pgm_check_table[128]; 47pgm_check_handler_t *pgm_check_table[128];
48 48
49#ifdef CONFIG_SYSCTL 49int show_unhandled_signals;
50#ifdef CONFIG_PROCESS_DEBUG
51int sysctl_userprocess_debug = 1;
52#else
53int sysctl_userprocess_debug = 0;
54#endif
55#endif
56 50
57extern pgm_check_handler_t do_protection_exception; 51extern pgm_check_handler_t do_protection_exception;
58extern pgm_check_handler_t do_dat_exception; 52extern pgm_check_handler_t do_dat_exception;
@@ -315,18 +309,19 @@ void die(const char * str, struct pt_regs * regs, long err)
315 do_exit(SIGSEGV); 309 do_exit(SIGSEGV);
316} 310}
317 311
318static void inline 312static void inline report_user_fault(struct pt_regs *regs, long int_code,
319report_user_fault(long interruption_code, struct pt_regs *regs) 313 int signr)
320{ 314{
321#if defined(CONFIG_SYSCTL) 315 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
322 if (!sysctl_userprocess_debug)
323 return; 316 return;
324#endif 317 if (!unhandled_signal(current, signr))
325#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 318 return;
326 printk("User process fault: interruption code 0x%lX\n", 319 if (!printk_ratelimit())
327 interruption_code); 320 return;
321 printk("User process fault: interruption code 0x%lX ", int_code);
322 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
323 printk("\n");
328 show_regs(regs); 324 show_regs(regs);
329#endif
330} 325}
331 326
332int is_valid_bugaddr(unsigned long addr) 327int is_valid_bugaddr(unsigned long addr)
@@ -354,7 +349,7 @@ static void __kprobes inline do_trap(long interruption_code, int signr,
354 349
355 tsk->thread.trap_no = interruption_code & 0xffff; 350 tsk->thread.trap_no = interruption_code & 0xffff;
356 force_sig_info(signr, info, tsk); 351 force_sig_info(signr, info, tsk);
357 report_user_fault(interruption_code, regs); 352 report_user_fault(regs, interruption_code, signr);
358 } else { 353 } else {
359 const struct exception_table_entry *fixup; 354 const struct exception_table_entry *fixup;
360 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); 355 fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
@@ -390,8 +385,8 @@ static void default_trap_handler(struct pt_regs * regs, long interruption_code)
390{ 385{
391 if (regs->psw.mask & PSW_MASK_PSTATE) { 386 if (regs->psw.mask & PSW_MASK_PSTATE) {
392 local_irq_enable(); 387 local_irq_enable();
388 report_user_fault(regs, interruption_code, SIGSEGV);
393 do_exit(SIGSEGV); 389 do_exit(SIGSEGV);
394 report_user_fault(interruption_code, regs);
395 } else 390 } else
396 die("Unknown program exception", regs, interruption_code); 391 die("Unknown program exception", regs, interruption_code);
397} 392}
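
report_user_fault() is now gated by show_unhandled_signals, unhandled_signal() and printk_ratelimit() instead of the old sysctl_userprocess_debug/CONFIG_PROCESS_DEBUG pair. Assuming show_unhandled_signals is backed by the generic debug.exception-trace sysctl, the reporting becomes a run-time toggle; a small illustrative helper:

/* Hypothetical run-time toggle for the fault reporting above.  Assumes
 * show_unhandled_signals is backed by the debug.exception-trace sysctl,
 * i.e. /proc/sys/debug/exception-trace. */
#include <stdio.h>

static int set_exception_trace(int on)
{
	FILE *f = fopen("/proc/sys/debug/exception-trace", "w");

	if (!f) {
		perror("exception-trace");
		return -1;
	}
	fprintf(f, "%d\n", on ? 1 : 0);
	fclose(f);
	return 0;
}
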
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 6bc9c197aa91..6b83870507d5 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -102,11 +102,7 @@ static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
102/* 102/*
103 * Allocate/free per cpu vdso data. 103 * Allocate/free per cpu vdso data.
104 */ 104 */
105#ifdef CONFIG_64BIT
106#define SEGMENT_ORDER 2 105#define SEGMENT_ORDER 2
107#else
108#define SEGMENT_ORDER 1
109#endif
110 106
111int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore) 107int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
112{ 108{
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index b59a812a010e..3479f1b0d4e0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -121,32 +121,35 @@ void account_system_vtime(struct task_struct *tsk)
121} 121}
122EXPORT_SYMBOL_GPL(account_system_vtime); 122EXPORT_SYMBOL_GPL(account_system_vtime);
123 123
124void vtime_start_cpu(void) 124void vtime_start_cpu(__u64 int_clock, __u64 enter_timer)
125{ 125{
126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 126 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer); 127 struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
128 __u64 idle_time, expires; 128 __u64 idle_time, expires;
129 129
130 if (idle->idle_enter == 0ULL)
131 return;
132
130 /* Account time spent with enabled wait psw loaded as idle time. */ 133 /* Account time spent with enabled wait psw loaded as idle time. */
131 idle_time = S390_lowcore.int_clock - idle->idle_enter; 134 idle_time = int_clock - idle->idle_enter;
132 account_idle_time(idle_time); 135 account_idle_time(idle_time);
133 S390_lowcore.steal_timer += 136 S390_lowcore.steal_timer +=
134 idle->idle_enter - S390_lowcore.last_update_clock; 137 idle->idle_enter - S390_lowcore.last_update_clock;
135 S390_lowcore.last_update_clock = S390_lowcore.int_clock; 138 S390_lowcore.last_update_clock = int_clock;
136 139
137 /* Account system time spent going idle. */ 140 /* Account system time spent going idle. */
138 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle; 141 S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
139 S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer; 142 S390_lowcore.last_update_timer = enter_timer;
140 143
141 /* Restart vtime CPU timer */ 144 /* Restart vtime CPU timer */
142 if (vq->do_spt) { 145 if (vq->do_spt) {
143 /* Program old expire value but first save progress. */ 146 /* Program old expire value but first save progress. */
144 expires = vq->idle - S390_lowcore.async_enter_timer; 147 expires = vq->idle - enter_timer;
145 expires += get_vtimer(); 148 expires += get_vtimer();
146 set_vtimer(expires); 149 set_vtimer(expires);
147 } else { 150 } else {
148 /* Don't account the CPU timer delta while the cpu was idle. */ 151 /* Don't account the CPU timer delta while the cpu was idle. */
149 vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer; 152 vq->elapsed -= vq->idle - enter_timer;
150 } 153 }
151 154
152 idle->sequence++; 155 idle->sequence++;
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index a7251580891c..2f4b687cc7fa 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -33,6 +33,17 @@ config KVM
33 33
34 If unsure, say N. 34 If unsure, say N.
35 35
36config KVM_AWARE_CMF
37 depends on KVM
38 bool "KVM aware sampling"
39 ---help---
40 This option enhances the sampling data from the CPU Measurement
41 Facility with additional information, that allows to distinguish
42 guest(s) and host when using the kernel based virtual machine
43 functionality.
44
45 If unsure, say N.
46
36# OK, it's a little counter-intuitive to do this, but it puts it neatly under 47# OK, it's a little counter-intuitive to do this, but it puts it neatly under
37# the virtualization menu. 48# the virtualization menu.
38source drivers/vhost/Kconfig 49source drivers/vhost/Kconfig
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index 934fd6a885f6..31646bd0e469 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -1,20 +1,60 @@
1/* 1/*
2 * sie64a.S - low level sie call 2 * sie64a.S - low level sie call
3 * 3 *
4 * Copyright IBM Corp. 2008 4 * Copyright IBM Corp. 2008,2010
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only) 7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation. 8 * as published by the Free Software Foundation.
9 * 9 *
10 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> 10 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
11 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
11 */ 12 */
12 13
13#include <linux/errno.h> 14#include <linux/errno.h>
14#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/setup.h>
17#include <asm/asm-offsets.h>
18#include <asm/ptrace.h>
19#include <asm/thread_info.h>
20
21_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
22
23/*
24 * offsets into stackframe
25 * SP_ = offsets into stack sie64 is called with
26 * SPI_ = offsets into irq stack
27 */
28SP_GREGS = __SF_EMPTY
29SP_HOOK = __SF_EMPTY+8
30SP_GPP = __SF_EMPTY+16
31SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
32
15 33
16SP_R5 = 5 * 8 # offset into stackframe 34 .macro SPP newpp
17SP_R6 = 6 * 8 35#ifdef CONFIG_KVM_AWARE_CMF
36 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
37 jz 0f
38 .insn s,0xb2800000,\newpp
39 0:
40#endif
41 .endm
42
43sie_irq_handler:
44 SPP __LC_CMF_HPP # set host id
45 larl %r2,sie_inst
46 clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
47 jne 1f
48 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
49 lg %r2,__LC_THREAD_INFO # pointer thread_info struct
50 tm __TI_flags+7(%r2),_TIF_EXIT_SIE
51 jz 0f
52 larl %r2,sie_exit # work pending, leave sie
53 stg %r2,__LC_RETURN_PSW+8
54 br %r14
550: larl %r2,sie_reenter # re-enter with guest id
56 stg %r2,__LC_RETURN_PSW+8
571: br %r14
18 58
19/* 59/*
20 * sie64a calling convention: 60 * sie64a calling convention:
@@ -23,23 +63,34 @@ SP_R6 = 6 * 8
23 */ 63 */
24 .globl sie64a 64 .globl sie64a
25sie64a: 65sie64a:
26 lgr %r5,%r3 66 stg %r3,SP_GREGS(%r15) # save guest register save area
27 stmg %r5,%r14,SP_R5(%r15) # save register on entry 67 stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
28 lgr %r14,%r2 # pointer to sie control block 68 lgr %r14,%r2 # pointer to sie control block
29 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 69 larl %r5,sie_irq_handler
70 stg %r2,SP_GPP(%r15)
71 stg %r5,SP_HOOK(%r15) # save hook target
72 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
73sie_reenter:
74 mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
75 SPP SP_GPP(%r15) # set guest id
30sie_inst: 76sie_inst:
31 sie 0(%r14) 77 sie 0(%r14)
32 lg %r14,SP_R5(%r15) 78 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
33 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 79 SPP __LC_CMF_HPP # set host id
80sie_exit:
81 lg %r14,SP_GREGS(%r15)
82 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
34 lghi %r2,0 83 lghi %r2,0
35 lmg %r6,%r14,SP_R6(%r15) 84 lmg %r6,%r14,__SF_GPRS(%r15)
36 br %r14 85 br %r14
37 86
38sie_err: 87sie_err:
39 lg %r14,SP_R5(%r15) 88 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
40 stmg %r0,%r13,0(%r14) # save guest gprs 0-13 89 SPP __LC_CMF_HPP # set host id
90 lg %r14,SP_GREGS(%r15)
91 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
41 lghi %r2,-EFAULT 92 lghi %r2,-EFAULT
42 lmg %r6,%r14,SP_R6(%r15) 93 lmg %r6,%r14,__SF_GPRS(%r15)
43 br %r14 94 br %r14
44 95
45 .section __ex_table,"a" 96 .section __ex_table,"a"
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3040d7c78fe0..2505b2ea0ef1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -48,10 +48,6 @@
48#define __PF_RES_FIELD 0x8000000000000000ULL 48#define __PF_RES_FIELD 0x8000000000000000ULL
49#endif /* CONFIG_64BIT */ 49#endif /* CONFIG_64BIT */
50 50
51#ifdef CONFIG_SYSCTL
52extern int sysctl_userprocess_debug;
53#endif
54
55#define VM_FAULT_BADCONTEXT 0x010000 51#define VM_FAULT_BADCONTEXT 0x010000
56#define VM_FAULT_BADMAP 0x020000 52#define VM_FAULT_BADMAP 0x020000
57#define VM_FAULT_BADACCESS 0x040000 53#define VM_FAULT_BADACCESS 0x040000
@@ -120,6 +116,22 @@ static inline int user_space_fault(unsigned long trans_exc_code)
120 return trans_exc_code != 3; 116 return trans_exc_code != 3;
121} 117}
122 118
119static inline void report_user_fault(struct pt_regs *regs, long int_code,
120 int signr, unsigned long address)
121{
122 if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
123 return;
124 if (!unhandled_signal(current, signr))
125 return;
126 if (!printk_ratelimit())
127 return;
128 printk("User process fault: interruption code 0x%lX ", int_code);
129 print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
130 printk("\n");
131 printk("failing address: %lX\n", address);
132 show_regs(regs);
133}
134
123/* 135/*
124 * Send SIGSEGV to task. This is an external routine 136 * Send SIGSEGV to task. This is an external routine
125 * to keep the stack usage of do_page_fault small. 137 * to keep the stack usage of do_page_fault small.
@@ -133,17 +145,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
133 address = trans_exc_code & __FAIL_ADDR_MASK; 145 address = trans_exc_code & __FAIL_ADDR_MASK;
134 current->thread.prot_addr = address; 146 current->thread.prot_addr = address;
135 current->thread.trap_no = int_code; 147 current->thread.trap_no = int_code;
136#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG) 148 report_user_fault(regs, int_code, SIGSEGV, address);
137#if defined(CONFIG_SYSCTL)
138 if (sysctl_userprocess_debug)
139#endif
140 {
141 printk("User process fault: interruption code 0x%lX\n",
142 int_code);
143 printk("failing address: %lX\n", address);
144 show_regs(regs);
145 }
146#endif
147 si.si_signo = SIGSEGV; 149 si.si_signo = SIGSEGV;
148 si.si_code = si_code; 150 si.si_code = si_code;
149 si.si_addr = (void __user *) address; 151 si.si_addr = (void __user *) address;