author		Linus Torvalds <torvalds@linux-foundation.org>	2011-07-24 12:55:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-24 12:55:45 -0400
commit		21c7075fa5a756f1c95f6b463ff42cd320cc0301 (patch)
tree		69524dd01fbebe662abe3b7296664592d3ce562b /arch/s390
parent		ff0c4ad2c3a75ccfe6adca916e50804eb45bb2d9 (diff)
parent		73b7d40ff1bcd44b4245c2714b88cf872fe44685 (diff)
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (21 commits)
  [S390] use siginfo for sigtrap signals
  [S390] dasd: add enhanced DASD statistics interface
  [S390] kvm: make sigp emerg smp capable
  [S390] disable cpu measurement alerts on a dying cpu
  [S390] initial cr0 bits
  [S390] iucv cr0 enablement bit
  [S390] race safe external interrupt registration
  [S390] remove tape block docu
  [S390] ap: toleration support for ap device type 10
  [S390] cleanup program check handler prototypes
  [S390] remove kvm mmu reload on s390
  [S390] Use gmap translation for accessing guest memory
  [S390] use gmap address spaces for kvm guest images
  [S390] kvm guest address space mapping
  [S390] fix s390 assembler code alignments
  [S390] move sie code to entry.S
  [S390] kvm: handle tprot intercepts
  [S390] qdio: clear shared DSCI before scheduling the queue handler
  [S390] reference bit testing for unmapped pages
  [S390] irqs: Do not trace arch_local_{*,irq_*} functions
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/boot/compressed/head31.S	|    4
-rw-r--r--	arch/s390/boot/compressed/head64.S	|    4
-rw-r--r--	arch/s390/include/asm/irqflags.h	|   16
-rw-r--r--	arch/s390/include/asm/kvm_host.h	|   12
-rw-r--r--	arch/s390/include/asm/linkage.h		|    5
-rw-r--r--	arch/s390/include/asm/lowcore.h		|    2
-rw-r--r--	arch/s390/include/asm/mmu.h		|    4
-rw-r--r--	arch/s390/include/asm/pgalloc.h		|    7
-rw-r--r--	arch/s390/include/asm/pgtable.h		|   42
-rw-r--r--	arch/s390/include/asm/processor.h	|    1
-rw-r--r--	arch/s390/include/asm/thread_info.h	|    2
-rw-r--r--	arch/s390/include/asm/tlbflush.h	|    2
-rw-r--r--	arch/s390/kernel/asm-offsets.c		|    2
-rw-r--r--	arch/s390/kernel/base.S			|   25
-rw-r--r--	arch/s390/kernel/compat_wrapper.S	|  836
-rw-r--r--	arch/s390/kernel/entry.S		|   32
-rw-r--r--	arch/s390/kernel/entry.h		|    7
-rw-r--r--	arch/s390/kernel/entry64.S		|  111
-rw-r--r--	arch/s390/kernel/head.S			|    7
-rw-r--r--	arch/s390/kernel/head31.S		|   13
-rw-r--r--	arch/s390/kernel/head64.S		|   13
-rw-r--r--	arch/s390/kernel/irq.c			|   83
-rw-r--r--	arch/s390/kernel/mcount.S		|   16
-rw-r--r--	arch/s390/kernel/mcount64.S		|   16
-rw-r--r--	arch/s390/kernel/reipl.S		|    5
-rw-r--r--	arch/s390/kernel/reipl64.S		|    5
-rw-r--r--	arch/s390/kernel/relocate_kernel.S	|    6
-rw-r--r--	arch/s390/kernel/relocate_kernel64.S	|    6
-rw-r--r--	arch/s390/kernel/s390_ksyms.c		|    4
-rw-r--r--	arch/s390/kernel/sclp.S			|    5
-rw-r--r--	arch/s390/kernel/smp.c			|    3
-rw-r--r--	arch/s390/kernel/switch_cpu.S		|    8
-rw-r--r--	arch/s390/kernel/switch_cpu64.S		|    8
-rw-r--r--	arch/s390/kernel/swsusp_asm64.S		|    8
-rw-r--r--	arch/s390/kernel/traps.c		|   36
-rw-r--r--	arch/s390/kvm/Makefile			|    2
-rw-r--r--	arch/s390/kvm/gaccess.h			|  243
-rw-r--r--	arch/s390/kvm/intercept.c		|   35
-rw-r--r--	arch/s390/kvm/interrupt.c		|    4
-rw-r--r--	arch/s390/kvm/kvm-s390.c		|   48
-rw-r--r--	arch/s390/kvm/kvm-s390.h		|   28
-rw-r--r--	arch/s390/kvm/priv.c			|   49
-rw-r--r--	arch/s390/kvm/sie64a.S			|   98
-rw-r--r--	arch/s390/kvm/sigp.c			|    6
-rw-r--r--	arch/s390/lib/qrnnd.S			|    5
-rw-r--r--	arch/s390/mm/fault.c			|   18
-rw-r--r--	arch/s390/mm/hugetlbpage.c		|    2
-rw-r--r--	arch/s390/mm/pgtable.c			|  421
-rw-r--r--	arch/s390/mm/vmem.c			|    8
49 files changed, 1301 insertions, 1022 deletions
diff --git a/arch/s390/boot/compressed/head31.S b/arch/s390/boot/compressed/head31.S
index 2a5523a32bcc..e8c9e18b8039 100644
--- a/arch/s390/boot/compressed/head31.S
+++ b/arch/s390/boot/compressed/head31.S
@@ -7,14 +7,14 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include "sizes.h"
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 	# setup stack
diff --git a/arch/s390/boot/compressed/head64.S b/arch/s390/boot/compressed/head64.S
index 2982cb140550..f86a4eef28a9 100644
--- a/arch/s390/boot/compressed/head64.S
+++ b/arch/s390/boot/compressed/head64.S
@@ -7,14 +7,14 @@
  */
 
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include "sizes.h"
 
 __HEAD
-	.globl	startup_continue
-startup_continue:
+ENTRY(startup_continue)
 	basr	%r13,0			# get base
 .LPG1:
 	# setup stack
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 865d6d891ace..38fdf451febb 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -29,42 +29,42 @@
 })
 
 /* set system mask. */
-static inline void __arch_local_irq_ssm(unsigned long flags)
+static inline notrace void __arch_local_irq_ssm(unsigned long flags)
 {
 	asm volatile("ssm   %0" : : "Q" (flags) : "memory");
 }
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return __arch_local_irq_stosm(0x00);
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	return __arch_local_irq_stnsm(0xfc);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	arch_local_irq_save();
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	__arch_local_irq_stosm(0x03);
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
 	__arch_local_irq_ssm(flags);
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	return !(flags & (3UL << (BITS_PER_LONG - 8)));
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
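
The only functional change in the hunk above is the added notrace attribute, which keeps the function tracer away from the helpers that toggle the interrupt mask (tracing them could otherwise recurse through the very code being traced). A minimal, illustrative sketch of the save/restore pattern these helpers back (normally reached through the generic local_irq_save()/local_irq_restore() wrappers; the example function name is hypothetical, not from this patch):

	/* Illustrative only: standard irq save/restore pattern on s390. */
	static void example_critical_section(void)
	{
		unsigned long flags;

		flags = arch_local_irq_save();	/* stnsm 0xfc: mask external and I/O interrupts */
		/* ... work that must not be interrupted ... */
		arch_local_irq_restore(flags);	/* ssm: restore the previous system mask */
	}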
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index cef7dbf69dfc..00ff00dfb24c 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
 	__u32	scaol;			/* 0x0064 */
 	__u8	reserved68[4];		/* 0x0068 */
 	__u32	todpr;			/* 0x006c */
-	__u8	reserved70[16];		/* 0x0070 */
-	__u64	gmsor;			/* 0x0080 */
-	__u64	gmslm;			/* 0x0088 */
+	__u8	reserved70[32];		/* 0x0070 */
 	psw_t	gpsw;			/* 0x0090 */
 	__u64	gg14;			/* 0x00a0 */
 	__u64	gg15;			/* 0x00a8 */
@@ -138,6 +136,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_chsc;
 	u32 instruction_stsi;
 	u32 instruction_stfl;
+	u32 instruction_tprot;
 	u32 instruction_sigp_sense;
 	u32 instruction_sigp_emergency;
 	u32 instruction_sigp_stop;
@@ -175,6 +174,10 @@ struct kvm_s390_prefix_info {
 	__u32 address;
 };
 
+struct kvm_s390_emerg_info {
+	__u16 code;
+};
+
 struct kvm_s390_interrupt_info {
 	struct list_head list;
 	u64	type;
@@ -182,6 +185,7 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_io_info io;
 		struct kvm_s390_ext_info ext;
 		struct kvm_s390_pgm_info pgm;
+		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_prefix_info prefix;
 	};
 };
@@ -226,6 +230,7 @@ struct kvm_vcpu_arch {
 		struct cpuid	cpu_id;
 		u64		stidp_data;
 	};
+	struct gmap *gmap;
 };
 
 struct kvm_vm_stat {
@@ -236,6 +241,7 @@ struct kvm_arch{
 	struct sca_block *sca;
 	debug_info_t *dbf;
 	struct kvm_s390_float_interrupt float_int;
+	struct gmap *gmap;
 };
 
 extern int sie64a(struct kvm_s390_sie_block *, unsigned long *);
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index 291c2d01c44f..fc8a8284778e 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -1,6 +1,9 @@
 #ifndef __ASM_LINKAGE_H
 #define __ASM_LINKAGE_H
 
-/* Nothing to see here... */
+#include <linux/stringify.h>
+
+#define __ALIGN .align 4, 0x07
+#define __ALIGN_STR __stringify(__ALIGN)
 
 #endif
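
For context: the generic ENTRY() macro in include/linux/linkage.h aligns via these __ALIGN definitions, which is what allows the assembly files below to replace their open-coded .globl/label pairs. Roughly, the generic expansion looks like the sketch below (shown as an aid, not part of this diff), so ENTRY(name) ends up as a .globl, a .align 4 padded with 0x07 bytes, and the label itself:

	#define ALIGN	__ALIGN
	#define ENTRY(name)	\
		.globl name;	\
		ALIGN;		\
		name: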
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 228cf0b295db..f26280d9e88d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -268,7 +268,7 @@ struct _lowcore {
 	__u64	vdso_per_cpu_data;	/* 0x0358 */
 	__u64	machine_flags;		/* 0x0360 */
 	__u64	ftrace_func;		/* 0x0368 */
-	__u64	sie_hook;		/* 0x0370 */
+	__u64	gmap;			/* 0x0370 */
 	__u64	cmf_hpp;		/* 0x0378 */
 
 	/* Interrupt response block. */
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 82d0847896a0..4506791adcd5 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,6 +6,7 @@ typedef struct {
 	unsigned int flush_mm;
 	spinlock_t list_lock;
 	struct list_head pgtable_list;
+	struct list_head gmap_list;
 	unsigned long asce_bits;
 	unsigned long asce_limit;
 	unsigned long vdso_base;
@@ -17,6 +18,7 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)						      \
 	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock),   \
-	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),
+	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list),   \
+	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 #endif
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 38e71ebcd3c2..8eef9b5b3cf4 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -20,7 +20,7 @@
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
 
-unsigned long *page_table_alloc(struct mm_struct *);
+unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
@@ -115,6 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.list_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	INIT_LIST_HEAD(&mm->context.gmap_list);
 	return (pgd_t *) crst_table_alloc(mm);
 }
 #define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
@@ -133,8 +134,8 @@ static inline void pmd_populate(struct mm_struct *mm,
 /*
  * page table entry allocation/free routines.
  */
-#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
-#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
+#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
+#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
 
 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 801fbe1d837d..519eb5f187ef 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -654,6 +654,48 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
+/**
+ * struct gmap_struct - guest address space
+ * @mm: pointer to the parent mm_struct
+ * @table: pointer to the page directory
+ * @crst_list: list of all crst tables used in the guest address space
+ */
+struct gmap {
+	struct list_head list;
+	struct mm_struct *mm;
+	unsigned long *table;
+	struct list_head crst_list;
+};
+
+/**
+ * struct gmap_rmap - reverse mapping for segment table entries
+ * @next: pointer to the next gmap_rmap structure in the list
+ * @entry: pointer to a segment table entry
+ */
+struct gmap_rmap {
+	struct list_head list;
+	unsigned long *entry;
+};
+
+/**
+ * struct gmap_pgtable - gmap information attached to a page table
+ * @vmaddr: address of the 1MB segment in the process virtual memory
+ * @mapper: list of segment table entries maping a page table
+ */
+struct gmap_pgtable {
+	unsigned long vmaddr;
+	struct list_head mapper;
+};
+
+struct gmap *gmap_alloc(struct mm_struct *mm);
+void gmap_free(struct gmap *gmap);
+void gmap_enable(struct gmap *gmap);
+void gmap_disable(struct gmap *gmap);
+int gmap_map_segment(struct gmap *gmap, unsigned long from,
+		     unsigned long to, unsigned long length);
+int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
+unsigned long gmap_fault(unsigned long address, struct gmap *);
+
 /*
  * Certain architectures need to do special things when PTEs
  * within a page table are directly modified. Thus, the following
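
A minimal sketch of how this new guest-mapping API fits together, based only on the declarations added above; the function and variable names here are illustrative, and the argument meaning of gmap_map_segment() (from = process address, to = guest address) is inferred from how the kvm-s390 changes in this same series use it:

	/* Illustrative only: wiring up a guest address space with the gmap API. */
	static int example_setup_guest(struct mm_struct *mm,
				       unsigned long from, unsigned long len)
	{
		struct gmap *gmap;

		gmap = gmap_alloc(mm);		/* new guest address space on top of mm */
		if (!gmap)
			return -ENOMEM;
		/* back guest real memory 0..len with the host mapping at 'from' */
		if (gmap_map_segment(gmap, from, 0, len)) {
			gmap_free(gmap);
			return -EINVAL;
		}
		gmap_enable(gmap);	/* switch to the guest ASCE before entering SIE */
		/* ... guest runs; gmap_fault(addr, gmap) resolves guest faults ... */
		gmap_disable(gmap);	/* back to the normal user ASCE */
		gmap_unmap_segment(gmap, 0, len);
		gmap_free(gmap);
		return 0;
	}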
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 1300c3025334..55dfcc8bdc0d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,6 +80,7 @@ struct thread_struct {
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;	/* address of protection-excep. */
 	unsigned int trap_no;
+	unsigned long gmap_addr;	/* address of last gmap fault. */
 	struct per_regs per_user;	/* User specified PER registers */
 	struct per_event per_event;	/* Cause of the last PER trap */
 	/* pfault_wait is used to block the process on a pfault event */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ad1382f7932e..1a5dbb6f1495 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -94,6 +94,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
 #define TIF_SECCOMP		10	/* secure computing */
 #define TIF_SYSCALL_TRACEPOINT	11	/* syscall tracepoint instrumentation */
+#define TIF_SIE			12	/* guest execution active */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling
 					   TIF_NEED_RESCHED */
 #define TIF_31BIT		17	/* 32bit process */
@@ -113,6 +114,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SIE		(1<<TIF_SIE)
 #define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
 #define _TIF_31BIT		(1<<TIF_31BIT)
 #define _TIF_SINGLE_STEP	(1<<TIF_FREEZE)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index b7a4f2eb0057..304445382382 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -80,7 +80,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * on all cpus instead of doing a local flush if the mm
 	 * only ran on the local cpu.
 	 */
-	if (MACHINE_HAS_IDTE)
+	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
 		__tlb_flush_idte((unsigned long) mm->pgd |
 				 mm->context.asce_bits);
 	else
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index edfbd17d7082..05d8f38734ec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -151,7 +151,7 @@ int main(void)
 	DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
-	DEFINE(__LC_SIE_HOOK, offsetof(struct _lowcore, sie_hook));
+	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 #endif /* CONFIG_32BIT */
 	return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 15e46ca94335..209938c1dfc8 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -6,13 +6,13 @@
  *		 Michael Holzheu <holzheu@de.ibm.com>
  */
 
+#include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_64BIT
 
-	.globl	s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	lg	%r15,__LC_PANIC_STACK	# load panic stack
 	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -26,13 +26,13 @@ s390_base_mcck_handler:
 	lpswe	__LC_MCK_OLD_PSW
 
 	.section .bss
+	.align 8
 	.globl	s390_base_mcck_handler_fn
 s390_base_mcck_handler_fn:
 	.quad	0
 	.previous
 
-	.globl	s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
 	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -46,13 +46,13 @@ s390_base_ext_handler:
 	lpswe	__LC_EXT_OLD_PSW
 
 	.section .bss
+	.align 8
 	.globl	s390_base_ext_handler_fn
 s390_base_ext_handler_fn:
 	.quad	0
 	.previous
 
-	.globl	s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
 	stmg	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	aghi	%r15,-STACK_FRAME_OVERHEAD
@@ -70,6 +70,7 @@ disabled_wait_psw:
 	.quad	0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
 
 	.section .bss
+	.align 8
 	.globl	s390_base_pgm_handler_fn
 s390_base_pgm_handler_fn:
 	.quad	0
@@ -77,8 +78,7 @@ s390_base_pgm_handler_fn:
 
 #else /* CONFIG_64BIT */
 
-	.globl	s390_base_mcck_handler
-s390_base_mcck_handler:
+ENTRY(s390_base_mcck_handler)
 	basr	%r13,0
 0:	l	%r15,__LC_PANIC_STACK	# load panic stack
 	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -93,13 +93,13 @@ s390_base_mcck_handler:
 2:	.long	s390_base_mcck_handler_fn
 
 	.section .bss
+	.align 4
 	.globl	s390_base_mcck_handler_fn
 s390_base_mcck_handler_fn:
 	.long	0
 	.previous
 
-	.globl	s390_base_ext_handler
-s390_base_ext_handler:
+ENTRY(s390_base_ext_handler)
 	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -115,13 +115,13 @@ s390_base_ext_handler:
 2:	.long	s390_base_ext_handler_fn
 
 	.section .bss
+	.align 4
 	.globl	s390_base_ext_handler_fn
 s390_base_ext_handler_fn:
 	.long	0
 	.previous
 
-	.globl	s390_base_pgm_handler
-s390_base_pgm_handler:
+ENTRY(s390_base_pgm_handler)
 	stm	%r0,%r15,__LC_SAVE_AREA
 	basr	%r13,0
 0:	ahi	%r15,-STACK_FRAME_OVERHEAD
@@ -142,6 +142,7 @@ disabled_wait_psw:
 	.long	0x000a0000,0x00000000 + s390_base_pgm_handler
 
 	.section .bss
+	.align 4
 	.globl	s390_base_pgm_handler_fn
 s390_base_pgm_handler_fn:
 	.long	0
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 1f5eb789c3a7..08ab9aa6a0d5 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -7,86 +7,74 @@
 * Thomas Spatzier (tspat@de.ibm.com)
 */
 
-	.globl	sys32_exit_wrapper
-sys32_exit_wrapper:
+#include <linux/linkage.h>
+
+ENTRY(sys32_exit_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_exit		# branch to sys_exit
 
-	.globl	sys32_read_wrapper
-sys32_read_wrapper:
+ENTRY(sys32_read_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# char *
 	llgfr	%r4,%r4			# size_t
 	jg	sys32_read		# branch to sys_read
 
-	.globl	sys32_write_wrapper
-sys32_write_wrapper:
+ENTRY(sys32_write_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
 	llgfr	%r4,%r4			# size_t
 	jg	sys32_write		# branch to system call
 
-	.globl	sys32_open_wrapper
-sys32_open_wrapper:
+ENTRY(sys32_open_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
 	jg	sys_open		# branch to system call
 
-	.globl	sys32_close_wrapper
-sys32_close_wrapper:
+ENTRY(sys32_close_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_close		# branch to system call
 
-	.globl	sys32_creat_wrapper
-sys32_creat_wrapper:
+ENTRY(sys32_creat_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_creat		# branch to system call
 
-	.globl	sys32_link_wrapper
-sys32_link_wrapper:
+ENTRY(sys32_link_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_link		# branch to system call
 
-	.globl	sys32_unlink_wrapper
-sys32_unlink_wrapper:
+ENTRY(sys32_unlink_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_unlink		# branch to system call
 
-	.globl	sys32_chdir_wrapper
-sys32_chdir_wrapper:
+ENTRY(sys32_chdir_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_chdir		# branch to system call
 
-	.globl	sys32_time_wrapper
-sys32_time_wrapper:
+ENTRY(sys32_time_wrapper)
 	llgtr	%r2,%r2			# int *
 	jg	compat_sys_time		# branch to system call
 
-	.globl	sys32_mknod_wrapper
-sys32_mknod_wrapper:
+ENTRY(sys32_mknod_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# dev
 	jg	sys_mknod		# branch to system call
 
-	.globl	sys32_chmod_wrapper
-sys32_chmod_wrapper:
+ENTRY(sys32_chmod_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# mode_t
 	jg	sys_chmod		# branch to system call
 
-	.globl	sys32_lchown16_wrapper
-sys32_lchown16_wrapper:
+ENTRY(sys32_lchown16_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	llgfr	%r4,%r4			# __kernel_old_uid_emu31_t
 	jg	sys32_lchown16		# branch to system call
 
-	.globl	sys32_lseek_wrapper
-sys32_lseek_wrapper:
+ENTRY(sys32_lseek_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	lgfr	%r3,%r3			# off_t
 	llgfr	%r4,%r4			# unsigned int
@@ -94,8 +82,7 @@ sys32_lseek_wrapper:
 
 #sys32_getpid_wrapper				# void
 
-	.globl	sys32_mount_wrapper
-sys32_mount_wrapper:
+ENTRY(sys32_mount_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# char *
 	llgtr	%r4,%r4			# char *
@@ -103,102 +90,85 @@ sys32_mount_wrapper:
 	llgtr	%r6,%r6			# void *
 	jg	compat_sys_mount	# branch to system call
 
-	.globl	sys32_oldumount_wrapper
-sys32_oldumount_wrapper:
+ENTRY(sys32_oldumount_wrapper)
 	llgtr	%r2,%r2			# char *
 	jg	sys_oldumount		# branch to system call
 
-	.globl	sys32_setuid16_wrapper
-sys32_setuid16_wrapper:
+ENTRY(sys32_setuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	jg	sys32_setuid16		# branch to system call
 
 #sys32_getuid16_wrapper			# void
 
-	.globl	sys32_ptrace_wrapper
-sys32_ptrace_wrapper:
+ENTRY(sys32_ptrace_wrapper)
 	lgfr	%r2,%r2			# long
 	lgfr	%r3,%r3			# long
 	llgtr	%r4,%r4			# long
 	llgfr	%r5,%r5			# long
 	jg	compat_sys_ptrace	# branch to system call
 
-	.globl	sys32_alarm_wrapper
-sys32_alarm_wrapper:
+ENTRY(sys32_alarm_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_alarm		# branch to system call
 
-	.globl	compat_sys_utime_wrapper
-compat_sys_utime_wrapper:
+ENTRY(compat_sys_utime_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct compat_utimbuf *
 	jg	compat_sys_utime	# branch to system call
 
-	.globl	sys32_access_wrapper
-sys32_access_wrapper:
+ENTRY(sys32_access_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_access		# branch to system call
 
-	.globl	sys32_nice_wrapper
-sys32_nice_wrapper:
+ENTRY(sys32_nice_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_nice		# branch to system call
 
#sys32_sync_wrapper			# void
 
-	.globl	sys32_kill_wrapper
-sys32_kill_wrapper:
+ENTRY(sys32_kill_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_kill		# branch to system call
 
-	.globl	sys32_rename_wrapper
-sys32_rename_wrapper:
+ENTRY(sys32_rename_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_rename		# branch to system call
 
-	.globl	sys32_mkdir_wrapper
-sys32_mkdir_wrapper:
+ENTRY(sys32_mkdir_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_mkdir		# branch to system call
 
-	.globl	sys32_rmdir_wrapper
-sys32_rmdir_wrapper:
+ENTRY(sys32_rmdir_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_rmdir		# branch to system call
 
-	.globl	sys32_dup_wrapper
-sys32_dup_wrapper:
+ENTRY(sys32_dup_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_dup			# branch to system call
 
-	.globl	sys32_pipe_wrapper
-sys32_pipe_wrapper:
+ENTRY(sys32_pipe_wrapper)
 	llgtr	%r2,%r2			# u32 *
 	jg	sys_pipe		# branch to system call
 
-	.globl	compat_sys_times_wrapper
-compat_sys_times_wrapper:
+ENTRY(compat_sys_times_wrapper)
 	llgtr	%r2,%r2			# struct compat_tms *
 	jg	compat_sys_times	# branch to system call
 
-	.globl	sys32_brk_wrapper
-sys32_brk_wrapper:
+ENTRY(sys32_brk_wrapper)
 	llgtr	%r2,%r2			# unsigned long
 	jg	sys_brk			# branch to system call
 
-	.globl	sys32_setgid16_wrapper
-sys32_setgid16_wrapper:
+ENTRY(sys32_setgid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	jg	sys32_setgid16		# branch to system call
 
 #sys32_getgid16_wrapper		# void
 
-	.globl	sys32_signal_wrapper
-sys32_signal_wrapper:
+ENTRY(sys32_signal_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __sighandler_t
 	jg	sys_signal
@@ -207,55 +177,46 @@ sys32_signal_wrapper:
 
 #sys32_getegid16_wrapper	# void
 
-	.globl	sys32_acct_wrapper
-sys32_acct_wrapper:
+ENTRY(sys32_acct_wrapper)
 	llgtr	%r2,%r2			# char *
 	jg	sys_acct		# branch to system call
 
-	.globl	sys32_umount_wrapper
-sys32_umount_wrapper:
+ENTRY(sys32_umount_wrapper)
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_umount		# branch to system call
 
-	.globl	compat_sys_ioctl_wrapper
-compat_sys_ioctl_wrapper:
+ENTRY(compat_sys_ioctl_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_ioctl	# branch to system call
 
-	.globl	compat_sys_fcntl_wrapper
-compat_sys_fcntl_wrapper:
+ENTRY(compat_sys_fcntl_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_fcntl	# branch to system call
 
-	.globl	sys32_setpgid_wrapper
-sys32_setpgid_wrapper:
+ENTRY(sys32_setpgid_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	lgfr	%r3,%r3			# pid_t
 	jg	sys_setpgid		# branch to system call
 
-	.globl	sys32_umask_wrapper
-sys32_umask_wrapper:
+ENTRY(sys32_umask_wrapper)
 	lgfr	%r2,%r2			# int
 	jg	sys_umask		# branch to system call
 
-	.globl	sys32_chroot_wrapper
-sys32_chroot_wrapper:
+ENTRY(sys32_chroot_wrapper)
 	llgtr	%r2,%r2			# char *
 	jg	sys_chroot		# branch to system call
 
-	.globl	sys32_ustat_wrapper
-sys32_ustat_wrapper:
+ENTRY(sys32_ustat_wrapper)
 	llgfr	%r2,%r2			# dev_t
 	llgtr	%r3,%r3			# struct ustat *
 	jg	compat_sys_ustat
 
-	.globl	sys32_dup2_wrapper
-sys32_dup2_wrapper:
+ENTRY(sys32_dup2_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_dup2		# branch to system call
@@ -266,262 +227,220 @@ sys32_dup2_wrapper:
 
 #sys32_setsid_wrapper		# void
 
-	.globl	sys32_sigaction_wrapper
-sys32_sigaction_wrapper:
+ENTRY(sys32_sigaction_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct old_sigaction *
 	llgtr	%r4,%r4			# struct old_sigaction32 *
 	jg	sys32_sigaction		# branch to system call
 
-	.globl	sys32_setreuid16_wrapper
-sys32_setreuid16_wrapper:
+ENTRY(sys32_setreuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_uid_emu31_t
 	jg	sys32_setreuid16	# branch to system call
 
-	.globl	sys32_setregid16_wrapper
-sys32_setregid16_wrapper:
+ENTRY(sys32_setregid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	llgfr	%r3,%r3			# __kernel_old_gid_emu31_t
 	jg	sys32_setregid16	# branch to system call
 
-	.globl	sys_sigsuspend_wrapper
-sys_sigsuspend_wrapper:
+ENTRY(sys_sigsuspend_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# old_sigset_t
 	jg	sys_sigsuspend
 
-	.globl	compat_sys_sigpending_wrapper
-compat_sys_sigpending_wrapper:
+ENTRY(compat_sys_sigpending_wrapper)
 	llgtr	%r2,%r2			# compat_old_sigset_t *
 	jg	compat_sys_sigpending	# branch to system call
 
-	.globl	sys32_sethostname_wrapper
-sys32_sethostname_wrapper:
+ENTRY(sys32_sethostname_wrapper)
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_sethostname		# branch to system call
 
-	.globl	compat_sys_setrlimit_wrapper
-compat_sys_setrlimit_wrapper:
+ENTRY(compat_sys_setrlimit_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_setrlimit	# branch to system call
 
-	.globl	compat_sys_old_getrlimit_wrapper
-compat_sys_old_getrlimit_wrapper:
+ENTRY(compat_sys_old_getrlimit_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_old_getrlimit # branch to system call
 
-	.globl	compat_sys_getrlimit_wrapper
-compat_sys_getrlimit_wrapper:
+ENTRY(compat_sys_getrlimit_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct rlimit_emu31 *
 	jg	compat_sys_getrlimit	# branch to system call
 
-	.globl	sys32_mmap2_wrapper
-sys32_mmap2_wrapper:
+ENTRY(sys32_mmap2_wrapper)
 	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
 	jg	sys32_mmap2			# branch to system call
 
-	.globl	compat_sys_getrusage_wrapper
-compat_sys_getrusage_wrapper:
+ENTRY(compat_sys_getrusage_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct rusage_emu31 *
 	jg	compat_sys_getrusage	# branch to system call
 
-	.globl	compat_sys_gettimeofday_wrapper
-compat_sys_gettimeofday_wrapper:
+ENTRY(compat_sys_gettimeofday_wrapper)
 	llgtr	%r2,%r2			# struct timeval_emu31 *
 	llgtr	%r3,%r3			# struct timezone *
 	jg	compat_sys_gettimeofday	# branch to system call
 
-	.globl	compat_sys_settimeofday_wrapper
-compat_sys_settimeofday_wrapper:
+ENTRY(compat_sys_settimeofday_wrapper)
 	llgtr	%r2,%r2			# struct timeval_emu31 *
 	llgtr	%r3,%r3			# struct timezone *
 	jg	compat_sys_settimeofday	# branch to system call
 
-	.globl	sys32_getgroups16_wrapper
-sys32_getgroups16_wrapper:
+ENTRY(sys32_getgroups16_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	jg	sys32_getgroups16	# branch to system call
 
-	.globl	sys32_setgroups16_wrapper
-sys32_setgroups16_wrapper:
+ENTRY(sys32_setgroups16_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# __kernel_old_gid_emu31_t *
 	jg	sys32_setgroups16	# branch to system call
 
-	.globl	sys32_symlink_wrapper
-sys32_symlink_wrapper:
+ENTRY(sys32_symlink_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# const char *
 	jg	sys_symlink		# branch to system call
 
-	.globl	sys32_readlink_wrapper
-sys32_readlink_wrapper:
+ENTRY(sys32_readlink_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgtr	%r3,%r3			# char *
 	lgfr	%r4,%r4			# int
 	jg	sys_readlink		# branch to system call
 
-	.globl	sys32_uselib_wrapper
-sys32_uselib_wrapper:
+ENTRY(sys32_uselib_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_uselib		# branch to system call
 
-	.globl	sys32_swapon_wrapper
-sys32_swapon_wrapper:
+ENTRY(sys32_swapon_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# int
 	jg	sys_swapon		# branch to system call
 
-	.globl	sys32_reboot_wrapper
-sys32_reboot_wrapper:
+ENTRY(sys32_reboot_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	llgfr	%r4,%r4			# unsigned int
 	llgtr	%r5,%r5			# void *
 	jg	sys_reboot		# branch to system call
 
-	.globl	old32_readdir_wrapper
-old32_readdir_wrapper:
+ENTRY(old32_readdir_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_old_readdir	# branch to system call
 
-	.globl	old32_mmap_wrapper
-old32_mmap_wrapper:
+ENTRY(old32_mmap_wrapper)
 	llgtr	%r2,%r2			# struct mmap_arg_struct_emu31 *
 	jg	old32_mmap		# branch to system call
 
-	.globl	sys32_munmap_wrapper
-sys32_munmap_wrapper:
+ENTRY(sys32_munmap_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	jg	sys_munmap		# branch to system call
 
-	.globl	sys32_truncate_wrapper
-sys32_truncate_wrapper:
+ENTRY(sys32_truncate_wrapper)
 	llgtr	%r2,%r2			# const char *
 	lgfr	%r3,%r3			# long
 	jg	sys_truncate		# branch to system call
 
-	.globl	sys32_ftruncate_wrapper
-sys32_ftruncate_wrapper:
+ENTRY(sys32_ftruncate_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	jg	sys_ftruncate		# branch to system call
 
-	.globl	sys32_fchmod_wrapper
-sys32_fchmod_wrapper:
+ENTRY(sys32_fchmod_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# mode_t
 	jg	sys_fchmod		# branch to system call
 
-	.globl	sys32_fchown16_wrapper
-sys32_fchown16_wrapper:
+ENTRY(sys32_fchown16_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# compat_uid_t
 	llgfr	%r4,%r4			# compat_uid_t
 	jg	sys32_fchown16		# branch to system call
 
-	.globl	sys32_getpriority_wrapper
-sys32_getpriority_wrapper:
+ENTRY(sys32_getpriority_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	jg	sys_getpriority		# branch to system call
 
-	.globl	sys32_setpriority_wrapper
-sys32_setpriority_wrapper:
+ENTRY(sys32_setpriority_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
 	jg	sys_setpriority		# branch to system call
 
-	.globl	compat_sys_statfs_wrapper
-compat_sys_statfs_wrapper:
+ENTRY(compat_sys_statfs_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct compat_statfs *
 	jg	compat_sys_statfs	# branch to system call
 
-	.globl	compat_sys_fstatfs_wrapper
-compat_sys_fstatfs_wrapper:
+ENTRY(compat_sys_fstatfs_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct compat_statfs *
 	jg	compat_sys_fstatfs	# branch to system call
 
-	.globl	compat_sys_socketcall_wrapper
-compat_sys_socketcall_wrapper:
+ENTRY(compat_sys_socketcall_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# u32 *
 	jg	compat_sys_socketcall	# branch to system call
 
-	.globl	sys32_syslog_wrapper
-sys32_syslog_wrapper:
+ENTRY(sys32_syslog_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# char *
 	lgfr	%r4,%r4			# int
 	jg	sys_syslog		# branch to system call
 
-	.globl	compat_sys_setitimer_wrapper
-compat_sys_setitimer_wrapper:
+ENTRY(compat_sys_setitimer_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct itimerval_emu31 *
 	llgtr	%r4,%r4			# struct itimerval_emu31 *
 	jg	compat_sys_setitimer	# branch to system call
 
-	.globl	compat_sys_getitimer_wrapper
-compat_sys_getitimer_wrapper:
+ENTRY(compat_sys_getitimer_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# struct itimerval_emu31 *
 	jg	compat_sys_getitimer	# branch to system call
 
-	.globl	compat_sys_newstat_wrapper
-compat_sys_newstat_wrapper:
+ENTRY(compat_sys_newstat_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newstat	# branch to system call
 
-	.globl	compat_sys_newlstat_wrapper
-compat_sys_newlstat_wrapper:
+ENTRY(compat_sys_newlstat_wrapper)
 	llgtr	%r2,%r2			# char *
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newlstat	# branch to system call
 
-	.globl	compat_sys_newfstat_wrapper
-compat_sys_newfstat_wrapper:
+ENTRY(compat_sys_newfstat_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# struct stat_emu31 *
 	jg	compat_sys_newfstat	# branch to system call
 
 #sys32_vhangup_wrapper	# void
 
-	.globl	compat_sys_wait4_wrapper
-compat_sys_wait4_wrapper:
+ENTRY(compat_sys_wait4_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	llgtr	%r3,%r3			# unsigned int *
 	lgfr	%r4,%r4			# int
 	llgtr	%r5,%r5			# struct rusage *
 	jg	compat_sys_wait4	# branch to system call
 
-	.globl	sys32_swapoff_wrapper
-sys32_swapoff_wrapper:
+ENTRY(sys32_swapoff_wrapper)
 	llgtr	%r2,%r2			# const char *
 	jg	sys_swapoff		# branch to system call
 
-	.globl	compat_sys_sysinfo_wrapper
-compat_sys_sysinfo_wrapper:
+ENTRY(compat_sys_sysinfo_wrapper)
 	llgtr	%r2,%r2			# struct sysinfo_emu31 *
 	jg	compat_sys_sysinfo	# branch to system call
 
-	.globl	sys32_ipc_wrapper
-sys32_ipc_wrapper:
+ENTRY(sys32_ipc_wrapper)
 	llgfr	%r2,%r2			# uint
 	lgfr	%r3,%r3			# int
 	lgfr	%r4,%r4			# int
@@ -529,8 +448,7 @@ sys32_ipc_wrapper:
 	llgfr	%r6,%r6			# u32
 	jg	sys32_ipc		# branch to system call
 
-	.globl	sys32_fsync_wrapper
-sys32_fsync_wrapper:
+ENTRY(sys32_fsync_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fsync		# branch to system call
 
@@ -538,97 +456,81 @@ sys32_fsync_wrapper:
 
 #sys32_clone_wrapper			# done in clone_glue
 
-	.globl	sys32_setdomainname_wrapper
-sys32_setdomainname_wrapper:
+ENTRY(sys32_setdomainname_wrapper)
 	llgtr	%r2,%r2			# char *
 	lgfr	%r3,%r3			# int
 	jg	sys_setdomainname	# branch to system call
 
-	.globl	sys32_newuname_wrapper
-sys32_newuname_wrapper:
+ENTRY(sys32_newuname_wrapper)
 	llgtr	%r2,%r2			# struct new_utsname *
 	jg	sys_newuname		# branch to system call
 
-	.globl	compat_sys_adjtimex_wrapper
-compat_sys_adjtimex_wrapper:
+ENTRY(compat_sys_adjtimex_wrapper)
 	llgtr	%r2,%r2			# struct compat_timex *
 	jg	compat_sys_adjtimex	# branch to system call
 
-	.globl	sys32_mprotect_wrapper
-sys32_mprotect_wrapper:
+ENTRY(sys32_mprotect_wrapper)
 	llgtr	%r2,%r2			# unsigned long (actually pointer
 	llgfr	%r3,%r3			# size_t
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys_mprotect		# branch to system call
 
-	.globl	compat_sys_sigprocmask_wrapper
-compat_sys_sigprocmask_wrapper:
+ENTRY(compat_sys_sigprocmask_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# compat_old_sigset_t *
 	llgtr	%r4,%r4			# compat_old_sigset_t *
 	jg	compat_sys_sigprocmask	# branch to system call
 
-	.globl	sys_init_module_wrapper
-sys_init_module_wrapper:
+ENTRY(sys_init_module_wrapper)
 	llgtr	%r2,%r2			# void *
 	llgfr	%r3,%r3			# unsigned long
 	llgtr	%r4,%r4			# char *
 	jg	sys_init_module		# branch to system call
 
-	.globl	sys_delete_module_wrapper
-sys_delete_module_wrapper:
+ENTRY(sys_delete_module_wrapper)
 	llgtr	%r2,%r2			# const char *
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_delete_module	# branch to system call
 
-	.globl	sys32_quotactl_wrapper
-sys32_quotactl_wrapper:
+ENTRY(sys32_quotactl_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# const char *
 	llgfr	%r4,%r4			# qid_t
 	llgtr	%r5,%r5			# caddr_t
 	jg	sys_quotactl		# branch to system call
 
-	.globl	sys32_getpgid_wrapper
-sys32_getpgid_wrapper:
+ENTRY(sys32_getpgid_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_getpgid		# branch to system call
 
-	.globl	sys32_fchdir_wrapper
-sys32_fchdir_wrapper:
+ENTRY(sys32_fchdir_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_fchdir		# branch to system call
 
-	.globl	sys32_bdflush_wrapper
-sys32_bdflush_wrapper:
+ENTRY(sys32_bdflush_wrapper)
 	lgfr	%r2,%r2			# int
 	lgfr	%r3,%r3			# long
 	jg	sys_bdflush		# branch to system call
 
-	.globl	sys32_sysfs_wrapper
-sys32_sysfs_wrapper:
+ENTRY(sys32_sysfs_wrapper)
 	lgfr	%r2,%r2			# int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
 	jg	sys_sysfs		# branch to system call
 
-	.globl	sys32_personality_wrapper
-sys32_personality_wrapper:
+ENTRY(sys32_personality_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	jg	sys_s390_personality	# branch to system call
 
-	.globl	sys32_setfsuid16_wrapper
-sys32_setfsuid16_wrapper:
+ENTRY(sys32_setfsuid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_uid_emu31_t
 	jg	sys32_setfsuid16	# branch to system call
 
-	.globl	sys32_setfsgid16_wrapper
-sys32_setfsgid16_wrapper:
+ENTRY(sys32_setfsgid16_wrapper)
 	llgfr	%r2,%r2			# __kernel_old_gid_emu31_t
 	jg	sys32_setfsgid16	# branch to system call
 
-	.globl	sys32_llseek_wrapper
-sys32_llseek_wrapper:
+ENTRY(sys32_llseek_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned long
 	llgfr	%r4,%r4			# unsigned long
@@ -636,15 +538,13 @@ sys32_llseek_wrapper:
 	llgfr	%r6,%r6			# unsigned int
 	jg	sys_llseek		# branch to system call
 
-	.globl	sys32_getdents_wrapper
-sys32_getdents_wrapper:
+ENTRY(sys32_getdents_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgtr	%r3,%r3			# void *
 	llgfr	%r4,%r4			# unsigned int
 	jg	compat_sys_getdents	# branch to system call
 
-	.globl	compat_sys_select_wrapper
-compat_sys_select_wrapper:
+ENTRY(compat_sys_select_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# compat_fd_set *
 	llgtr	%r4,%r4			# compat_fd_set *
@@ -652,112 +552,94 @@ compat_sys_select_wrapper:
 	llgtr	%r6,%r6			# struct compat_timeval *
 	jg	compat_sys_select	# branch to system call
 
-	.globl	sys32_flock_wrapper
-sys32_flock_wrapper:
+ENTRY(sys32_flock_wrapper)
 	llgfr	%r2,%r2			# unsigned int
 	llgfr	%r3,%r3			# unsigned int
 	jg	sys_flock		# branch to system call
 
-	.globl	sys32_msync_wrapper
-sys32_msync_wrapper:
+ENTRY(sys32_msync_wrapper)
 	llgfr	%r2,%r2			# unsigned long
 	llgfr	%r3,%r3			# size_t
 	lgfr	%r4,%r4			# int
 	jg	sys_msync		# branch to system call
 
-	.globl	compat_sys_readv_wrapper
-compat_sys_readv_wrapper:
+ENTRY(compat_sys_readv_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct compat_iovec *
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_readv	# branch to system call
 
-	.globl	compat_sys_writev_wrapper
-compat_sys_writev_wrapper:
+ENTRY(compat_sys_writev_wrapper)
 	lgfr	%r2,%r2			# int
 	llgtr	%r3,%r3			# const struct compat_iovec *
 	llgfr	%r4,%r4			# unsigned long
 	jg	compat_sys_writev	# branch to system call
 
-	.globl	sys32_getsid_wrapper
-sys32_getsid_wrapper:
+ENTRY(sys32_getsid_wrapper)
 	lgfr	%r2,%r2			# pid_t
 	jg	sys_getsid		# branch to system call
 
-	.globl	sys32_fdatasync_wrapper
-sys32_fdatasync_wrapper:
+ENTRY(sys32_fdatasync_wrapper)
 	llgfr	%r2,%r2			# unsigned int
690 jg sys_fdatasync # branch to system call 584 jg sys_fdatasync # branch to system call
691 585
692 .globl sys32_mlock_wrapper 586ENTRY(sys32_mlock_wrapper)
693sys32_mlock_wrapper:
694 llgfr %r2,%r2 # unsigned long 587 llgfr %r2,%r2 # unsigned long
695 llgfr %r3,%r3 # size_t 588 llgfr %r3,%r3 # size_t
696 jg sys_mlock # branch to system call 589 jg sys_mlock # branch to system call
697 590
698 .globl sys32_munlock_wrapper 591ENTRY(sys32_munlock_wrapper)
699sys32_munlock_wrapper:
700 llgfr %r2,%r2 # unsigned long 592 llgfr %r2,%r2 # unsigned long
701 llgfr %r3,%r3 # size_t 593 llgfr %r3,%r3 # size_t
702 jg sys_munlock # branch to system call 594 jg sys_munlock # branch to system call
703 595
704 .globl sys32_mlockall_wrapper 596ENTRY(sys32_mlockall_wrapper)
705sys32_mlockall_wrapper:
706 lgfr %r2,%r2 # int 597 lgfr %r2,%r2 # int
707 jg sys_mlockall # branch to system call 598 jg sys_mlockall # branch to system call
708 599
709#sys32_munlockall_wrapper # void 600#sys32_munlockall_wrapper # void
710 601
711 .globl sys32_sched_setparam_wrapper 602ENTRY(sys32_sched_setparam_wrapper)
712sys32_sched_setparam_wrapper:
713 lgfr %r2,%r2 # pid_t 603 lgfr %r2,%r2 # pid_t
714 llgtr %r3,%r3 # struct sched_param * 604 llgtr %r3,%r3 # struct sched_param *
715 jg sys_sched_setparam # branch to system call 605 jg sys_sched_setparam # branch to system call
716 606
717 .globl sys32_sched_getparam_wrapper 607ENTRY(sys32_sched_getparam_wrapper)
718sys32_sched_getparam_wrapper:
719 lgfr %r2,%r2 # pid_t 608 lgfr %r2,%r2 # pid_t
720 llgtr %r3,%r3 # struct sched_param * 609 llgtr %r3,%r3 # struct sched_param *
721 jg sys_sched_getparam # branch to system call 610 jg sys_sched_getparam # branch to system call
722 611
723 .globl sys32_sched_setscheduler_wrapper 612ENTRY(sys32_sched_setscheduler_wrapper)
724sys32_sched_setscheduler_wrapper:
725 lgfr %r2,%r2 # pid_t 613 lgfr %r2,%r2 # pid_t
726 lgfr %r3,%r3 # int 614 lgfr %r3,%r3 # int
727 llgtr %r4,%r4 # struct sched_param * 615 llgtr %r4,%r4 # struct sched_param *
728 jg sys_sched_setscheduler # branch to system call 616 jg sys_sched_setscheduler # branch to system call
729 617
730 .globl sys32_sched_getscheduler_wrapper 618ENTRY(sys32_sched_getscheduler_wrapper)
731sys32_sched_getscheduler_wrapper:
732 lgfr %r2,%r2 # pid_t 619 lgfr %r2,%r2 # pid_t
733 jg sys_sched_getscheduler # branch to system call 620 jg sys_sched_getscheduler # branch to system call
734 621
735#sys32_sched_yield_wrapper # void 622#sys32_sched_yield_wrapper # void
736 623
737 .globl sys32_sched_get_priority_max_wrapper 624ENTRY(sys32_sched_get_priority_max_wrapper)
738sys32_sched_get_priority_max_wrapper:
739 lgfr %r2,%r2 # int 625 lgfr %r2,%r2 # int
740 jg sys_sched_get_priority_max # branch to system call 626 jg sys_sched_get_priority_max # branch to system call
741 627
742 .globl sys32_sched_get_priority_min_wrapper 628ENTRY(sys32_sched_get_priority_min_wrapper)
743sys32_sched_get_priority_min_wrapper:
744 lgfr %r2,%r2 # int 629 lgfr %r2,%r2 # int
745 jg sys_sched_get_priority_min # branch to system call 630 jg sys_sched_get_priority_min # branch to system call
746 631
747 .globl sys32_sched_rr_get_interval_wrapper 632ENTRY(sys32_sched_rr_get_interval_wrapper)
748sys32_sched_rr_get_interval_wrapper:
749 lgfr %r2,%r2 # pid_t 633 lgfr %r2,%r2 # pid_t
750 llgtr %r3,%r3 # struct compat_timespec * 634 llgtr %r3,%r3 # struct compat_timespec *
751 jg sys32_sched_rr_get_interval # branch to system call 635 jg sys32_sched_rr_get_interval # branch to system call
752 636
753 .globl compat_sys_nanosleep_wrapper 637ENTRY(compat_sys_nanosleep_wrapper)
754compat_sys_nanosleep_wrapper:
755 llgtr %r2,%r2 # struct compat_timespec * 638 llgtr %r2,%r2 # struct compat_timespec *
756 llgtr %r3,%r3 # struct compat_timespec * 639 llgtr %r3,%r3 # struct compat_timespec *
757 jg compat_sys_nanosleep # branch to system call 640 jg compat_sys_nanosleep # branch to system call
758 641
759 .globl sys32_mremap_wrapper 642ENTRY(sys32_mremap_wrapper)
760sys32_mremap_wrapper:
761 llgfr %r2,%r2 # unsigned long 643 llgfr %r2,%r2 # unsigned long
762 llgfr %r3,%r3 # unsigned long 644 llgfr %r3,%r3 # unsigned long
763 llgfr %r4,%r4 # unsigned long 645 llgfr %r4,%r4 # unsigned long
@@ -765,50 +647,43 @@ sys32_mremap_wrapper:
765 llgfr %r6,%r6 # unsigned long 647 llgfr %r6,%r6 # unsigned long
766 jg sys_mremap # branch to system call 648 jg sys_mremap # branch to system call
767 649
768 .globl sys32_setresuid16_wrapper 650ENTRY(sys32_setresuid16_wrapper)
769sys32_setresuid16_wrapper:
770 llgfr %r2,%r2 # __kernel_old_uid_emu31_t 651 llgfr %r2,%r2 # __kernel_old_uid_emu31_t
771 llgfr %r3,%r3 # __kernel_old_uid_emu31_t 652 llgfr %r3,%r3 # __kernel_old_uid_emu31_t
772 llgfr %r4,%r4 # __kernel_old_uid_emu31_t 653 llgfr %r4,%r4 # __kernel_old_uid_emu31_t
773 jg sys32_setresuid16 # branch to system call 654 jg sys32_setresuid16 # branch to system call
774 655
775 .globl sys32_getresuid16_wrapper 656ENTRY(sys32_getresuid16_wrapper)
776sys32_getresuid16_wrapper:
777 llgtr %r2,%r2 # __kernel_old_uid_emu31_t * 657 llgtr %r2,%r2 # __kernel_old_uid_emu31_t *
778 llgtr %r3,%r3 # __kernel_old_uid_emu31_t * 658 llgtr %r3,%r3 # __kernel_old_uid_emu31_t *
779 llgtr %r4,%r4 # __kernel_old_uid_emu31_t * 659 llgtr %r4,%r4 # __kernel_old_uid_emu31_t *
780 jg sys32_getresuid16 # branch to system call 660 jg sys32_getresuid16 # branch to system call
781 661
782 .globl sys32_poll_wrapper 662ENTRY(sys32_poll_wrapper)
783sys32_poll_wrapper:
784 llgtr %r2,%r2 # struct pollfd * 663 llgtr %r2,%r2 # struct pollfd *
785 llgfr %r3,%r3 # unsigned int 664 llgfr %r3,%r3 # unsigned int
786 lgfr %r4,%r4 # long 665 lgfr %r4,%r4 # long
787 jg sys_poll # branch to system call 666 jg sys_poll # branch to system call
788 667
789 .globl compat_sys_nfsservctl_wrapper 668ENTRY(compat_sys_nfsservctl_wrapper)
790compat_sys_nfsservctl_wrapper:
791 lgfr %r2,%r2 # int 669 lgfr %r2,%r2 # int
792 llgtr %r3,%r3 # struct compat_nfsctl_arg* 670 llgtr %r3,%r3 # struct compat_nfsctl_arg*
793 llgtr %r4,%r4 # union compat_nfsctl_res* 671 llgtr %r4,%r4 # union compat_nfsctl_res*
794 jg compat_sys_nfsservctl # branch to system call 672 jg compat_sys_nfsservctl # branch to system call
795 673
796 .globl sys32_setresgid16_wrapper 674ENTRY(sys32_setresgid16_wrapper)
797sys32_setresgid16_wrapper:
798 llgfr %r2,%r2 # __kernel_old_gid_emu31_t 675 llgfr %r2,%r2 # __kernel_old_gid_emu31_t
799 llgfr %r3,%r3 # __kernel_old_gid_emu31_t 676 llgfr %r3,%r3 # __kernel_old_gid_emu31_t
800 llgfr %r4,%r4 # __kernel_old_gid_emu31_t 677 llgfr %r4,%r4 # __kernel_old_gid_emu31_t
801 jg sys32_setresgid16 # branch to system call 678 jg sys32_setresgid16 # branch to system call
802 679
803 .globl sys32_getresgid16_wrapper 680ENTRY(sys32_getresgid16_wrapper)
804sys32_getresgid16_wrapper:
805 llgtr %r2,%r2 # __kernel_old_gid_emu31_t * 681 llgtr %r2,%r2 # __kernel_old_gid_emu31_t *
806 llgtr %r3,%r3 # __kernel_old_gid_emu31_t * 682 llgtr %r3,%r3 # __kernel_old_gid_emu31_t *
807 llgtr %r4,%r4 # __kernel_old_gid_emu31_t * 683 llgtr %r4,%r4 # __kernel_old_gid_emu31_t *
808 jg sys32_getresgid16 # branch to system call 684 jg sys32_getresgid16 # branch to system call
809 685
810 .globl sys32_prctl_wrapper 686ENTRY(sys32_prctl_wrapper)
811sys32_prctl_wrapper:
812 lgfr %r2,%r2 # int 687 lgfr %r2,%r2 # int
813 llgfr %r3,%r3 # unsigned long 688 llgfr %r3,%r3 # unsigned long
814 llgfr %r4,%r4 # unsigned long 689 llgfr %r4,%r4 # unsigned long
@@ -818,51 +693,44 @@ sys32_prctl_wrapper:
818 693
819#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue 694#sys32_rt_sigreturn_wrapper # done in rt_sigreturn_glue
820 695
821 .globl sys32_rt_sigaction_wrapper 696ENTRY(sys32_rt_sigaction_wrapper)
822sys32_rt_sigaction_wrapper:
823 lgfr %r2,%r2 # int 697 lgfr %r2,%r2 # int
824 llgtr %r3,%r3 # const struct sigaction_emu31 * 698 llgtr %r3,%r3 # const struct sigaction_emu31 *
825 llgtr %r4,%r4 # const struct sigaction_emu31 * 699 llgtr %r4,%r4 # const struct sigaction_emu31 *
826 llgfr %r5,%r5 # size_t 700 llgfr %r5,%r5 # size_t
827 jg sys32_rt_sigaction # branch to system call 701 jg sys32_rt_sigaction # branch to system call
828 702
829 .globl sys32_rt_sigprocmask_wrapper 703ENTRY(sys32_rt_sigprocmask_wrapper)
830sys32_rt_sigprocmask_wrapper:
831 lgfr %r2,%r2 # int 704 lgfr %r2,%r2 # int
832 llgtr %r3,%r3 # old_sigset_emu31 * 705 llgtr %r3,%r3 # old_sigset_emu31 *
833 llgtr %r4,%r4 # old_sigset_emu31 * 706 llgtr %r4,%r4 # old_sigset_emu31 *
834 llgfr %r5,%r5 # size_t 707 llgfr %r5,%r5 # size_t
835 jg sys32_rt_sigprocmask # branch to system call 708 jg sys32_rt_sigprocmask # branch to system call
836 709
837 .globl sys32_rt_sigpending_wrapper 710ENTRY(sys32_rt_sigpending_wrapper)
838sys32_rt_sigpending_wrapper:
839 llgtr %r2,%r2 # sigset_emu31 * 711 llgtr %r2,%r2 # sigset_emu31 *
840 llgfr %r3,%r3 # size_t 712 llgfr %r3,%r3 # size_t
841 jg sys32_rt_sigpending # branch to system call 713 jg sys32_rt_sigpending # branch to system call
842 714
843 .globl compat_sys_rt_sigtimedwait_wrapper 715ENTRY(compat_sys_rt_sigtimedwait_wrapper)
844compat_sys_rt_sigtimedwait_wrapper:
845 llgtr %r2,%r2 # const sigset_emu31_t * 716 llgtr %r2,%r2 # const sigset_emu31_t *
846 llgtr %r3,%r3 # siginfo_emu31_t * 717 llgtr %r3,%r3 # siginfo_emu31_t *
847 llgtr %r4,%r4 # const struct compat_timespec * 718 llgtr %r4,%r4 # const struct compat_timespec *
848 llgfr %r5,%r5 # size_t 719 llgfr %r5,%r5 # size_t
849 jg compat_sys_rt_sigtimedwait # branch to system call 720 jg compat_sys_rt_sigtimedwait # branch to system call
850 721
851 .globl sys32_rt_sigqueueinfo_wrapper 722ENTRY(sys32_rt_sigqueueinfo_wrapper)
852sys32_rt_sigqueueinfo_wrapper:
853 lgfr %r2,%r2 # int 723 lgfr %r2,%r2 # int
854 lgfr %r3,%r3 # int 724 lgfr %r3,%r3 # int
855 llgtr %r4,%r4 # siginfo_emu31_t * 725 llgtr %r4,%r4 # siginfo_emu31_t *
856 jg sys32_rt_sigqueueinfo # branch to system call 726 jg sys32_rt_sigqueueinfo # branch to system call
857 727
858 .globl compat_sys_rt_sigsuspend_wrapper 728ENTRY(compat_sys_rt_sigsuspend_wrapper)
859compat_sys_rt_sigsuspend_wrapper:
860 llgtr %r2,%r2 # compat_sigset_t * 729 llgtr %r2,%r2 # compat_sigset_t *
861 llgfr %r3,%r3 # compat_size_t 730 llgfr %r3,%r3 # compat_size_t
862 jg compat_sys_rt_sigsuspend 731 jg compat_sys_rt_sigsuspend
863 732
864 .globl sys32_pread64_wrapper 733ENTRY(sys32_pread64_wrapper)
865sys32_pread64_wrapper:
866 llgfr %r2,%r2 # unsigned int 734 llgfr %r2,%r2 # unsigned int
867 llgtr %r3,%r3 # char * 735 llgtr %r3,%r3 # char *
868 llgfr %r4,%r4 # size_t 736 llgfr %r4,%r4 # size_t
@@ -870,8 +738,7 @@ sys32_pread64_wrapper:
870 llgfr %r6,%r6 # u32 738 llgfr %r6,%r6 # u32
871 jg sys32_pread64 # branch to system call 739 jg sys32_pread64 # branch to system call
872 740
873 .globl sys32_pwrite64_wrapper 741ENTRY(sys32_pwrite64_wrapper)
874sys32_pwrite64_wrapper:
875 llgfr %r2,%r2 # unsigned int 742 llgfr %r2,%r2 # unsigned int
876 llgtr %r3,%r3 # const char * 743 llgtr %r3,%r3 # const char *
877 llgfr %r4,%r4 # size_t 744 llgfr %r4,%r4 # size_t
@@ -879,39 +746,33 @@ sys32_pwrite64_wrapper:
879 llgfr %r6,%r6 # u32 746 llgfr %r6,%r6 # u32
880 jg sys32_pwrite64 # branch to system call 747 jg sys32_pwrite64 # branch to system call
881 748
882 .globl sys32_chown16_wrapper 749ENTRY(sys32_chown16_wrapper)
883sys32_chown16_wrapper:
884 llgtr %r2,%r2 # const char * 750 llgtr %r2,%r2 # const char *
885 llgfr %r3,%r3 # __kernel_old_uid_emu31_t 751 llgfr %r3,%r3 # __kernel_old_uid_emu31_t
886 llgfr %r4,%r4 # __kernel_old_gid_emu31_t 752 llgfr %r4,%r4 # __kernel_old_gid_emu31_t
887 jg sys32_chown16 # branch to system call 753 jg sys32_chown16 # branch to system call
888 754
889 .globl sys32_getcwd_wrapper 755ENTRY(sys32_getcwd_wrapper)
890sys32_getcwd_wrapper:
891 llgtr %r2,%r2 # char * 756 llgtr %r2,%r2 # char *
892 llgfr %r3,%r3 # unsigned long 757 llgfr %r3,%r3 # unsigned long
893 jg sys_getcwd # branch to system call 758 jg sys_getcwd # branch to system call
894 759
895 .globl sys32_capget_wrapper 760ENTRY(sys32_capget_wrapper)
896sys32_capget_wrapper:
897 llgtr %r2,%r2 # cap_user_header_t 761 llgtr %r2,%r2 # cap_user_header_t
898 llgtr %r3,%r3 # cap_user_data_t 762 llgtr %r3,%r3 # cap_user_data_t
899 jg sys_capget # branch to system call 763 jg sys_capget # branch to system call
900 764
901 .globl sys32_capset_wrapper 765ENTRY(sys32_capset_wrapper)
902sys32_capset_wrapper:
903 llgtr %r2,%r2 # cap_user_header_t 766 llgtr %r2,%r2 # cap_user_header_t
904 llgtr %r3,%r3 # const cap_user_data_t 767 llgtr %r3,%r3 # const cap_user_data_t
905 jg sys_capset # branch to system call 768 jg sys_capset # branch to system call
906 769
907 .globl sys32_sigaltstack_wrapper 770ENTRY(sys32_sigaltstack_wrapper)
908sys32_sigaltstack_wrapper:
909 llgtr %r2,%r2 # const stack_emu31_t * 771 llgtr %r2,%r2 # const stack_emu31_t *
910 llgtr %r3,%r3 # stack_emu31_t * 772 llgtr %r3,%r3 # stack_emu31_t *
911 jg sys32_sigaltstack 773 jg sys32_sigaltstack
912 774
913 .globl sys32_sendfile_wrapper 775ENTRY(sys32_sendfile_wrapper)
914sys32_sendfile_wrapper:
915 lgfr %r2,%r2 # int 776 lgfr %r2,%r2 # int
916 lgfr %r3,%r3 # int 777 lgfr %r3,%r3 # int
917 llgtr %r4,%r4 # __kernel_off_emu31_t * 778 llgtr %r4,%r4 # __kernel_off_emu31_t *
@@ -920,22 +781,19 @@ sys32_sendfile_wrapper:
920 781
921#sys32_vfork_wrapper # done in vfork_glue 782#sys32_vfork_wrapper # done in vfork_glue
922 783
923 .globl sys32_truncate64_wrapper 784ENTRY(sys32_truncate64_wrapper)
924sys32_truncate64_wrapper:
925 llgtr %r2,%r2 # const char * 785 llgtr %r2,%r2 # const char *
926 llgfr %r3,%r3 # unsigned long 786 llgfr %r3,%r3 # unsigned long
927 llgfr %r4,%r4 # unsigned long 787 llgfr %r4,%r4 # unsigned long
928 jg sys32_truncate64 # branch to system call 788 jg sys32_truncate64 # branch to system call
929 789
930 .globl sys32_ftruncate64_wrapper 790ENTRY(sys32_ftruncate64_wrapper)
931sys32_ftruncate64_wrapper:
932 llgfr %r2,%r2 # unsigned int 791 llgfr %r2,%r2 # unsigned int
933 llgfr %r3,%r3 # unsigned long 792 llgfr %r3,%r3 # unsigned long
934 llgfr %r4,%r4 # unsigned long 793 llgfr %r4,%r4 # unsigned long
935 jg sys32_ftruncate64 # branch to system call 794 jg sys32_ftruncate64 # branch to system call
936 795
937 .globl sys32_lchown_wrapper 796ENTRY(sys32_lchown_wrapper)
938sys32_lchown_wrapper:
939 llgtr %r2,%r2 # const char * 797 llgtr %r2,%r2 # const char *
940 llgfr %r3,%r3 # uid_t 798 llgfr %r3,%r3 # uid_t
941 llgfr %r4,%r4 # gid_t 799 llgfr %r4,%r4 # gid_t
@@ -946,156 +804,131 @@ sys32_lchown_wrapper:
946#sys32_geteuid_wrapper # void 804#sys32_geteuid_wrapper # void
947#sys32_getegid_wrapper # void 805#sys32_getegid_wrapper # void
948 806
949 .globl sys32_setreuid_wrapper 807ENTRY(sys32_setreuid_wrapper)
950sys32_setreuid_wrapper:
951 llgfr %r2,%r2 # uid_t 808 llgfr %r2,%r2 # uid_t
952 llgfr %r3,%r3 # uid_t 809 llgfr %r3,%r3 # uid_t
953 jg sys_setreuid # branch to system call 810 jg sys_setreuid # branch to system call
954 811
955 .globl sys32_setregid_wrapper 812ENTRY(sys32_setregid_wrapper)
956sys32_setregid_wrapper:
957 llgfr %r2,%r2 # gid_t 813 llgfr %r2,%r2 # gid_t
958 llgfr %r3,%r3 # gid_t 814 llgfr %r3,%r3 # gid_t
959 jg sys_setregid # branch to system call 815 jg sys_setregid # branch to system call
960 816
961 .globl sys32_getgroups_wrapper 817ENTRY(sys32_getgroups_wrapper)
962sys32_getgroups_wrapper:
963 lgfr %r2,%r2 # int 818 lgfr %r2,%r2 # int
964 llgtr %r3,%r3 # gid_t * 819 llgtr %r3,%r3 # gid_t *
965 jg sys_getgroups # branch to system call 820 jg sys_getgroups # branch to system call
966 821
967 .globl sys32_setgroups_wrapper 822ENTRY(sys32_setgroups_wrapper)
968sys32_setgroups_wrapper:
969 lgfr %r2,%r2 # int 823 lgfr %r2,%r2 # int
970 llgtr %r3,%r3 # gid_t * 824 llgtr %r3,%r3 # gid_t *
971 jg sys_setgroups # branch to system call 825 jg sys_setgroups # branch to system call
972 826
973 .globl sys32_fchown_wrapper 827ENTRY(sys32_fchown_wrapper)
974sys32_fchown_wrapper:
975 llgfr %r2,%r2 # unsigned int 828 llgfr %r2,%r2 # unsigned int
976 llgfr %r3,%r3 # uid_t 829 llgfr %r3,%r3 # uid_t
977 llgfr %r4,%r4 # gid_t 830 llgfr %r4,%r4 # gid_t
978 jg sys_fchown # branch to system call 831 jg sys_fchown # branch to system call
979 832
980 .globl sys32_setresuid_wrapper 833ENTRY(sys32_setresuid_wrapper)
981sys32_setresuid_wrapper:
982 llgfr %r2,%r2 # uid_t 834 llgfr %r2,%r2 # uid_t
983 llgfr %r3,%r3 # uid_t 835 llgfr %r3,%r3 # uid_t
984 llgfr %r4,%r4 # uid_t 836 llgfr %r4,%r4 # uid_t
985 jg sys_setresuid # branch to system call 837 jg sys_setresuid # branch to system call
986 838
987 .globl sys32_getresuid_wrapper 839ENTRY(sys32_getresuid_wrapper)
988sys32_getresuid_wrapper:
989 llgtr %r2,%r2 # uid_t * 840 llgtr %r2,%r2 # uid_t *
990 llgtr %r3,%r3 # uid_t * 841 llgtr %r3,%r3 # uid_t *
991 llgtr %r4,%r4 # uid_t * 842 llgtr %r4,%r4 # uid_t *
992 jg sys_getresuid # branch to system call 843 jg sys_getresuid # branch to system call
993 844
994 .globl sys32_setresgid_wrapper 845ENTRY(sys32_setresgid_wrapper)
995sys32_setresgid_wrapper:
996 llgfr %r2,%r2 # gid_t 846 llgfr %r2,%r2 # gid_t
997 llgfr %r3,%r3 # gid_t 847 llgfr %r3,%r3 # gid_t
998 llgfr %r4,%r4 # gid_t 848 llgfr %r4,%r4 # gid_t
999 jg sys_setresgid # branch to system call 849 jg sys_setresgid # branch to system call
1000 850
1001 .globl sys32_getresgid_wrapper 851ENTRY(sys32_getresgid_wrapper)
1002sys32_getresgid_wrapper:
1003 llgtr %r2,%r2 # gid_t * 852 llgtr %r2,%r2 # gid_t *
1004 llgtr %r3,%r3 # gid_t * 853 llgtr %r3,%r3 # gid_t *
1005 llgtr %r4,%r4 # gid_t * 854 llgtr %r4,%r4 # gid_t *
1006 jg sys_getresgid # branch to system call 855 jg sys_getresgid # branch to system call
1007 856
1008 .globl sys32_chown_wrapper 857ENTRY(sys32_chown_wrapper)
1009sys32_chown_wrapper:
1010 llgtr %r2,%r2 # const char * 858 llgtr %r2,%r2 # const char *
1011 llgfr %r3,%r3 # uid_t 859 llgfr %r3,%r3 # uid_t
1012 llgfr %r4,%r4 # gid_t 860 llgfr %r4,%r4 # gid_t
1013 jg sys_chown # branch to system call 861 jg sys_chown # branch to system call
1014 862
1015 .globl sys32_setuid_wrapper 863ENTRY(sys32_setuid_wrapper)
1016sys32_setuid_wrapper:
1017 llgfr %r2,%r2 # uid_t 864 llgfr %r2,%r2 # uid_t
1018 jg sys_setuid # branch to system call 865 jg sys_setuid # branch to system call
1019 866
1020 .globl sys32_setgid_wrapper 867ENTRY(sys32_setgid_wrapper)
1021sys32_setgid_wrapper:
1022 llgfr %r2,%r2 # gid_t 868 llgfr %r2,%r2 # gid_t
1023 jg sys_setgid # branch to system call 869 jg sys_setgid # branch to system call
1024 870
1025 .globl sys32_setfsuid_wrapper 871ENTRY(sys32_setfsuid_wrapper)
1026sys32_setfsuid_wrapper:
1027 llgfr %r2,%r2 # uid_t 872 llgfr %r2,%r2 # uid_t
1028 jg sys_setfsuid # branch to system call 873 jg sys_setfsuid # branch to system call
1029 874
1030 .globl sys32_setfsgid_wrapper 875ENTRY(sys32_setfsgid_wrapper)
1031sys32_setfsgid_wrapper:
1032 llgfr %r2,%r2 # gid_t 876 llgfr %r2,%r2 # gid_t
1033 jg sys_setfsgid # branch to system call 877 jg sys_setfsgid # branch to system call
1034 878
1035 .globl sys32_pivot_root_wrapper 879ENTRY(sys32_pivot_root_wrapper)
1036sys32_pivot_root_wrapper:
1037 llgtr %r2,%r2 # const char * 880 llgtr %r2,%r2 # const char *
1038 llgtr %r3,%r3 # const char * 881 llgtr %r3,%r3 # const char *
1039 jg sys_pivot_root # branch to system call 882 jg sys_pivot_root # branch to system call
1040 883
1041 .globl sys32_mincore_wrapper 884ENTRY(sys32_mincore_wrapper)
1042sys32_mincore_wrapper:
1043 llgfr %r2,%r2 # unsigned long 885 llgfr %r2,%r2 # unsigned long
1044 llgfr %r3,%r3 # size_t 886 llgfr %r3,%r3 # size_t
1045 llgtr %r4,%r4 # unsigned char * 887 llgtr %r4,%r4 # unsigned char *
1046 jg sys_mincore # branch to system call 888 jg sys_mincore # branch to system call
1047 889
1048 .globl sys32_madvise_wrapper 890ENTRY(sys32_madvise_wrapper)
1049sys32_madvise_wrapper:
1050 llgfr %r2,%r2 # unsigned long 891 llgfr %r2,%r2 # unsigned long
1051 llgfr %r3,%r3 # size_t 892 llgfr %r3,%r3 # size_t
1052 lgfr %r4,%r4 # int 893 lgfr %r4,%r4 # int
1053 jg sys_madvise # branch to system call 894 jg sys_madvise # branch to system call
1054 895
1055 .globl sys32_getdents64_wrapper 896ENTRY(sys32_getdents64_wrapper)
1056sys32_getdents64_wrapper:
1057 llgfr %r2,%r2 # unsigned int 897 llgfr %r2,%r2 # unsigned int
1058 llgtr %r3,%r3 # void * 898 llgtr %r3,%r3 # void *
1059 llgfr %r4,%r4 # unsigned int 899 llgfr %r4,%r4 # unsigned int
1060 jg sys_getdents64 # branch to system call 900 jg sys_getdents64 # branch to system call
1061 901
1062 .globl compat_sys_fcntl64_wrapper 902ENTRY(compat_sys_fcntl64_wrapper)
1063compat_sys_fcntl64_wrapper:
1064 llgfr %r2,%r2 # unsigned int 903 llgfr %r2,%r2 # unsigned int
1065 llgfr %r3,%r3 # unsigned int 904 llgfr %r3,%r3 # unsigned int
1066 llgfr %r4,%r4 # unsigned long 905 llgfr %r4,%r4 # unsigned long
1067 jg compat_sys_fcntl64 # branch to system call 906 jg compat_sys_fcntl64 # branch to system call
1068 907
1069 .globl sys32_stat64_wrapper 908ENTRY(sys32_stat64_wrapper)
1070sys32_stat64_wrapper:
1071 llgtr %r2,%r2 # char * 909 llgtr %r2,%r2 # char *
1072 llgtr %r3,%r3 # struct stat64 * 910 llgtr %r3,%r3 # struct stat64 *
1073 jg sys32_stat64 # branch to system call 911 jg sys32_stat64 # branch to system call
1074 912
1075 .globl sys32_lstat64_wrapper 913ENTRY(sys32_lstat64_wrapper)
1076sys32_lstat64_wrapper:
1077 llgtr %r2,%r2 # char * 914 llgtr %r2,%r2 # char *
1078 llgtr %r3,%r3 # struct stat64 * 915 llgtr %r3,%r3 # struct stat64 *
1079 jg sys32_lstat64 # branch to system call 916 jg sys32_lstat64 # branch to system call
1080 917
1081 .globl sys32_stime_wrapper 918ENTRY(sys32_stime_wrapper)
1082sys32_stime_wrapper:
1083 llgtr %r2,%r2 # long * 919 llgtr %r2,%r2 # long *
1084 jg compat_sys_stime # branch to system call 920 jg compat_sys_stime # branch to system call
1085 921
1086 .globl sys32_sysctl_wrapper 922ENTRY(sys32_sysctl_wrapper)
1087sys32_sysctl_wrapper:
1088 llgtr %r2,%r2 # struct compat_sysctl_args * 923 llgtr %r2,%r2 # struct compat_sysctl_args *
1089 jg compat_sys_sysctl 924 jg compat_sys_sysctl
1090 925
1091 .globl sys32_fstat64_wrapper 926ENTRY(sys32_fstat64_wrapper)
1092sys32_fstat64_wrapper:
1093 llgfr %r2,%r2 # unsigned long 927 llgfr %r2,%r2 # unsigned long
1094 llgtr %r3,%r3 # struct stat64 * 928 llgtr %r3,%r3 # struct stat64 *
1095 jg sys32_fstat64 # branch to system call 929 jg sys32_fstat64 # branch to system call
1096 930
1097 .globl compat_sys_futex_wrapper 931ENTRY(compat_sys_futex_wrapper)
1098compat_sys_futex_wrapper:
1099 llgtr %r2,%r2 # u32 * 932 llgtr %r2,%r2 # u32 *
1100 lgfr %r3,%r3 # int 933 lgfr %r3,%r3 # int
1101 lgfr %r4,%r4 # int 934 lgfr %r4,%r4 # int
@@ -1105,8 +938,7 @@ compat_sys_futex_wrapper:
1105 stg %r0,160(%r15) 938 stg %r0,160(%r15)
1106 jg compat_sys_futex # branch to system call 939 jg compat_sys_futex # branch to system call
1107 940
1108 .globl sys32_setxattr_wrapper 941ENTRY(sys32_setxattr_wrapper)
1109sys32_setxattr_wrapper:
1110 llgtr %r2,%r2 # char * 942 llgtr %r2,%r2 # char *
1111 llgtr %r3,%r3 # char * 943 llgtr %r3,%r3 # char *
1112 llgtr %r4,%r4 # void * 944 llgtr %r4,%r4 # void *
@@ -1114,8 +946,7 @@ sys32_setxattr_wrapper:
1114 lgfr %r6,%r6 # int 946 lgfr %r6,%r6 # int
1115 jg sys_setxattr 947 jg sys_setxattr
1116 948
1117 .globl sys32_lsetxattr_wrapper 949ENTRY(sys32_lsetxattr_wrapper)
1118sys32_lsetxattr_wrapper:
1119 llgtr %r2,%r2 # char * 950 llgtr %r2,%r2 # char *
1120 llgtr %r3,%r3 # char * 951 llgtr %r3,%r3 # char *
1121 llgtr %r4,%r4 # void * 952 llgtr %r4,%r4 # void *
@@ -1123,8 +954,7 @@ sys32_lsetxattr_wrapper:
1123 lgfr %r6,%r6 # int 954 lgfr %r6,%r6 # int
1124 jg sys_lsetxattr 955 jg sys_lsetxattr
1125 956
1126 .globl sys32_fsetxattr_wrapper 957ENTRY(sys32_fsetxattr_wrapper)
1127sys32_fsetxattr_wrapper:
1128 lgfr %r2,%r2 # int 958 lgfr %r2,%r2 # int
1129 llgtr %r3,%r3 # char * 959 llgtr %r3,%r3 # char *
1130 llgtr %r4,%r4 # void * 960 llgtr %r4,%r4 # void *
@@ -1132,124 +962,106 @@ sys32_fsetxattr_wrapper:
1132 lgfr %r6,%r6 # int 962 lgfr %r6,%r6 # int
1133 jg sys_fsetxattr 963 jg sys_fsetxattr
1134 964
1135 .globl sys32_getxattr_wrapper 965ENTRY(sys32_getxattr_wrapper)
1136sys32_getxattr_wrapper:
1137 llgtr %r2,%r2 # char * 966 llgtr %r2,%r2 # char *
1138 llgtr %r3,%r3 # char * 967 llgtr %r3,%r3 # char *
1139 llgtr %r4,%r4 # void * 968 llgtr %r4,%r4 # void *
1140 llgfr %r5,%r5 # size_t 969 llgfr %r5,%r5 # size_t
1141 jg sys_getxattr 970 jg sys_getxattr
1142 971
1143 .globl sys32_lgetxattr_wrapper 972ENTRY(sys32_lgetxattr_wrapper)
1144sys32_lgetxattr_wrapper:
1145 llgtr %r2,%r2 # char * 973 llgtr %r2,%r2 # char *
1146 llgtr %r3,%r3 # char * 974 llgtr %r3,%r3 # char *
1147 llgtr %r4,%r4 # void * 975 llgtr %r4,%r4 # void *
1148 llgfr %r5,%r5 # size_t 976 llgfr %r5,%r5 # size_t
1149 jg sys_lgetxattr 977 jg sys_lgetxattr
1150 978
1151 .globl sys32_fgetxattr_wrapper 979ENTRY(sys32_fgetxattr_wrapper)
1152sys32_fgetxattr_wrapper:
1153 lgfr %r2,%r2 # int 980 lgfr %r2,%r2 # int
1154 llgtr %r3,%r3 # char * 981 llgtr %r3,%r3 # char *
1155 llgtr %r4,%r4 # void * 982 llgtr %r4,%r4 # void *
1156 llgfr %r5,%r5 # size_t 983 llgfr %r5,%r5 # size_t
1157 jg sys_fgetxattr 984 jg sys_fgetxattr
1158 985
1159 .globl sys32_listxattr_wrapper 986ENTRY(sys32_listxattr_wrapper)
1160sys32_listxattr_wrapper:
1161 llgtr %r2,%r2 # char * 987 llgtr %r2,%r2 # char *
1162 llgtr %r3,%r3 # char * 988 llgtr %r3,%r3 # char *
1163 llgfr %r4,%r4 # size_t 989 llgfr %r4,%r4 # size_t
1164 jg sys_listxattr 990 jg sys_listxattr
1165 991
1166 .globl sys32_llistxattr_wrapper 992ENTRY(sys32_llistxattr_wrapper)
1167sys32_llistxattr_wrapper:
1168 llgtr %r2,%r2 # char * 993 llgtr %r2,%r2 # char *
1169 llgtr %r3,%r3 # char * 994 llgtr %r3,%r3 # char *
1170 llgfr %r4,%r4 # size_t 995 llgfr %r4,%r4 # size_t
1171 jg sys_llistxattr 996 jg sys_llistxattr
1172 997
1173 .globl sys32_flistxattr_wrapper 998ENTRY(sys32_flistxattr_wrapper)
1174sys32_flistxattr_wrapper:
1175 lgfr %r2,%r2 # int 999 lgfr %r2,%r2 # int
1176 llgtr %r3,%r3 # char * 1000 llgtr %r3,%r3 # char *
1177 llgfr %r4,%r4 # size_t 1001 llgfr %r4,%r4 # size_t
1178 jg sys_flistxattr 1002 jg sys_flistxattr
1179 1003
1180 .globl sys32_removexattr_wrapper 1004ENTRY(sys32_removexattr_wrapper)
1181sys32_removexattr_wrapper:
1182 llgtr %r2,%r2 # char * 1005 llgtr %r2,%r2 # char *
1183 llgtr %r3,%r3 # char * 1006 llgtr %r3,%r3 # char *
1184 jg sys_removexattr 1007 jg sys_removexattr
1185 1008
1186 .globl sys32_lremovexattr_wrapper 1009ENTRY(sys32_lremovexattr_wrapper)
1187sys32_lremovexattr_wrapper:
1188 llgtr %r2,%r2 # char * 1010 llgtr %r2,%r2 # char *
1189 llgtr %r3,%r3 # char * 1011 llgtr %r3,%r3 # char *
1190 jg sys_lremovexattr 1012 jg sys_lremovexattr
1191 1013
1192 .globl sys32_fremovexattr_wrapper 1014ENTRY(sys32_fremovexattr_wrapper)
1193sys32_fremovexattr_wrapper:
1194 lgfr %r2,%r2 # int 1015 lgfr %r2,%r2 # int
1195 llgtr %r3,%r3 # char * 1016 llgtr %r3,%r3 # char *
1196 jg sys_fremovexattr 1017 jg sys_fremovexattr
1197 1018
1198 .globl sys32_sched_setaffinity_wrapper 1019ENTRY(sys32_sched_setaffinity_wrapper)
1199sys32_sched_setaffinity_wrapper:
1200 lgfr %r2,%r2 # int 1020 lgfr %r2,%r2 # int
1201 llgfr %r3,%r3 # unsigned int 1021 llgfr %r3,%r3 # unsigned int
1202 llgtr %r4,%r4 # unsigned long * 1022 llgtr %r4,%r4 # unsigned long *
1203 jg compat_sys_sched_setaffinity 1023 jg compat_sys_sched_setaffinity
1204 1024
1205 .globl sys32_sched_getaffinity_wrapper 1025ENTRY(sys32_sched_getaffinity_wrapper)
1206sys32_sched_getaffinity_wrapper:
1207 lgfr %r2,%r2 # int 1026 lgfr %r2,%r2 # int
1208 llgfr %r3,%r3 # unsigned int 1027 llgfr %r3,%r3 # unsigned int
1209 llgtr %r4,%r4 # unsigned long * 1028 llgtr %r4,%r4 # unsigned long *
1210 jg compat_sys_sched_getaffinity 1029 jg compat_sys_sched_getaffinity
1211 1030
1212 .globl sys32_exit_group_wrapper 1031ENTRY(sys32_exit_group_wrapper)
1213sys32_exit_group_wrapper:
1214 lgfr %r2,%r2 # int 1032 lgfr %r2,%r2 # int
1215 jg sys_exit_group # branch to system call 1033 jg sys_exit_group # branch to system call
1216 1034
1217 .globl sys32_set_tid_address_wrapper 1035ENTRY(sys32_set_tid_address_wrapper)
1218sys32_set_tid_address_wrapper:
1219 llgtr %r2,%r2 # int * 1036 llgtr %r2,%r2 # int *
1220 jg sys_set_tid_address # branch to system call 1037 jg sys_set_tid_address # branch to system call
1221 1038
1222 .globl sys_epoll_create_wrapper 1039ENTRY(sys_epoll_create_wrapper)
1223sys_epoll_create_wrapper:
1224 lgfr %r2,%r2 # int 1040 lgfr %r2,%r2 # int
1225 jg sys_epoll_create # branch to system call 1041 jg sys_epoll_create # branch to system call
1226 1042
1227 .globl sys_epoll_ctl_wrapper 1043ENTRY(sys_epoll_ctl_wrapper)
1228sys_epoll_ctl_wrapper:
1229 lgfr %r2,%r2 # int 1044 lgfr %r2,%r2 # int
1230 lgfr %r3,%r3 # int 1045 lgfr %r3,%r3 # int
1231 lgfr %r4,%r4 # int 1046 lgfr %r4,%r4 # int
1232 llgtr %r5,%r5 # struct epoll_event * 1047 llgtr %r5,%r5 # struct epoll_event *
1233 jg sys_epoll_ctl # branch to system call 1048 jg sys_epoll_ctl # branch to system call
1234 1049
1235 .globl sys_epoll_wait_wrapper 1050ENTRY(sys_epoll_wait_wrapper)
1236sys_epoll_wait_wrapper:
1237 lgfr %r2,%r2 # int 1051 lgfr %r2,%r2 # int
1238 llgtr %r3,%r3 # struct epoll_event * 1052 llgtr %r3,%r3 # struct epoll_event *
1239 lgfr %r4,%r4 # int 1053 lgfr %r4,%r4 # int
1240 lgfr %r5,%r5 # int 1054 lgfr %r5,%r5 # int
1241 jg sys_epoll_wait # branch to system call 1055 jg sys_epoll_wait # branch to system call
1242 1056
1243 .globl sys32_lookup_dcookie_wrapper 1057ENTRY(sys32_lookup_dcookie_wrapper)
1244sys32_lookup_dcookie_wrapper:
1245 sllg %r2,%r2,32 # get high word of 64bit dcookie 1058 sllg %r2,%r2,32 # get high word of 64bit dcookie
1246 or %r2,%r3 # get low word of 64bit dcookie 1059 or %r2,%r3 # get low word of 64bit dcookie
1247 llgtr %r3,%r4 # char * 1060 llgtr %r3,%r4 # char *
1248 llgfr %r4,%r5 # size_t 1061 llgfr %r4,%r5 # size_t
1249 jg sys_lookup_dcookie 1062 jg sys_lookup_dcookie
1250 1063
1251 .globl sys32_fadvise64_wrapper 1064ENTRY(sys32_fadvise64_wrapper)
1252sys32_fadvise64_wrapper:
1253 lgfr %r2,%r2 # int 1065 lgfr %r2,%r2 # int
1254 sllg %r3,%r3,32 # get high word of 64bit loff_t 1066 sllg %r3,%r3,32 # get high word of 64bit loff_t
1255 or %r3,%r4 # get low word of 64bit loff_t 1067 or %r3,%r4 # get low word of 64bit loff_t
@@ -1257,81 +1069,68 @@ sys32_fadvise64_wrapper:
1257 lgfr %r5,%r6 # int 1069 lgfr %r5,%r6 # int
1258 jg sys32_fadvise64 1070 jg sys32_fadvise64
1259 1071
1260 .globl sys32_fadvise64_64_wrapper 1072ENTRY(sys32_fadvise64_64_wrapper)
1261sys32_fadvise64_64_wrapper:
1262 llgtr %r2,%r2 # struct fadvise64_64_args * 1073 llgtr %r2,%r2 # struct fadvise64_64_args *
1263 jg sys32_fadvise64_64 1074 jg sys32_fadvise64_64
1264 1075
1265 .globl sys32_clock_settime_wrapper 1076ENTRY(sys32_clock_settime_wrapper)
1266sys32_clock_settime_wrapper:
1267 lgfr %r2,%r2 # clockid_t (int) 1077 lgfr %r2,%r2 # clockid_t (int)
1268 llgtr %r3,%r3 # struct compat_timespec * 1078 llgtr %r3,%r3 # struct compat_timespec *
1269 jg compat_sys_clock_settime 1079 jg compat_sys_clock_settime
1270 1080
1271 .globl sys32_clock_gettime_wrapper 1081ENTRY(sys32_clock_gettime_wrapper)
1272sys32_clock_gettime_wrapper:
1273 lgfr %r2,%r2 # clockid_t (int) 1082 lgfr %r2,%r2 # clockid_t (int)
1274 llgtr %r3,%r3 # struct compat_timespec * 1083 llgtr %r3,%r3 # struct compat_timespec *
1275 jg compat_sys_clock_gettime 1084 jg compat_sys_clock_gettime
1276 1085
1277 .globl sys32_clock_getres_wrapper 1086ENTRY(sys32_clock_getres_wrapper)
1278sys32_clock_getres_wrapper:
1279 lgfr %r2,%r2 # clockid_t (int) 1087 lgfr %r2,%r2 # clockid_t (int)
1280 llgtr %r3,%r3 # struct compat_timespec * 1088 llgtr %r3,%r3 # struct compat_timespec *
1281 jg compat_sys_clock_getres 1089 jg compat_sys_clock_getres
1282 1090
1283 .globl sys32_clock_nanosleep_wrapper 1091ENTRY(sys32_clock_nanosleep_wrapper)
1284sys32_clock_nanosleep_wrapper:
1285 lgfr %r2,%r2 # clockid_t (int) 1092 lgfr %r2,%r2 # clockid_t (int)
1286 lgfr %r3,%r3 # int 1093 lgfr %r3,%r3 # int
1287 llgtr %r4,%r4 # struct compat_timespec * 1094 llgtr %r4,%r4 # struct compat_timespec *
1288 llgtr %r5,%r5 # struct compat_timespec * 1095 llgtr %r5,%r5 # struct compat_timespec *
1289 jg compat_sys_clock_nanosleep 1096 jg compat_sys_clock_nanosleep
1290 1097
1291 .globl sys32_timer_create_wrapper 1098ENTRY(sys32_timer_create_wrapper)
1292sys32_timer_create_wrapper:
1293 lgfr %r2,%r2 # timer_t (int) 1099 lgfr %r2,%r2 # timer_t (int)
1294 llgtr %r3,%r3 # struct compat_sigevent * 1100 llgtr %r3,%r3 # struct compat_sigevent *
1295 llgtr %r4,%r4 # timer_t * 1101 llgtr %r4,%r4 # timer_t *
1296 jg compat_sys_timer_create 1102 jg compat_sys_timer_create
1297 1103
1298 .globl sys32_timer_settime_wrapper 1104ENTRY(sys32_timer_settime_wrapper)
1299sys32_timer_settime_wrapper:
1300 lgfr %r2,%r2 # timer_t (int) 1105 lgfr %r2,%r2 # timer_t (int)
1301 lgfr %r3,%r3 # int 1106 lgfr %r3,%r3 # int
1302 llgtr %r4,%r4 # struct compat_itimerspec * 1107 llgtr %r4,%r4 # struct compat_itimerspec *
1303 llgtr %r5,%r5 # struct compat_itimerspec * 1108 llgtr %r5,%r5 # struct compat_itimerspec *
1304 jg compat_sys_timer_settime 1109 jg compat_sys_timer_settime
1305 1110
1306 .globl sys32_timer_gettime_wrapper 1111ENTRY(sys32_timer_gettime_wrapper)
1307sys32_timer_gettime_wrapper:
1308 lgfr %r2,%r2 # timer_t (int) 1112 lgfr %r2,%r2 # timer_t (int)
1309 llgtr %r3,%r3 # struct compat_itimerspec * 1113 llgtr %r3,%r3 # struct compat_itimerspec *
1310 jg compat_sys_timer_gettime 1114 jg compat_sys_timer_gettime
1311 1115
1312 .globl sys32_timer_getoverrun_wrapper 1116ENTRY(sys32_timer_getoverrun_wrapper)
1313sys32_timer_getoverrun_wrapper:
1314 lgfr %r2,%r2 # timer_t (int) 1117 lgfr %r2,%r2 # timer_t (int)
1315 jg sys_timer_getoverrun 1118 jg sys_timer_getoverrun
1316 1119
1317 .globl sys32_timer_delete_wrapper 1120ENTRY(sys32_timer_delete_wrapper)
1318sys32_timer_delete_wrapper:
1319 lgfr %r2,%r2 # timer_t (int) 1121 lgfr %r2,%r2 # timer_t (int)
1320 jg sys_timer_delete 1122 jg sys_timer_delete
1321 1123
1322 .globl sys32_io_setup_wrapper 1124ENTRY(sys32_io_setup_wrapper)
1323sys32_io_setup_wrapper:
1324 llgfr %r2,%r2 # unsigned int 1125 llgfr %r2,%r2 # unsigned int
1325 llgtr %r3,%r3 # u32 * 1126 llgtr %r3,%r3 # u32 *
1326 jg compat_sys_io_setup 1127 jg compat_sys_io_setup
1327 1128
1328 .globl sys32_io_destroy_wrapper 1129ENTRY(sys32_io_destroy_wrapper)
1329sys32_io_destroy_wrapper:
1330 llgfr %r2,%r2 # (aio_context_t) u32 1130 llgfr %r2,%r2 # (aio_context_t) u32
1331 jg sys_io_destroy 1131 jg sys_io_destroy
1332 1132
1333 .globl sys32_io_getevents_wrapper 1133ENTRY(sys32_io_getevents_wrapper)
1334sys32_io_getevents_wrapper:
1335 llgfr %r2,%r2 # (aio_context_t) u32 1134 llgfr %r2,%r2 # (aio_context_t) u32
1336 lgfr %r3,%r3 # long 1135 lgfr %r3,%r3 # long
1337 lgfr %r4,%r4 # long 1136 lgfr %r4,%r4 # long
@@ -1339,49 +1138,42 @@ sys32_io_getevents_wrapper:
1339 llgtr %r6,%r6 # struct compat_timespec * 1138 llgtr %r6,%r6 # struct compat_timespec *
1340 jg compat_sys_io_getevents 1139 jg compat_sys_io_getevents
1341 1140
1342 .globl sys32_io_submit_wrapper 1141ENTRY(sys32_io_submit_wrapper)
1343sys32_io_submit_wrapper:
1344 llgfr %r2,%r2 # (aio_context_t) u32 1142 llgfr %r2,%r2 # (aio_context_t) u32
1345 lgfr %r3,%r3 # long 1143 lgfr %r3,%r3 # long
1346 llgtr %r4,%r4 # struct iocb ** 1144 llgtr %r4,%r4 # struct iocb **
1347 jg compat_sys_io_submit 1145 jg compat_sys_io_submit
1348 1146
1349 .globl sys32_io_cancel_wrapper 1147ENTRY(sys32_io_cancel_wrapper)
1350sys32_io_cancel_wrapper:
1351 llgfr %r2,%r2 # (aio_context_t) u32 1148 llgfr %r2,%r2 # (aio_context_t) u32
1352 llgtr %r3,%r3 # struct iocb * 1149 llgtr %r3,%r3 # struct iocb *
1353 llgtr %r4,%r4 # struct io_event * 1150 llgtr %r4,%r4 # struct io_event *
1354 jg sys_io_cancel 1151 jg sys_io_cancel
1355 1152
1356 .globl compat_sys_statfs64_wrapper 1153ENTRY(compat_sys_statfs64_wrapper)
1357compat_sys_statfs64_wrapper:
1358 llgtr %r2,%r2 # const char * 1154 llgtr %r2,%r2 # const char *
1359 llgfr %r3,%r3 # compat_size_t 1155 llgfr %r3,%r3 # compat_size_t
1360 llgtr %r4,%r4 # struct compat_statfs64 * 1156 llgtr %r4,%r4 # struct compat_statfs64 *
1361 jg compat_sys_statfs64 1157 jg compat_sys_statfs64
1362 1158
1363 .globl compat_sys_fstatfs64_wrapper 1159ENTRY(compat_sys_fstatfs64_wrapper)
1364compat_sys_fstatfs64_wrapper:
1365 llgfr %r2,%r2 # unsigned int fd 1160 llgfr %r2,%r2 # unsigned int fd
1366 llgfr %r3,%r3 # compat_size_t 1161 llgfr %r3,%r3 # compat_size_t
1367 llgtr %r4,%r4 # struct compat_statfs64 * 1162 llgtr %r4,%r4 # struct compat_statfs64 *
1368 jg compat_sys_fstatfs64 1163 jg compat_sys_fstatfs64
1369 1164
1370 .globl compat_sys_mq_open_wrapper 1165ENTRY(compat_sys_mq_open_wrapper)
1371compat_sys_mq_open_wrapper:
1372 llgtr %r2,%r2 # const char * 1166 llgtr %r2,%r2 # const char *
1373 lgfr %r3,%r3 # int 1167 lgfr %r3,%r3 # int
1374 llgfr %r4,%r4 # mode_t 1168 llgfr %r4,%r4 # mode_t
1375 llgtr %r5,%r5 # struct compat_mq_attr * 1169 llgtr %r5,%r5 # struct compat_mq_attr *
1376 jg compat_sys_mq_open 1170 jg compat_sys_mq_open
1377 1171
1378 .globl sys32_mq_unlink_wrapper 1172ENTRY(sys32_mq_unlink_wrapper)
1379sys32_mq_unlink_wrapper:
1380 llgtr %r2,%r2 # const char * 1173 llgtr %r2,%r2 # const char *
1381 jg sys_mq_unlink 1174 jg sys_mq_unlink
1382 1175
1383 .globl compat_sys_mq_timedsend_wrapper 1176ENTRY(compat_sys_mq_timedsend_wrapper)
1384compat_sys_mq_timedsend_wrapper:
1385 lgfr %r2,%r2 # mqd_t 1177 lgfr %r2,%r2 # mqd_t
1386 llgtr %r3,%r3 # const char * 1178 llgtr %r3,%r3 # const char *
1387 llgfr %r4,%r4 # size_t 1179 llgfr %r4,%r4 # size_t
@@ -1389,8 +1181,7 @@ compat_sys_mq_timedsend_wrapper:
1389 llgtr %r6,%r6 # const struct compat_timespec * 1181 llgtr %r6,%r6 # const struct compat_timespec *
1390 jg compat_sys_mq_timedsend 1182 jg compat_sys_mq_timedsend
1391 1183
1392 .globl compat_sys_mq_timedreceive_wrapper 1184ENTRY(compat_sys_mq_timedreceive_wrapper)
1393compat_sys_mq_timedreceive_wrapper:
1394 lgfr %r2,%r2 # mqd_t 1185 lgfr %r2,%r2 # mqd_t
1395 llgtr %r3,%r3 # char * 1186 llgtr %r3,%r3 # char *
1396 llgfr %r4,%r4 # size_t 1187 llgfr %r4,%r4 # size_t
@@ -1398,21 +1189,18 @@ compat_sys_mq_timedreceive_wrapper:
1398 llgtr %r6,%r6 # const struct compat_timespec * 1189 llgtr %r6,%r6 # const struct compat_timespec *
1399 jg compat_sys_mq_timedreceive 1190 jg compat_sys_mq_timedreceive
1400 1191
1401 .globl compat_sys_mq_notify_wrapper 1192ENTRY(compat_sys_mq_notify_wrapper)
1402compat_sys_mq_notify_wrapper:
1403 lgfr %r2,%r2 # mqd_t 1193 lgfr %r2,%r2 # mqd_t
1404 llgtr %r3,%r3 # struct compat_sigevent * 1194 llgtr %r3,%r3 # struct compat_sigevent *
1405 jg compat_sys_mq_notify 1195 jg compat_sys_mq_notify
1406 1196
1407 .globl compat_sys_mq_getsetattr_wrapper 1197ENTRY(compat_sys_mq_getsetattr_wrapper)
1408compat_sys_mq_getsetattr_wrapper:
1409 lgfr %r2,%r2 # mqd_t 1198 lgfr %r2,%r2 # mqd_t
1410 llgtr %r3,%r3 # struct compat_mq_attr * 1199 llgtr %r3,%r3 # struct compat_mq_attr *
1411 llgtr %r4,%r4 # struct compat_mq_attr * 1200 llgtr %r4,%r4 # struct compat_mq_attr *
1412 jg compat_sys_mq_getsetattr 1201 jg compat_sys_mq_getsetattr
1413 1202
1414 .globl compat_sys_add_key_wrapper 1203ENTRY(compat_sys_add_key_wrapper)
1415compat_sys_add_key_wrapper:
1416 llgtr %r2,%r2 # const char * 1204 llgtr %r2,%r2 # const char *
1417 llgtr %r3,%r3 # const char * 1205 llgtr %r3,%r3 # const char *
1418 llgtr %r4,%r4 # const void * 1206 llgtr %r4,%r4 # const void *
@@ -1420,16 +1208,14 @@ compat_sys_add_key_wrapper:
1420 llgfr %r6,%r6 # (key_serial_t) u32 1208 llgfr %r6,%r6 # (key_serial_t) u32
1421 jg sys_add_key 1209 jg sys_add_key
1422 1210
1423 .globl compat_sys_request_key_wrapper 1211ENTRY(compat_sys_request_key_wrapper)
1424compat_sys_request_key_wrapper:
1425 llgtr %r2,%r2 # const char * 1212 llgtr %r2,%r2 # const char *
1426 llgtr %r3,%r3 # const char * 1213 llgtr %r3,%r3 # const char *
1427 llgtr %r4,%r4 # const void * 1214 llgtr %r4,%r4 # const void *
1428 llgfr %r5,%r5 # (key_serial_t) u32 1215 llgfr %r5,%r5 # (key_serial_t) u32
1429 jg sys_request_key 1216 jg sys_request_key
1430 1217
1431 .globl sys32_remap_file_pages_wrapper 1218ENTRY(sys32_remap_file_pages_wrapper)
1432sys32_remap_file_pages_wrapper:
1433 llgfr %r2,%r2 # unsigned long 1219 llgfr %r2,%r2 # unsigned long
1434 llgfr %r3,%r3 # unsigned long 1220 llgfr %r3,%r3 # unsigned long
1435 llgfr %r4,%r4 # unsigned long 1221 llgfr %r4,%r4 # unsigned long
@@ -1437,8 +1223,7 @@ sys32_remap_file_pages_wrapper:
1437 llgfr %r6,%r6 # unsigned long 1223 llgfr %r6,%r6 # unsigned long
1438 jg sys_remap_file_pages 1224 jg sys_remap_file_pages
1439 1225
1440 .globl compat_sys_waitid_wrapper 1226ENTRY(compat_sys_waitid_wrapper)
1441compat_sys_waitid_wrapper:
1442 lgfr %r2,%r2 # int 1227 lgfr %r2,%r2 # int
1443 lgfr %r3,%r3 # pid_t 1228 lgfr %r3,%r3 # pid_t
1444 llgtr %r4,%r4 # siginfo_emu31_t * 1229 llgtr %r4,%r4 # siginfo_emu31_t *
@@ -1446,65 +1231,56 @@ compat_sys_waitid_wrapper:
1446 llgtr %r6,%r6 # struct rusage_emu31 * 1231 llgtr %r6,%r6 # struct rusage_emu31 *
1447 jg compat_sys_waitid 1232 jg compat_sys_waitid
1448 1233
1449 .globl compat_sys_kexec_load_wrapper 1234ENTRY(compat_sys_kexec_load_wrapper)
1450compat_sys_kexec_load_wrapper:
1451 llgfr %r2,%r2 # unsigned long 1235 llgfr %r2,%r2 # unsigned long
1452 llgfr %r3,%r3 # unsigned long 1236 llgfr %r3,%r3 # unsigned long
1453 llgtr %r4,%r4 # struct kexec_segment * 1237 llgtr %r4,%r4 # struct kexec_segment *
1454 llgfr %r5,%r5 # unsigned long 1238 llgfr %r5,%r5 # unsigned long
1455 jg compat_sys_kexec_load 1239 jg compat_sys_kexec_load
1456 1240
1457 .globl sys_ioprio_set_wrapper 1241ENTRY(sys_ioprio_set_wrapper)
1458sys_ioprio_set_wrapper:
1459 lgfr %r2,%r2 # int 1242 lgfr %r2,%r2 # int
1460 lgfr %r3,%r3 # int 1243 lgfr %r3,%r3 # int
1461 lgfr %r4,%r4 # int 1244 lgfr %r4,%r4 # int
1462 jg sys_ioprio_set 1245 jg sys_ioprio_set
1463 1246
1464 .globl sys_ioprio_get_wrapper 1247ENTRY(sys_ioprio_get_wrapper)
1465sys_ioprio_get_wrapper:
1466 lgfr %r2,%r2 # int 1248 lgfr %r2,%r2 # int
1467 lgfr %r3,%r3 # int 1249 lgfr %r3,%r3 # int
1468 jg sys_ioprio_get 1250 jg sys_ioprio_get
1469 1251
1470 .globl sys_inotify_add_watch_wrapper 1252ENTRY(sys_inotify_add_watch_wrapper)
1471sys_inotify_add_watch_wrapper:
1472 lgfr %r2,%r2 # int 1253 lgfr %r2,%r2 # int
1473 llgtr %r3,%r3 # const char * 1254 llgtr %r3,%r3 # const char *
1474 llgfr %r4,%r4 # u32 1255 llgfr %r4,%r4 # u32
1475 jg sys_inotify_add_watch 1256 jg sys_inotify_add_watch
1476 1257
1477 .globl sys_inotify_rm_watch_wrapper 1258ENTRY(sys_inotify_rm_watch_wrapper)
1478sys_inotify_rm_watch_wrapper:
1479 lgfr %r2,%r2 # int 1259 lgfr %r2,%r2 # int
1480 llgfr %r3,%r3 # u32 1260 llgfr %r3,%r3 # u32
1481 jg sys_inotify_rm_watch 1261 jg sys_inotify_rm_watch
1482 1262
1483 .globl compat_sys_openat_wrapper 1263ENTRY(compat_sys_openat_wrapper)
1484compat_sys_openat_wrapper:
1485 llgfr %r2,%r2 # unsigned int 1264 llgfr %r2,%r2 # unsigned int
1486 llgtr %r3,%r3 # const char * 1265 llgtr %r3,%r3 # const char *
1487 lgfr %r4,%r4 # int 1266 lgfr %r4,%r4 # int
1488 lgfr %r5,%r5 # int 1267 lgfr %r5,%r5 # int
1489 jg compat_sys_openat 1268 jg compat_sys_openat
1490 1269
1491 .globl sys_mkdirat_wrapper 1270ENTRY(sys_mkdirat_wrapper)
1492sys_mkdirat_wrapper:
1493 lgfr %r2,%r2 # int 1271 lgfr %r2,%r2 # int
1494 llgtr %r3,%r3 # const char * 1272 llgtr %r3,%r3 # const char *
1495 lgfr %r4,%r4 # int 1273 lgfr %r4,%r4 # int
1496 jg sys_mkdirat 1274 jg sys_mkdirat
1497 1275
1498 .globl sys_mknodat_wrapper 1276ENTRY(sys_mknodat_wrapper)
1499sys_mknodat_wrapper:
1500 lgfr %r2,%r2 # int 1277 lgfr %r2,%r2 # int
1501 llgtr %r3,%r3 # const char * 1278 llgtr %r3,%r3 # const char *
1502 lgfr %r4,%r4 # int 1279 lgfr %r4,%r4 # int
1503 llgfr %r5,%r5 # unsigned int 1280 llgfr %r5,%r5 # unsigned int
1504 jg sys_mknodat 1281 jg sys_mknodat
1505 1282
1506 .globl sys_fchownat_wrapper 1283ENTRY(sys_fchownat_wrapper)
1507sys_fchownat_wrapper:
1508 lgfr %r2,%r2 # int 1284 lgfr %r2,%r2 # int
1509 llgtr %r3,%r3 # const char * 1285 llgtr %r3,%r3 # const char *
1510 llgfr %r4,%r4 # uid_t 1286 llgfr %r4,%r4 # uid_t
@@ -1512,38 +1288,33 @@ sys_fchownat_wrapper:
1512 lgfr %r6,%r6 # int 1288 lgfr %r6,%r6 # int
1513 jg sys_fchownat 1289 jg sys_fchownat
1514 1290
1515 .globl compat_sys_futimesat_wrapper 1291ENTRY(compat_sys_futimesat_wrapper)
1516compat_sys_futimesat_wrapper:
1517 llgfr %r2,%r2 # unsigned int 1292 llgfr %r2,%r2 # unsigned int
1518 llgtr %r3,%r3 # char * 1293 llgtr %r3,%r3 # char *
1519 llgtr %r4,%r4 # struct timeval * 1294 llgtr %r4,%r4 # struct timeval *
1520 jg compat_sys_futimesat 1295 jg compat_sys_futimesat
1521 1296
1522 .globl sys32_fstatat64_wrapper 1297ENTRY(sys32_fstatat64_wrapper)
1523sys32_fstatat64_wrapper:
1524 llgfr %r2,%r2 # unsigned int 1298 llgfr %r2,%r2 # unsigned int
1525 llgtr %r3,%r3 # char * 1299 llgtr %r3,%r3 # char *
1526 llgtr %r4,%r4 # struct stat64 * 1300 llgtr %r4,%r4 # struct stat64 *
1527 lgfr %r5,%r5 # int 1301 lgfr %r5,%r5 # int
1528 jg sys32_fstatat64 1302 jg sys32_fstatat64
1529 1303
1530 .globl sys_unlinkat_wrapper 1304ENTRY(sys_unlinkat_wrapper)
1531sys_unlinkat_wrapper:
1532 lgfr %r2,%r2 # int 1305 lgfr %r2,%r2 # int
1533 llgtr %r3,%r3 # const char * 1306 llgtr %r3,%r3 # const char *
1534 lgfr %r4,%r4 # int 1307 lgfr %r4,%r4 # int
1535 jg sys_unlinkat 1308 jg sys_unlinkat
1536 1309
1537 .globl sys_renameat_wrapper 1310ENTRY(sys_renameat_wrapper)
1538sys_renameat_wrapper:
1539 lgfr %r2,%r2 # int 1311 lgfr %r2,%r2 # int
1540 llgtr %r3,%r3 # const char * 1312 llgtr %r3,%r3 # const char *
1541 lgfr %r4,%r4 # int 1313 lgfr %r4,%r4 # int
1542 llgtr %r5,%r5 # const char * 1314 llgtr %r5,%r5 # const char *
1543 jg sys_renameat 1315 jg sys_renameat
1544 1316
1545 .globl sys_linkat_wrapper 1317ENTRY(sys_linkat_wrapper)
1546sys_linkat_wrapper:
1547 lgfr %r2,%r2 # int 1318 lgfr %r2,%r2 # int
1548 llgtr %r3,%r3 # const char * 1319 llgtr %r3,%r3 # const char *
1549 lgfr %r4,%r4 # int 1320 lgfr %r4,%r4 # int
@@ -1551,37 +1322,32 @@ sys_linkat_wrapper:
1551 lgfr %r6,%r6 # int 1322 lgfr %r6,%r6 # int
1552 jg sys_linkat 1323 jg sys_linkat
1553 1324
1554 .globl sys_symlinkat_wrapper 1325ENTRY(sys_symlinkat_wrapper)
1555sys_symlinkat_wrapper:
1556 llgtr %r2,%r2 # const char * 1326 llgtr %r2,%r2 # const char *
1557 lgfr %r3,%r3 # int 1327 lgfr %r3,%r3 # int
1558 llgtr %r4,%r4 # const char * 1328 llgtr %r4,%r4 # const char *
1559 jg sys_symlinkat 1329 jg sys_symlinkat
1560 1330
1561 .globl sys_readlinkat_wrapper 1331ENTRY(sys_readlinkat_wrapper)
1562sys_readlinkat_wrapper:
1563 lgfr %r2,%r2 # int 1332 lgfr %r2,%r2 # int
1564 llgtr %r3,%r3 # const char * 1333 llgtr %r3,%r3 # const char *
1565 llgtr %r4,%r4 # char * 1334 llgtr %r4,%r4 # char *
1566 lgfr %r5,%r5 # int 1335 lgfr %r5,%r5 # int
1567 jg sys_readlinkat 1336 jg sys_readlinkat
1568 1337
1569 .globl sys_fchmodat_wrapper 1338ENTRY(sys_fchmodat_wrapper)
1570sys_fchmodat_wrapper:
1571 lgfr %r2,%r2 # int 1339 lgfr %r2,%r2 # int
1572 llgtr %r3,%r3 # const char * 1340 llgtr %r3,%r3 # const char *
1573 llgfr %r4,%r4 # mode_t 1341 llgfr %r4,%r4 # mode_t
1574 jg sys_fchmodat 1342 jg sys_fchmodat
1575 1343
1576 .globl sys_faccessat_wrapper 1344ENTRY(sys_faccessat_wrapper)
1577sys_faccessat_wrapper:
1578 lgfr %r2,%r2 # int 1345 lgfr %r2,%r2 # int
1579 llgtr %r3,%r3 # const char * 1346 llgtr %r3,%r3 # const char *
1580 lgfr %r4,%r4 # int 1347 lgfr %r4,%r4 # int
1581 jg sys_faccessat 1348 jg sys_faccessat
1582 1349
1583 .globl compat_sys_pselect6_wrapper 1350ENTRY(compat_sys_pselect6_wrapper)
1584compat_sys_pselect6_wrapper:
1585 lgfr %r2,%r2 # int 1351 lgfr %r2,%r2 # int
1586 llgtr %r3,%r3 # fd_set * 1352 llgtr %r3,%r3 # fd_set *
1587 llgtr %r4,%r4 # fd_set * 1353 llgtr %r4,%r4 # fd_set *
@@ -1591,8 +1357,7 @@ compat_sys_pselect6_wrapper:
1591 stg %r0,160(%r15) 1357 stg %r0,160(%r15)
1592 jg compat_sys_pselect6 1358 jg compat_sys_pselect6
1593 1359
1594 .globl compat_sys_ppoll_wrapper 1360ENTRY(compat_sys_ppoll_wrapper)
1595compat_sys_ppoll_wrapper:
1596 llgtr %r2,%r2 # struct pollfd * 1361 llgtr %r2,%r2 # struct pollfd *
1597 llgfr %r3,%r3 # unsigned int 1362 llgfr %r3,%r3 # unsigned int
1598 llgtr %r4,%r4 # struct timespec * 1363 llgtr %r4,%r4 # struct timespec *
@@ -1600,26 +1365,22 @@ compat_sys_ppoll_wrapper:
1600 llgfr %r6,%r6 # size_t 1365 llgfr %r6,%r6 # size_t
1601 jg compat_sys_ppoll 1366 jg compat_sys_ppoll
1602 1367
1603 .globl sys_unshare_wrapper 1368ENTRY(sys_unshare_wrapper)
1604sys_unshare_wrapper:
1605 llgfr %r2,%r2 # unsigned long 1369 llgfr %r2,%r2 # unsigned long
1606 jg sys_unshare 1370 jg sys_unshare
1607 1371
1608 .globl compat_sys_set_robust_list_wrapper 1372ENTRY(compat_sys_set_robust_list_wrapper)
1609compat_sys_set_robust_list_wrapper:
1610 llgtr %r2,%r2 # struct compat_robust_list_head * 1373 llgtr %r2,%r2 # struct compat_robust_list_head *
1611 llgfr %r3,%r3 # size_t 1374 llgfr %r3,%r3 # size_t
1612 jg compat_sys_set_robust_list 1375 jg compat_sys_set_robust_list
1613 1376
1614 .globl compat_sys_get_robust_list_wrapper 1377ENTRY(compat_sys_get_robust_list_wrapper)
1615compat_sys_get_robust_list_wrapper:
1616 lgfr %r2,%r2 # int 1378 lgfr %r2,%r2 # int
1617 llgtr %r3,%r3 # compat_uptr_t_t * 1379 llgtr %r3,%r3 # compat_uptr_t_t *
1618 llgtr %r4,%r4 # compat_size_t * 1380 llgtr %r4,%r4 # compat_size_t *
1619 jg compat_sys_get_robust_list 1381 jg compat_sys_get_robust_list
1620 1382
1621 .globl sys_splice_wrapper 1383ENTRY(sys_splice_wrapper)
1622sys_splice_wrapper:
1623 lgfr %r2,%r2 # int 1384 lgfr %r2,%r2 # int
1624 llgtr %r3,%r3 # loff_t * 1385 llgtr %r3,%r3 # loff_t *
1625 lgfr %r4,%r4 # int 1386 lgfr %r4,%r4 # int
@@ -1629,8 +1390,7 @@ sys_splice_wrapper:
1629 stg %r0,160(%r15) 1390 stg %r0,160(%r15)
1630 jg sys_splice 1391 jg sys_splice
1631 1392
1632 .globl sys_sync_file_range_wrapper 1393ENTRY(sys_sync_file_range_wrapper)
1633sys_sync_file_range_wrapper:
1634 lgfr %r2,%r2 # int 1394 lgfr %r2,%r2 # int
1635 sllg %r3,%r3,32 # get high word of 64bit loff_t 1395 sllg %r3,%r3,32 # get high word of 64bit loff_t
1636 or %r3,%r4 # get low word of 64bit loff_t 1396 or %r3,%r4 # get low word of 64bit loff_t
@@ -1639,31 +1399,27 @@ sys_sync_file_range_wrapper:
1639 llgf %r5,164(%r15) # unsigned int 1399 llgf %r5,164(%r15) # unsigned int
1640 jg sys_sync_file_range 1400 jg sys_sync_file_range
1641 1401
1642 .globl sys_tee_wrapper 1402ENTRY(sys_tee_wrapper)
1643sys_tee_wrapper:
1644 lgfr %r2,%r2 # int 1403 lgfr %r2,%r2 # int
1645 lgfr %r3,%r3 # int 1404 lgfr %r3,%r3 # int
1646 llgfr %r4,%r4 # size_t 1405 llgfr %r4,%r4 # size_t
1647 llgfr %r5,%r5 # unsigned int 1406 llgfr %r5,%r5 # unsigned int
1648 jg sys_tee 1407 jg sys_tee
1649 1408
1650 .globl compat_sys_vmsplice_wrapper 1409ENTRY(compat_sys_vmsplice_wrapper)
1651compat_sys_vmsplice_wrapper:
1652 lgfr %r2,%r2 # int 1410 lgfr %r2,%r2 # int
1653 llgtr %r3,%r3 # compat_iovec * 1411 llgtr %r3,%r3 # compat_iovec *
1654 llgfr %r4,%r4 # unsigned int 1412 llgfr %r4,%r4 # unsigned int
1655 llgfr %r5,%r5 # unsigned int 1413 llgfr %r5,%r5 # unsigned int
1656 jg compat_sys_vmsplice 1414 jg compat_sys_vmsplice
1657 1415
1658 .globl sys_getcpu_wrapper 1416ENTRY(sys_getcpu_wrapper)
1659sys_getcpu_wrapper:
1660 llgtr %r2,%r2 # unsigned * 1417 llgtr %r2,%r2 # unsigned *
1661 llgtr %r3,%r3 # unsigned * 1418 llgtr %r3,%r3 # unsigned *
1662 llgtr %r4,%r4 # struct getcpu_cache * 1419 llgtr %r4,%r4 # struct getcpu_cache *
1663 jg sys_getcpu 1420 jg sys_getcpu
1664 1421
1665 .globl compat_sys_epoll_pwait_wrapper 1422ENTRY(compat_sys_epoll_pwait_wrapper)
1666compat_sys_epoll_pwait_wrapper:
1667 lgfr %r2,%r2 # int 1423 lgfr %r2,%r2 # int
1668 llgtr %r3,%r3 # struct compat_epoll_event * 1424 llgtr %r3,%r3 # struct compat_epoll_event *
1669 lgfr %r4,%r4 # int 1425 lgfr %r4,%r4 # int
@@ -1673,34 +1429,29 @@ compat_sys_epoll_pwait_wrapper:
1673 stg %r0,160(%r15) 1429 stg %r0,160(%r15)
1674 jg compat_sys_epoll_pwait 1430 jg compat_sys_epoll_pwait
1675 1431
1676 .globl compat_sys_utimes_wrapper 1432ENTRY(compat_sys_utimes_wrapper)
1677compat_sys_utimes_wrapper:
1678 llgtr %r2,%r2 # char * 1433 llgtr %r2,%r2 # char *
1679 llgtr %r3,%r3 # struct compat_timeval * 1434 llgtr %r3,%r3 # struct compat_timeval *
1680 jg compat_sys_utimes 1435 jg compat_sys_utimes
1681 1436
1682 .globl compat_sys_utimensat_wrapper 1437ENTRY(compat_sys_utimensat_wrapper)
1683compat_sys_utimensat_wrapper:
1684 llgfr %r2,%r2 # unsigned int 1438 llgfr %r2,%r2 # unsigned int
1685 llgtr %r3,%r3 # char * 1439 llgtr %r3,%r3 # char *
1686 llgtr %r4,%r4 # struct compat_timespec * 1440 llgtr %r4,%r4 # struct compat_timespec *
1687 lgfr %r5,%r5 # int 1441 lgfr %r5,%r5 # int
1688 jg compat_sys_utimensat 1442 jg compat_sys_utimensat
1689 1443
1690 .globl compat_sys_signalfd_wrapper 1444ENTRY(compat_sys_signalfd_wrapper)
1691compat_sys_signalfd_wrapper:
1692 lgfr %r2,%r2 # int 1445 lgfr %r2,%r2 # int
1693 llgtr %r3,%r3 # compat_sigset_t * 1446 llgtr %r3,%r3 # compat_sigset_t *
1694 llgfr %r4,%r4 # compat_size_t 1447 llgfr %r4,%r4 # compat_size_t
1695 jg compat_sys_signalfd 1448 jg compat_sys_signalfd
1696 1449
1697 .globl sys_eventfd_wrapper 1450ENTRY(sys_eventfd_wrapper)
1698sys_eventfd_wrapper:
1699 llgfr %r2,%r2 # unsigned int 1451 llgfr %r2,%r2 # unsigned int
1700 jg sys_eventfd 1452 jg sys_eventfd
1701 1453
1702 .globl sys_fallocate_wrapper 1454ENTRY(sys_fallocate_wrapper)
1703sys_fallocate_wrapper:
1704 lgfr %r2,%r2 # int 1455 lgfr %r2,%r2 # int
1705 lgfr %r3,%r3 # int 1456 lgfr %r3,%r3 # int
1706 sllg %r4,%r4,32 # get high word of 64bit loff_t 1457 sllg %r4,%r4,32 # get high word of 64bit loff_t
@@ -1709,94 +1460,80 @@ sys_fallocate_wrapper:
1709 l %r5,164(%r15) # get low word of 64bit loff_t 1460 l %r5,164(%r15) # get low word of 64bit loff_t
1710 jg sys_fallocate 1461 jg sys_fallocate
1711 1462
1712 .globl sys_timerfd_create_wrapper 1463ENTRY(sys_timerfd_create_wrapper)
1713sys_timerfd_create_wrapper:
1714 lgfr %r2,%r2 # int 1464 lgfr %r2,%r2 # int
1715 lgfr %r3,%r3 # int 1465 lgfr %r3,%r3 # int
1716 jg sys_timerfd_create 1466 jg sys_timerfd_create
1717 1467
1718 .globl compat_sys_timerfd_settime_wrapper 1468ENTRY(compat_sys_timerfd_settime_wrapper)
1719compat_sys_timerfd_settime_wrapper:
1720 lgfr %r2,%r2 # int 1469 lgfr %r2,%r2 # int
1721 lgfr %r3,%r3 # int 1470 lgfr %r3,%r3 # int
1722 llgtr %r4,%r4 # struct compat_itimerspec * 1471 llgtr %r4,%r4 # struct compat_itimerspec *
1723 llgtr %r5,%r5 # struct compat_itimerspec * 1472 llgtr %r5,%r5 # struct compat_itimerspec *
1724 jg compat_sys_timerfd_settime 1473 jg compat_sys_timerfd_settime
1725 1474
1726 .globl compat_sys_timerfd_gettime_wrapper 1475ENTRY(compat_sys_timerfd_gettime_wrapper)
1727compat_sys_timerfd_gettime_wrapper:
1728 lgfr %r2,%r2 # int 1476 lgfr %r2,%r2 # int
1729 llgtr %r3,%r3 # struct compat_itimerspec * 1477 llgtr %r3,%r3 # struct compat_itimerspec *
1730 jg compat_sys_timerfd_gettime 1478 jg compat_sys_timerfd_gettime
1731 1479
1732 .globl compat_sys_signalfd4_wrapper 1480ENTRY(compat_sys_signalfd4_wrapper)
1733compat_sys_signalfd4_wrapper:
1734 lgfr %r2,%r2 # int 1481 lgfr %r2,%r2 # int
1735 llgtr %r3,%r3 # compat_sigset_t * 1482 llgtr %r3,%r3 # compat_sigset_t *
1736 llgfr %r4,%r4 # compat_size_t 1483 llgfr %r4,%r4 # compat_size_t
1737 lgfr %r5,%r5 # int 1484 lgfr %r5,%r5 # int
1738 jg compat_sys_signalfd4 1485 jg compat_sys_signalfd4
1739 1486
1740 .globl sys_eventfd2_wrapper 1487ENTRY(sys_eventfd2_wrapper)
1741sys_eventfd2_wrapper:
1742 llgfr %r2,%r2 # unsigned int 1488 llgfr %r2,%r2 # unsigned int
1743 lgfr %r3,%r3 # int 1489 lgfr %r3,%r3 # int
1744 jg sys_eventfd2 1490 jg sys_eventfd2
1745 1491
1746 .globl sys_inotify_init1_wrapper 1492ENTRY(sys_inotify_init1_wrapper)
1747sys_inotify_init1_wrapper:
1748 lgfr %r2,%r2 # int 1493 lgfr %r2,%r2 # int
1749 jg sys_inotify_init1 1494 jg sys_inotify_init1
1750 1495
1751 .globl sys_pipe2_wrapper 1496ENTRY(sys_pipe2_wrapper)
1752sys_pipe2_wrapper:
1753 llgtr %r2,%r2 # u32 * 1497 llgtr %r2,%r2 # u32 *
1754 lgfr %r3,%r3 # int 1498 lgfr %r3,%r3 # int
1755 jg sys_pipe2 # branch to system call 1499 jg sys_pipe2 # branch to system call
1756 1500
1757 .globl sys_dup3_wrapper 1501ENTRY(sys_dup3_wrapper)
1758sys_dup3_wrapper:
1759 llgfr %r2,%r2 # unsigned int 1502 llgfr %r2,%r2 # unsigned int
1760 llgfr %r3,%r3 # unsigned int 1503 llgfr %r3,%r3 # unsigned int
1761 lgfr %r4,%r4 # int 1504 lgfr %r4,%r4 # int
1762 jg sys_dup3 # branch to system call 1505 jg sys_dup3 # branch to system call
1763 1506
1764 .globl sys_epoll_create1_wrapper 1507ENTRY(sys_epoll_create1_wrapper)
1765sys_epoll_create1_wrapper:
1766 lgfr %r2,%r2 # int 1508 lgfr %r2,%r2 # int
1767 jg sys_epoll_create1 # branch to system call 1509 jg sys_epoll_create1 # branch to system call
1768 1510
1769 .globl sys32_readahead_wrapper 1511ENTRY(sys32_readahead_wrapper)
1770sys32_readahead_wrapper:
1771 lgfr %r2,%r2 # int 1512 lgfr %r2,%r2 # int
1772 llgfr %r3,%r3 # u32 1513 llgfr %r3,%r3 # u32
1773 llgfr %r4,%r4 # u32 1514 llgfr %r4,%r4 # u32
1774 lgfr %r5,%r5 # s32 1515 lgfr %r5,%r5 # s32
1775 jg sys32_readahead # branch to system call 1516 jg sys32_readahead # branch to system call
1776 1517
1777 .globl sys32_sendfile64_wrapper 1518ENTRY(sys32_sendfile64_wrapper)
1778sys32_sendfile64_wrapper:
1779 lgfr %r2,%r2 # int 1519 lgfr %r2,%r2 # int
1780 lgfr %r3,%r3 # int 1520 lgfr %r3,%r3 # int
1781 llgtr %r4,%r4 # compat_loff_t * 1521 llgtr %r4,%r4 # compat_loff_t *
1782 lgfr %r5,%r5 # s32 1522 lgfr %r5,%r5 # s32
1783 jg sys32_sendfile64 # branch to system call 1523 jg sys32_sendfile64 # branch to system call
1784 1524
1785 .globl sys_tkill_wrapper 1525ENTRY(sys_tkill_wrapper)
1786sys_tkill_wrapper:
1787 lgfr %r2,%r2 # pid_t 1526 lgfr %r2,%r2 # pid_t
1788 lgfr %r3,%r3 # int 1527 lgfr %r3,%r3 # int
1789 jg sys_tkill # branch to system call 1528 jg sys_tkill # branch to system call
1790 1529
1791 .globl sys_tgkill_wrapper 1530ENTRY(sys_tgkill_wrapper)
1792sys_tgkill_wrapper:
1793 lgfr %r2,%r2 # pid_t 1531 lgfr %r2,%r2 # pid_t
1794 lgfr %r3,%r3 # pid_t 1532 lgfr %r3,%r3 # pid_t
1795 lgfr %r4,%r4 # int 1533 lgfr %r4,%r4 # int
1796 jg sys_tgkill # branch to system call 1534 jg sys_tgkill # branch to system call
1797 1535
1798 .globl compat_sys_keyctl_wrapper 1536ENTRY(compat_sys_keyctl_wrapper)
1799compat_sys_keyctl_wrapper:
1800 llgfr %r2,%r2 # u32 1537 llgfr %r2,%r2 # u32
1801 llgfr %r3,%r3 # u32 1538 llgfr %r3,%r3 # u32
1802 llgfr %r4,%r4 # u32 1539 llgfr %r4,%r4 # u32
@@ -1804,8 +1541,7 @@ compat_sys_keyctl_wrapper:
1804 llgfr %r6,%r6 # u32 1541 llgfr %r6,%r6 # u32
1805 jg compat_sys_keyctl # branch to system call 1542 jg compat_sys_keyctl # branch to system call
1806 1543
1807 .globl compat_sys_preadv_wrapper 1544ENTRY(compat_sys_preadv_wrapper)
1808compat_sys_preadv_wrapper:
1809 llgfr %r2,%r2 # unsigned long 1545 llgfr %r2,%r2 # unsigned long
1810 llgtr %r3,%r3 # compat_iovec * 1546 llgtr %r3,%r3 # compat_iovec *
1811 llgfr %r4,%r4 # unsigned long 1547 llgfr %r4,%r4 # unsigned long
@@ -1813,8 +1549,7 @@ compat_sys_preadv_wrapper:
1813 llgfr %r6,%r6 # u32 1549 llgfr %r6,%r6 # u32
1814 jg compat_sys_preadv # branch to system call 1550 jg compat_sys_preadv # branch to system call
1815 1551
1816 .globl compat_sys_pwritev_wrapper 1552ENTRY(compat_sys_pwritev_wrapper)
1817compat_sys_pwritev_wrapper:
1818 llgfr %r2,%r2 # unsigned long 1553 llgfr %r2,%r2 # unsigned long
1819 llgtr %r3,%r3 # compat_iovec * 1554 llgtr %r3,%r3 # compat_iovec *
1820 llgfr %r4,%r4 # unsigned long 1555 llgfr %r4,%r4 # unsigned long
@@ -1822,16 +1557,14 @@ compat_sys_pwritev_wrapper:
1822 llgfr %r6,%r6 # u32 1557 llgfr %r6,%r6 # u32
1823 jg compat_sys_pwritev # branch to system call 1558 jg compat_sys_pwritev # branch to system call
1824 1559
1825 .globl compat_sys_rt_tgsigqueueinfo_wrapper 1560ENTRY(compat_sys_rt_tgsigqueueinfo_wrapper)
1826compat_sys_rt_tgsigqueueinfo_wrapper:
1827 lgfr %r2,%r2 # compat_pid_t 1561 lgfr %r2,%r2 # compat_pid_t
1828 lgfr %r3,%r3 # compat_pid_t 1562 lgfr %r3,%r3 # compat_pid_t
1829 lgfr %r4,%r4 # int 1563 lgfr %r4,%r4 # int
1830 llgtr %r5,%r5 # struct compat_siginfo * 1564 llgtr %r5,%r5 # struct compat_siginfo *
1831 jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call 1565 jg compat_sys_rt_tgsigqueueinfo_wrapper # branch to system call
1832 1566
1833 .globl sys_perf_event_open_wrapper 1567ENTRY(sys_perf_event_open_wrapper)
1834sys_perf_event_open_wrapper:
1835 llgtr %r2,%r2 # const struct perf_event_attr * 1568 llgtr %r2,%r2 # const struct perf_event_attr *
1836 lgfr %r3,%r3 # pid_t 1569 lgfr %r3,%r3 # pid_t
1837 lgfr %r4,%r4 # int 1570 lgfr %r4,%r4 # int
@@ -1839,29 +1572,25 @@ sys_perf_event_open_wrapper:
1839 llgfr %r6,%r6 # unsigned long 1572 llgfr %r6,%r6 # unsigned long
1840 jg sys_perf_event_open # branch to system call 1573 jg sys_perf_event_open # branch to system call
1841 1574
1842 .globl sys_clone_wrapper 1575ENTRY(sys_clone_wrapper)
1843sys_clone_wrapper:
1844 llgfr %r2,%r2 # unsigned long 1576 llgfr %r2,%r2 # unsigned long
1845 llgfr %r3,%r3 # unsigned long 1577 llgfr %r3,%r3 # unsigned long
1846 llgtr %r4,%r4 # int * 1578 llgtr %r4,%r4 # int *
1847 llgtr %r5,%r5 # int * 1579 llgtr %r5,%r5 # int *
1848 jg sys_clone # branch to system call 1580 jg sys_clone # branch to system call
1849 1581
1850 .globl sys32_execve_wrapper 1582ENTRY(sys32_execve_wrapper)
1851sys32_execve_wrapper:
1852 llgtr %r2,%r2 # char * 1583 llgtr %r2,%r2 # char *
1853 llgtr %r3,%r3 # compat_uptr_t * 1584 llgtr %r3,%r3 # compat_uptr_t *
1854 llgtr %r4,%r4 # compat_uptr_t * 1585 llgtr %r4,%r4 # compat_uptr_t *
1855 jg sys32_execve # branch to system call 1586 jg sys32_execve # branch to system call
1856 1587
1857 .globl sys_fanotify_init_wrapper 1588ENTRY(sys_fanotify_init_wrapper)
1858sys_fanotify_init_wrapper:
1859 llgfr %r2,%r2 # unsigned int 1589 llgfr %r2,%r2 # unsigned int
1860 llgfr %r3,%r3 # unsigned int 1590 llgfr %r3,%r3 # unsigned int
1861 jg sys_fanotify_init # branch to system call 1591 jg sys_fanotify_init # branch to system call
1862 1592
1863 .globl sys_fanotify_mark_wrapper 1593ENTRY(sys_fanotify_mark_wrapper)
1864sys_fanotify_mark_wrapper:
1865 lgfr %r2,%r2 # int 1594 lgfr %r2,%r2 # int
1866 llgfr %r3,%r3 # unsigned int 1595 llgfr %r3,%r3 # unsigned int
1867 sllg %r4,%r4,32 # get high word of 64bit mask 1596 sllg %r4,%r4,32 # get high word of 64bit mask
@@ -1870,16 +1599,14 @@ sys_fanotify_mark_wrapper:
1870 llgt %r6,164(%r15) # char * 1599 llgt %r6,164(%r15) # char *
1871 jg sys_fanotify_mark # branch to system call 1600 jg sys_fanotify_mark # branch to system call
1872 1601
1873 .globl sys_prlimit64_wrapper 1602ENTRY(sys_prlimit64_wrapper)
1874sys_prlimit64_wrapper:
1875 lgfr %r2,%r2 # pid_t 1603 lgfr %r2,%r2 # pid_t
1876 llgfr %r3,%r3 # unsigned int 1604 llgfr %r3,%r3 # unsigned int
1877 llgtr %r4,%r4 # const struct rlimit64 __user * 1605 llgtr %r4,%r4 # const struct rlimit64 __user *
1878 llgtr %r5,%r5 # struct rlimit64 __user * 1606 llgtr %r5,%r5 # struct rlimit64 __user *
1879 jg sys_prlimit64 # branch to system call 1607 jg sys_prlimit64 # branch to system call
1880 1608
1881 .globl sys_name_to_handle_at_wrapper 1609ENTRY(sys_name_to_handle_at_wrapper)
1882sys_name_to_handle_at_wrapper:
1883 lgfr %r2,%r2 # int 1610 lgfr %r2,%r2 # int
1884 llgtr %r3,%r3 # const char __user * 1611 llgtr %r3,%r3 # const char __user *
1885 llgtr %r4,%r4 # struct file_handle __user * 1612 llgtr %r4,%r4 # struct file_handle __user *
@@ -1887,21 +1614,18 @@ sys_name_to_handle_at_wrapper:
1887 lgfr %r6,%r6 # int 1614 lgfr %r6,%r6 # int
1888 jg sys_name_to_handle_at 1615 jg sys_name_to_handle_at
1889 1616
1890 .globl compat_sys_open_by_handle_at_wrapper 1617ENTRY(compat_sys_open_by_handle_at_wrapper)
1891compat_sys_open_by_handle_at_wrapper:
1892 lgfr %r2,%r2 # int 1618 lgfr %r2,%r2 # int
1893 llgtr %r3,%r3 # struct file_handle __user * 1619 llgtr %r3,%r3 # struct file_handle __user *
1894 lgfr %r4,%r4 # int 1620 lgfr %r4,%r4 # int
1895 jg compat_sys_open_by_handle_at 1621 jg compat_sys_open_by_handle_at
1896 1622
1897 .globl compat_sys_clock_adjtime_wrapper 1623ENTRY(compat_sys_clock_adjtime_wrapper)
1898compat_sys_clock_adjtime_wrapper:
1899 lgfr %r2,%r2 # clockid_t (int) 1624 lgfr %r2,%r2 # clockid_t (int)
1900 llgtr %r3,%r3 # struct compat_timex __user * 1625 llgtr %r3,%r3 # struct compat_timex __user *
1901 jg compat_sys_clock_adjtime 1626 jg compat_sys_clock_adjtime
1902 1627
1903 .globl sys_syncfs_wrapper 1628ENTRY(sys_syncfs_wrapper)
1904sys_syncfs_wrapper:
1905 lgfr %r2,%r2 # int 1629 lgfr %r2,%r2 # int
1906 jg sys_syncfs 1630 jg sys_syncfs
1907 1631
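Every wrapper above follows the same pattern: the explicit ".globl name" plus "name:" pair is replaced by the ENTRY() macro from <linux/linkage.h>, which also emits an alignment directive so each entry point starts on a consistent boundary. For reference, the generic fallback definition has roughly this shape; the ALIGN directive itself is architecture dependent, so treat this as a sketch rather than the exact s390 expansion:

	/* generic fallback in include/linux/linkage.h (shape only) */
	#ifndef ENTRY
	#define ENTRY(name) \
		.globl name; \
		ALIGN; \
		name:
	#endif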
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 0476174dfff5..3eab7cfab07c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -9,8 +9,8 @@
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/linkage.h>
13#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15#include <asm/errno.h> 15#include <asm/errno.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
@@ -197,8 +197,7 @@ STACK_SIZE = 1 << STACK_SHIFT
197 * Returns: 197 * Returns:
198 * gpr2 = prev 198 * gpr2 = prev
199 */ 199 */
200 .globl __switch_to 200ENTRY(__switch_to)
201__switch_to:
202 basr %r1,0 201 basr %r1,0
2030: l %r4,__THREAD_info(%r2) # get thread_info of prev 2020: l %r4,__THREAD_info(%r2) # get thread_info of prev
204 l %r5,__THREAD_info(%r3) # get thread_info of next 203 l %r5,__THREAD_info(%r3) # get thread_info of next
@@ -224,8 +223,7 @@ __critical_start:
224 * are executed with interrupts enabled. 223 * are executed with interrupts enabled.
225 */ 224 */
226 225
227 .globl system_call 226ENTRY(system_call)
228system_call:
229 stpt __LC_SYNC_ENTER_TIMER 227 stpt __LC_SYNC_ENTER_TIMER
230sysc_saveall: 228sysc_saveall:
231 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 229 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -388,8 +386,7 @@ sysc_tracenogo:
388# 386#
389# a new process exits the kernel with ret_from_fork 387# a new process exits the kernel with ret_from_fork
390# 388#
391 .globl ret_from_fork 389ENTRY(ret_from_fork)
392ret_from_fork:
393 l %r13,__LC_SVC_NEW_PSW+4 390 l %r13,__LC_SVC_NEW_PSW+4
394 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct 391 l %r12,__LC_THREAD_INFO # load pointer to thread_info struct
395 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 392 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
@@ -405,8 +402,7 @@ ret_from_fork:
405# kernel_execve function needs to deal with pt_regs that is not 402# kernel_execve function needs to deal with pt_regs that is not
406# at the usual place 403# at the usual place
407# 404#
408 .globl kernel_execve 405ENTRY(kernel_execve)
409kernel_execve:
410 stm %r12,%r15,48(%r15) 406 stm %r12,%r15,48(%r15)
411 lr %r14,%r15 407 lr %r14,%r15
412 l %r13,__LC_SVC_NEW_PSW+4 408 l %r13,__LC_SVC_NEW_PSW+4
@@ -438,8 +434,7 @@ kernel_execve:
438 * Program check handler routine 434 * Program check handler routine
439 */ 435 */
440 436
441 .globl pgm_check_handler 437ENTRY(pgm_check_handler)
442pgm_check_handler:
443/* 438/*
444 * First we need to check for a special case: 439 * First we need to check for a special case:
445 * Single stepping an instruction that disables the PER event mask will 440 * Single stepping an instruction that disables the PER event mask will
@@ -565,8 +560,7 @@ kernel_per:
565 * IO interrupt handler routine 560 * IO interrupt handler routine
566 */ 561 */
567 562
568 .globl io_int_handler 563ENTRY(io_int_handler)
569io_int_handler:
570 stck __LC_INT_CLOCK 564 stck __LC_INT_CLOCK
571 stpt __LC_ASYNC_ENTER_TIMER 565 stpt __LC_ASYNC_ENTER_TIMER
572 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16 566 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -703,8 +697,7 @@ io_notify_resume:
703 * External interrupt handler routine 697 * External interrupt handler routine
704 */ 698 */
705 699
706 .globl ext_int_handler 700ENTRY(ext_int_handler)
707ext_int_handler:
708 stck __LC_INT_CLOCK 701 stck __LC_INT_CLOCK
709 stpt __LC_ASYNC_ENTER_TIMER 702 stpt __LC_ASYNC_ENTER_TIMER
710 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16 703 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -731,8 +724,7 @@ __critical_end:
731 * Machine check handler routines 724 * Machine check handler routines
732 */ 725 */
733 726
734 .globl mcck_int_handler 727ENTRY(mcck_int_handler)
735mcck_int_handler:
736 stck __LC_MCCK_CLOCK 728 stck __LC_MCCK_CLOCK
737 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer 729 spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
738 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs 730 lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
@@ -818,8 +810,7 @@ mcck_return:
818 */ 810 */
819#ifdef CONFIG_SMP 811#ifdef CONFIG_SMP
820 __CPUINIT 812 __CPUINIT
821 .globl restart_int_handler 813ENTRY(restart_int_handler)
822restart_int_handler:
823 basr %r1,0 814 basr %r1,0
824restart_base: 815restart_base:
825 spt restart_vtime-restart_base(%r1) 816 spt restart_vtime-restart_base(%r1)
@@ -848,8 +839,7 @@ restart_vtime:
848/* 839/*
849 * If we do not run with SMP enabled, let the new CPU crash ... 840 * If we do not run with SMP enabled, let the new CPU crash ...
850 */ 841 */
851 .globl restart_int_handler 842ENTRY(restart_int_handler)
852restart_int_handler:
853 basr %r1,0 843 basr %r1,0
854restart_base: 844restart_base:
855 lpsw restart_crash-restart_base(%r1) 845 lpsw restart_crash-restart_base(%r1)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 17a6f83a2d67..66729eb7bbc5 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,10 +5,9 @@
5#include <linux/signal.h> 5#include <linux/signal.h>
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7 7
8typedef void pgm_check_handler_t(struct pt_regs *, long, unsigned long); 8void do_protection_exception(struct pt_regs *, long, unsigned long);
9extern pgm_check_handler_t *pgm_check_table[128]; 9void do_dat_exception(struct pt_regs *, long, unsigned long);
10pgm_check_handler_t do_protection_exception; 10void do_asce_exception(struct pt_regs *, long, unsigned long);
11pgm_check_handler_t do_dat_exception;
12 11
13extern int sysctl_userprocess_debug; 12extern int sysctl_userprocess_debug;
14 13
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index d61967e2eab0..7a0fd426ca92 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -9,8 +9,8 @@
9 * Heiko Carstens <heiko.carstens@de.ibm.com> 9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 */ 10 */
11 11
12#include <linux/linkage.h>
13#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/linkage.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15#include <asm/errno.h> 15#include <asm/errno.h>
16#include <asm/ptrace.h> 16#include <asm/ptrace.h>
@@ -56,15 +56,28 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
56 _TIF_MCCK_PENDING) 56 _TIF_MCCK_PENDING)
57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ 57_TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
58 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8) 58 _TIF_SECCOMP>>8 | _TIF_SYSCALL_TRACEPOINT>>8)
59_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
59 60
60#define BASED(name) name-system_call(%r13) 61#define BASED(name) name-system_call(%r13)
61 62
63 .macro SPP newpp
64#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
65 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
66 jz .+8
67 .insn s,0xb2800000,\newpp
68#endif
69 .endm
70
62 .macro HANDLE_SIE_INTERCEPT 71 .macro HANDLE_SIE_INTERCEPT
63#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 72#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
64 lg %r3,__LC_SIE_HOOK 73 tm __TI_flags+6(%r12),_TIF_SIE>>8
65 ltgr %r3,%r3
66 jz 0f 74 jz 0f
67 basr %r14,%r3 75 SPP __LC_CMF_HPP # set host id
76 clc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
77 jl 0f
78 clc SP_PSW+8(8,%r15),BASED(.Lsie_done)
79 jhe 0f
80 mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop)
680: 810:
69#endif 82#endif
70 .endm 83 .endm
@@ -206,8 +219,7 @@ _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \
206 * Returns: 219 * Returns:
207 * gpr2 = prev 220 * gpr2 = prev
208 */ 221 */
209 .globl __switch_to 222ENTRY(__switch_to)
210__switch_to:
211 lg %r4,__THREAD_info(%r2) # get thread_info of prev 223 lg %r4,__THREAD_info(%r2) # get thread_info of prev
212 lg %r5,__THREAD_info(%r3) # get thread_info of next 224 lg %r5,__THREAD_info(%r3) # get thread_info of next
213 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 225 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending?
@@ -232,8 +244,7 @@ __critical_start:
232 * are executed with interrupts enabled. 244 * are executed with interrupts enabled.
233 */ 245 */
234 246
235 .globl system_call 247ENTRY(system_call)
236system_call:
237 stpt __LC_SYNC_ENTER_TIMER 248 stpt __LC_SYNC_ENTER_TIMER
238sysc_saveall: 249sysc_saveall:
239 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA 250 SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
@@ -395,8 +406,7 @@ sysc_tracenogo:
395# 406#
396# a new process exits the kernel with ret_from_fork 407# a new process exits the kernel with ret_from_fork
397# 408#
398 .globl ret_from_fork 409ENTRY(ret_from_fork)
399ret_from_fork:
400 lg %r13,__LC_SVC_NEW_PSW+8 410 lg %r13,__LC_SVC_NEW_PSW+8
401 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 411 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
402 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? 412 tm SP_PSW+1(%r15),0x01 # forking a kernel thread ?
@@ -411,8 +421,7 @@ ret_from_fork:
411# kernel_execve function needs to deal with pt_regs that is not 421# kernel_execve function needs to deal with pt_regs that is not
412# at the usual place 422# at the usual place
413# 423#
414 .globl kernel_execve 424ENTRY(kernel_execve)
415kernel_execve:
416 stmg %r12,%r15,96(%r15) 425 stmg %r12,%r15,96(%r15)
417 lgr %r14,%r15 426 lgr %r14,%r15
418 aghi %r15,-SP_SIZE 427 aghi %r15,-SP_SIZE
@@ -442,8 +451,7 @@ kernel_execve:
442 * Program check handler routine 451 * Program check handler routine
443 */ 452 */
444 453
445 .globl pgm_check_handler 454ENTRY(pgm_check_handler)
446pgm_check_handler:
447/* 455/*
448 * First we need to check for a special case: 456 * First we need to check for a special case:
449 * Single stepping an instruction that disables the PER event mask will 457 * Single stepping an instruction that disables the PER event mask will
@@ -465,6 +473,7 @@ pgm_check_handler:
465 xc SP_ILC(4,%r15),SP_ILC(%r15) 473 xc SP_ILC(4,%r15),SP_ILC(%r15)
466 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW 474 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
467 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 475 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
476 HANDLE_SIE_INTERCEPT
468 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 477 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
469 jz pgm_no_vtime 478 jz pgm_no_vtime
470 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 479 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -472,7 +481,6 @@ pgm_check_handler:
472 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 481 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
473 LAST_BREAK 482 LAST_BREAK
474pgm_no_vtime: 483pgm_no_vtime:
475 HANDLE_SIE_INTERCEPT
476 stg %r11,SP_ARGS(%r15) 484 stg %r11,SP_ARGS(%r15)
477 lgf %r3,__LC_PGM_ILC # load program interruption code 485 lgf %r3,__LC_PGM_ILC # load program interruption code
478 lg %r4,__LC_TRANS_EXC_CODE 486 lg %r4,__LC_TRANS_EXC_CODE
@@ -507,6 +515,7 @@ pgm_per_std:
507 CREATE_STACK_FRAME __LC_SAVE_AREA 515 CREATE_STACK_FRAME __LC_SAVE_AREA
508 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW 516 mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW
509 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 517 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
518 HANDLE_SIE_INTERCEPT
510 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 519 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
511 jz pgm_no_vtime2 520 jz pgm_no_vtime2
512 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER 521 UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -514,7 +523,6 @@ pgm_per_std:
514 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 523 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
515 LAST_BREAK 524 LAST_BREAK
516pgm_no_vtime2: 525pgm_no_vtime2:
517 HANDLE_SIE_INTERCEPT
518 lg %r1,__TI_task(%r12) 526 lg %r1,__TI_task(%r12)
519 tm SP_PSW+1(%r15),0x01 # kernel per event ? 527 tm SP_PSW+1(%r15),0x01 # kernel per event ?
520 jz kernel_per 528 jz kernel_per
@@ -571,14 +579,14 @@ kernel_per:
571/* 579/*
572 * IO interrupt handler routine 580 * IO interrupt handler routine
573 */ 581 */
574 .globl io_int_handler 582ENTRY(io_int_handler)
575io_int_handler:
576 stck __LC_INT_CLOCK 583 stck __LC_INT_CLOCK
577 stpt __LC_ASYNC_ENTER_TIMER 584 stpt __LC_ASYNC_ENTER_TIMER
578 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 585 SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40
579 CREATE_STACK_FRAME __LC_SAVE_AREA+40 586 CREATE_STACK_FRAME __LC_SAVE_AREA+40
580 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 587 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
581 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 588 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
589 HANDLE_SIE_INTERCEPT
582 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 590 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
583 jz io_no_vtime 591 jz io_no_vtime
584 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 592 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -586,7 +594,6 @@ io_int_handler:
586 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 594 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
587 LAST_BREAK 595 LAST_BREAK
588io_no_vtime: 596io_no_vtime:
589 HANDLE_SIE_INTERCEPT
590 TRACE_IRQS_OFF 597 TRACE_IRQS_OFF
591 la %r2,SP_PTREGS(%r15) # address of register-save area 598 la %r2,SP_PTREGS(%r15) # address of register-save area
592 brasl %r14,do_IRQ # call standard irq handler 599 brasl %r14,do_IRQ # call standard irq handler
@@ -706,14 +713,14 @@ io_notify_resume:
706/* 713/*
707 * External interrupt handler routine 714 * External interrupt handler routine
708 */ 715 */
709 .globl ext_int_handler 716ENTRY(ext_int_handler)
710ext_int_handler:
711 stck __LC_INT_CLOCK 717 stck __LC_INT_CLOCK
712 stpt __LC_ASYNC_ENTER_TIMER 718 stpt __LC_ASYNC_ENTER_TIMER
713 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 719 SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40
714 CREATE_STACK_FRAME __LC_SAVE_AREA+40 720 CREATE_STACK_FRAME __LC_SAVE_AREA+40
715 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack 721 mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
716 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 722 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
723 HANDLE_SIE_INTERCEPT
717 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 724 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
718 jz ext_no_vtime 725 jz ext_no_vtime
719 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER 726 UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
@@ -721,7 +728,6 @@ ext_int_handler:
721 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER 728 mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
722 LAST_BREAK 729 LAST_BREAK
723ext_no_vtime: 730ext_no_vtime:
724 HANDLE_SIE_INTERCEPT
725 TRACE_IRQS_OFF 731 TRACE_IRQS_OFF
726 lghi %r1,4096 732 lghi %r1,4096
727 la %r2,SP_PTREGS(%r15) # address of register-save area 733 la %r2,SP_PTREGS(%r15) # address of register-save area
@@ -736,8 +742,7 @@ __critical_end:
736/* 742/*
737 * Machine check handler routines 743 * Machine check handler routines
738 */ 744 */
739 .globl mcck_int_handler 745ENTRY(mcck_int_handler)
740mcck_int_handler:
741 stck __LC_MCCK_CLOCK 746 stck __LC_MCCK_CLOCK
742 la %r1,4095 # revalidate r1 747 la %r1,4095 # revalidate r1
743 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer 748 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
@@ -785,6 +790,7 @@ mcck_int_main:
785 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct 790 lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct
786 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? 791 tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid?
787 jno mcck_no_vtime # no -> no timer update 792 jno mcck_no_vtime # no -> no timer update
793 HANDLE_SIE_INTERCEPT
788 tm SP_PSW+1(%r15),0x01 # interrupting from user ? 794 tm SP_PSW+1(%r15),0x01 # interrupting from user ?
789 jz mcck_no_vtime 795 jz mcck_no_vtime
790 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER 796 UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER
@@ -804,7 +810,6 @@ mcck_no_vtime:
804 stosm __SF_EMPTY(%r15),0x04 # turn dat on 810 stosm __SF_EMPTY(%r15),0x04 # turn dat on
805 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING 811 tm __TI_flags+7(%r12),_TIF_MCCK_PENDING
806 jno mcck_return 812 jno mcck_return
807 HANDLE_SIE_INTERCEPT
808 TRACE_IRQS_OFF 813 TRACE_IRQS_OFF
809 brasl %r14,s390_handle_mcck 814 brasl %r14,s390_handle_mcck
810 TRACE_IRQS_ON 815 TRACE_IRQS_ON
@@ -823,8 +828,7 @@ mcck_done:
823 */ 828 */
824#ifdef CONFIG_SMP 829#ifdef CONFIG_SMP
825 __CPUINIT 830 __CPUINIT
826 .globl restart_int_handler 831ENTRY(restart_int_handler)
827restart_int_handler:
828 basr %r1,0 832 basr %r1,0
829restart_base: 833restart_base:
830 spt restart_vtime-restart_base(%r1) 834 spt restart_vtime-restart_base(%r1)
@@ -851,8 +855,7 @@ restart_vtime:
851/* 855/*
852 * If we do not run with SMP enabled, let the new CPU crash ... 856 * If we do not run with SMP enabled, let the new CPU crash ...
853 */ 857 */
854 .globl restart_int_handler 858ENTRY(restart_int_handler)
855restart_int_handler:
856 basr %r1,0 859 basr %r1,0
857restart_base: 860restart_base:
858 lpswe restart_crash-restart_base(%r1) 861 lpswe restart_crash-restart_base(%r1)
@@ -1036,6 +1039,56 @@ cleanup_io_restore_insn:
1036.Lcritical_end: 1039.Lcritical_end:
1037 .quad __critical_end 1040 .quad __critical_end
1038 1041
1042#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
1043/*
1044 * sie64a calling convention:
1045 * %r2 pointer to sie control block
1046 * %r3 guest register save area
1047 */
1048ENTRY(sie64a)
1049 stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
1050 stg %r2,__SF_EMPTY(%r15) # save control block pointer
1051 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
1052 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
1053 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1054 oi __TI_flags+6(%r14),_TIF_SIE>>8
1055sie_loop:
1056 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1057 tm __TI_flags+7(%r14),_TIF_EXIT_SIE
1058 jnz sie_exit
1059 lg %r14,__SF_EMPTY(%r15) # get control block pointer
1060 SPP __SF_EMPTY(%r15) # set guest id
1061 sie 0(%r14)
1062sie_done:
1063 SPP __LC_CMF_HPP # set host id
1064 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1065sie_exit:
1066 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
1067 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
1068 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
1069 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
1070 lghi %r2,0
1071 br %r14
1072sie_fault:
1073 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
1074 ni __TI_flags+6(%r14),255-(_TIF_SIE>>8)
1075 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
1076 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
1077 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
1078 lghi %r2,-EFAULT
1079 br %r14
1080
1081 .align 8
1082.Lsie_loop:
1083 .quad sie_loop
1084.Lsie_done:
1085 .quad sie_done
1086
1087 .section __ex_table,"a"
1088 .quad sie_loop,sie_fault
1089 .previous
1090#endif
1091
1039 .section .rodata, "a" 1092 .section .rodata, "a"
1040#define SYSCALL(esa,esame,emu) .long esame 1093#define SYSCALL(esa,esame,emu) .long esame
1041 .globl sys_call_table 1094 .globl sys_call_table
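With the SIE entry/exit code now living in entry64.S, KVM's C code enters the guest by calling sie64a() with the SIE control block and a guest register save area; the _TIF_SIE and _TIF_EXIT_SIE flags together with HANDLE_SIE_INTERCEPT let the interrupt handlers force an exit from the sie instruction. A plausible C-level view of the caller is sketched below; the real prototype lives in arch/s390/kvm/kvm-s390.h, and the vcpu field names here are assumptions, not verified against this tree:

	#include <linux/kvm_host.h>

	/* %r2 = SIE control block, %r3 = guest gpr save area (gprs 0-13 are
	 * loaded before and stored after the sie instruction).  Returns 0 on
	 * a regular SIE exit and -EFAULT if the sie instruction itself faults
	 * (see the __ex_table entry mapping sie_loop to sie_fault above). */
	extern int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);

	static int run_guest_once(struct kvm_vcpu *vcpu)
	{
		/* the field name guest_gprs is an assumption for this sketch */
		return sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	}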
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index fb317bf2c378..2d781bab37bb 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -22,6 +22,7 @@
22 */ 22 */
23 23
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/linkage.h>
25#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
26#include <asm/thread_info.h> 27#include <asm/thread_info.h>
27#include <asm/page.h> 28#include <asm/page.h>
@@ -383,8 +384,7 @@ iplstart:
383# doesn't need a builtin ipl record. 384# doesn't need a builtin ipl record.
384# 385#
385 .org 0x800 386 .org 0x800
386 .globl start 387ENTRY(start)
387start:
388 stm %r0,%r15,0x07b0 # store registers 388 stm %r0,%r15,0x07b0 # store registers
389 basr %r12,%r0 389 basr %r12,%r0
390.base: 390.base:
@@ -448,8 +448,7 @@ start:
448# or linload or SALIPL 448# or linload or SALIPL
449# 449#
450 .org 0x10000 450 .org 0x10000
451 .globl startup 451ENTRY(startup)
452startup:
453 basr %r13,0 # get base 452 basr %r13,0 # get base
454.LPG0: 453.LPG0:
455 xc 0x200(256),0x200 # partially clear lowcore 454 xc 0x200(256),0x200 # partially clear lowcore
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index b8f8dc126102..f21954b44dc1 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -11,13 +11,13 @@
11 */ 11 */
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/linkage.h>
14#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
15#include <asm/thread_info.h> 16#include <asm/thread_info.h>
16#include <asm/page.h> 17#include <asm/page.h>
17 18
18__HEAD 19__HEAD
19 .globl startup_continue 20ENTRY(startup_continue)
20startup_continue:
21 basr %r13,0 # get base 21 basr %r13,0 # get base
22.LPG1: 22.LPG1:
23 23
@@ -45,7 +45,7 @@ startup_continue:
45 # virtual and never return ... 45 # virtual and never return ...
46 .align 8 46 .align 8
47.Lentry:.long 0x00080000,0x80000000 + _stext 47.Lentry:.long 0x00080000,0x80000000 + _stext
48.Lctl: .long 0x04b50002 # cr0: various things 48.Lctl: .long 0x04b50000 # cr0: various things
49 .long 0 # cr1: primary space segment table 49 .long 0 # cr1: primary space segment table
50 .long .Lduct # cr2: dispatchable unit control table 50 .long .Lduct # cr2: dispatchable unit control table
51 .long 0 # cr3: instruction authorization 51 .long 0 # cr3: instruction authorization
@@ -78,8 +78,7 @@ startup_continue:
78.Lbase_cc: 78.Lbase_cc:
79 .long sched_clock_base_cc 79 .long sched_clock_base_cc
80 80
81 .globl _ehead 81ENTRY(_ehead)
82_ehead:
83 82
84#ifdef CONFIG_SHARED_KERNEL 83#ifdef CONFIG_SHARED_KERNEL
85 .org 0x100000 - 0x11000 # head.o ends at 0x11000 84 .org 0x100000 - 0x11000 # head.o ends at 0x11000
@@ -88,8 +87,8 @@ _ehead:
88# 87#
89# startup-code, running in absolute addressing mode 88# startup-code, running in absolute addressing mode
90# 89#
91 .globl _stext 90ENTRY(_stext)
92_stext: basr %r13,0 # get base 91 basr %r13,0 # get base
93.LPG3: 92.LPG3:
94# check control registers 93# check control registers
95 stctl %c0,%c15,0(%r15) 94 stctl %c0,%c15,0(%r15)
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index cdef68717416..ae5d492b069e 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -11,13 +11,13 @@
11 */ 11 */
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/linkage.h>
14#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
15#include <asm/thread_info.h> 16#include <asm/thread_info.h>
16#include <asm/page.h> 17#include <asm/page.h>
17 18
18__HEAD 19__HEAD
19 .globl startup_continue 20ENTRY(startup_continue)
20startup_continue:
21 larl %r1,sched_clock_base_cc 21 larl %r1,sched_clock_base_cc
22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK 22 mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
23 larl %r13,.LPG1 # get base 23 larl %r13,.LPG1 # get base
@@ -46,7 +46,7 @@ startup_continue:
46 .align 16 46 .align 16
47.LPG1: 47.LPG1:
48.Lentry:.quad 0x0000000180000000,_stext 48.Lentry:.quad 0x0000000180000000,_stext
49.Lctl: .quad 0x04350002 # cr0: various things 49.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
50 .quad 0 # cr1: primary space segment table 50 .quad 0 # cr1: primary space segment table
51 .quad .Lduct # cr2: dispatchable unit control table 51 .quad .Lduct # cr2: dispatchable unit control table
52 .quad 0 # cr3: instruction authorization 52 .quad 0 # cr3: instruction authorization
@@ -76,8 +76,7 @@ startup_continue:
76 .long 0x80000000,0,0,0 # invalid access-list entries 76 .long 0x80000000,0,0,0 # invalid access-list entries
77 .endr 77 .endr
78 78
79 .globl _ehead 79ENTRY(_ehead)
80_ehead:
81 80
82#ifdef CONFIG_SHARED_KERNEL 81#ifdef CONFIG_SHARED_KERNEL
83 .org 0x100000 - 0x11000 # head.o ends at 0x11000 82 .org 0x100000 - 0x11000 # head.o ends at 0x11000
@@ -86,8 +85,8 @@ _ehead:
86# 85#
87# startup-code, running in absolute addressing mode 86# startup-code, running in absolute addressing mode
88# 87#
89 .globl _stext 88ENTRY(_stext)
90_stext: basr %r13,0 # get base 89 basr %r13,0 # get base
91.LPG3: 90.LPG3:
92# check control registers 91# check control registers
93 stctg %c0,%c15,0(%r15) 92 stctg %c0,%c15,0(%r15)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e3264f6a9720..1f4050d45f78 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -88,15 +88,6 @@ int show_interrupts(struct seq_file *p, void *v)
88} 88}
89 89
90/* 90/*
91 * For compatibilty only. S/390 specific setup of interrupts et al. is done
92 * much later in init_channel_subsystem().
93 */
94void __init init_IRQ(void)
95{
96 /* nothing... */
97}
98
99/*
100 * Switch to the asynchronous interrupt stack for softirq execution. 91 * Switch to the asynchronous interrupt stack for softirq execution.
101 */ 92 */
102asmlinkage void do_softirq(void) 93asmlinkage void do_softirq(void)
@@ -144,28 +135,45 @@ void init_irq_proc(void)
144#endif 135#endif
145 136
146/* 137/*
147 * ext_int_hash[index] is the start of the list for all external interrupts 138 * ext_int_hash[index] is the list head for all external interrupts that hash
148 * that hash to this index. With the current set of external interrupts 139 * to this index.
149 * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
150 * iucv and 0x2603 pfault) this is always the first element.
151 */ 140 */
141static struct list_head ext_int_hash[256];
152 142
153struct ext_int_info { 143struct ext_int_info {
154 struct ext_int_info *next;
155 ext_int_handler_t handler; 144 ext_int_handler_t handler;
156 u16 code; 145 u16 code;
146 struct list_head entry;
147 struct rcu_head rcu;
157}; 148};
158 149
159static struct ext_int_info *ext_int_hash[256]; 150/* ext_int_hash_lock protects the handler lists for external interrupts */
151DEFINE_SPINLOCK(ext_int_hash_lock);
152
153static void __init init_external_interrupts(void)
154{
155 int idx;
156
157 for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
158 INIT_LIST_HEAD(&ext_int_hash[idx]);
159}
160 160
161static inline int ext_hash(u16 code) 161static inline int ext_hash(u16 code)
162{ 162{
163 return (code + (code >> 9)) & 0xff; 163 return (code + (code >> 9)) & 0xff;
164} 164}
165 165
166static void ext_int_hash_update(struct rcu_head *head)
167{
168 struct ext_int_info *p = container_of(head, struct ext_int_info, rcu);
169
170 kfree(p);
171}
172
166int register_external_interrupt(u16 code, ext_int_handler_t handler) 173int register_external_interrupt(u16 code, ext_int_handler_t handler)
167{ 174{
168 struct ext_int_info *p; 175 struct ext_int_info *p;
176 unsigned long flags;
169 int index; 177 int index;
170 178
171 p = kmalloc(sizeof(*p), GFP_ATOMIC); 179 p = kmalloc(sizeof(*p), GFP_ATOMIC);
@@ -174,33 +182,27 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
174 p->code = code; 182 p->code = code;
175 p->handler = handler; 183 p->handler = handler;
176 index = ext_hash(code); 184 index = ext_hash(code);
177 p->next = ext_int_hash[index]; 185
178 ext_int_hash[index] = p; 186 spin_lock_irqsave(&ext_int_hash_lock, flags);
187 list_add_rcu(&p->entry, &ext_int_hash[index]);
188 spin_unlock_irqrestore(&ext_int_hash_lock, flags);
179 return 0; 189 return 0;
180} 190}
181EXPORT_SYMBOL(register_external_interrupt); 191EXPORT_SYMBOL(register_external_interrupt);
182 192
183int unregister_external_interrupt(u16 code, ext_int_handler_t handler) 193int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
184{ 194{
185 struct ext_int_info *p, *q; 195 struct ext_int_info *p;
186 int index; 196 unsigned long flags;
197 int index = ext_hash(code);
187 198
188 index = ext_hash(code); 199 spin_lock_irqsave(&ext_int_hash_lock, flags);
189 q = NULL; 200 list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
190 p = ext_int_hash[index]; 201 if (p->code == code && p->handler == handler) {
191 while (p) { 202 list_del_rcu(&p->entry);
192 if (p->code == code && p->handler == handler) 203 call_rcu(&p->rcu, ext_int_hash_update);
193 break; 204 }
194 q = p; 205 spin_unlock_irqrestore(&ext_int_hash_lock, flags);
195 p = p->next;
196 }
197 if (!p)
198 return -ENOENT;
199 if (q)
200 q->next = p->next;
201 else
202 ext_int_hash[index] = p->next;
203 kfree(p);
204 return 0; 206 return 0;
205} 207}
206EXPORT_SYMBOL(unregister_external_interrupt); 208EXPORT_SYMBOL(unregister_external_interrupt);
@@ -224,15 +226,22 @@ void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
224 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; 226 kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
225 if (code != 0x1004) 227 if (code != 0x1004)
226 __get_cpu_var(s390_idle).nohz_delay = 1; 228 __get_cpu_var(s390_idle).nohz_delay = 1;
229
227 index = ext_hash(code); 230 index = ext_hash(code);
228 for (p = ext_int_hash[index]; p; p = p->next) { 231 rcu_read_lock();
232 list_for_each_entry_rcu(p, &ext_int_hash[index], entry)
229 if (likely(p->code == code)) 233 if (likely(p->code == code))
230 p->handler(ext_int_code, param32, param64); 234 p->handler(ext_int_code, param32, param64);
231 } 235 rcu_read_unlock();
232 irq_exit(); 236 irq_exit();
233 set_irq_regs(old_regs); 237 set_irq_regs(old_regs);
234} 238}
235 239
240void __init init_IRQ(void)
241{
242 init_external_interrupts();
243}
244
236static DEFINE_SPINLOCK(sc_irq_lock); 245static DEFINE_SPINLOCK(sc_irq_lock);
237static int sc_irq_refcount; 246static int sc_irq_refcount;
238 247
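The registration API keeps its signatures; what changes is the backing structure: the open-coded singly linked lists become per-bucket RCU lists, so do_extint() can walk a bucket under rcu_read_lock() while register/unregister serialize on ext_int_hash_lock and defer the actual kfree() to an RCU grace period. A minimal usage sketch, assuming the handler signature implied by the do_extint() call above (the interrupt code 0x1234 and the driver names are made up for illustration):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <asm/irq.h>	/* assumed home of register_external_interrupt() */

	#define MY_EXT_CODE 0x1234	/* illustrative only */

	static void my_ext_handler(unsigned int ext_int_code,
				   unsigned int param32, unsigned long param64)
	{
		/* runs from do_extint() under rcu_read_lock(), irqs disabled */
	}

	static int __init my_driver_init(void)
	{
		return register_external_interrupt(MY_EXT_CODE, my_ext_handler);
	}

	static void __exit my_driver_exit(void)
	{
		unregister_external_interrupt(MY_EXT_CODE, my_ext_handler);
		/* the handler may still be running until a grace period ends */
	}

	module_init(my_driver_init);
	module_exit(my_driver_exit);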
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 1e6a55795628..7e2c38ba1373 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -5,21 +5,19 @@
5 * 5 *
6 */ 6 */
7 7
8#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
9 10
10 .section .kprobes.text, "ax" 11 .section .kprobes.text, "ax"
11 12
12 .globl ftrace_stub 13ENTRY(ftrace_stub)
13ftrace_stub:
14 br %r14 14 br %r14
15 15
16 .globl _mcount 16ENTRY(_mcount)
17_mcount:
18#ifdef CONFIG_DYNAMIC_FTRACE 17#ifdef CONFIG_DYNAMIC_FTRACE
19 br %r14 18 br %r14
20 19
21 .globl ftrace_caller 20ENTRY(ftrace_caller)
22ftrace_caller:
23#endif 21#endif
24 stm %r2,%r5,16(%r15) 22 stm %r2,%r5,16(%r15)
25 bras %r1,2f 23 bras %r1,2f
@@ -41,8 +39,7 @@ ftrace_caller:
41#ifdef CONFIG_FUNCTION_GRAPH_TRACER 39#ifdef CONFIG_FUNCTION_GRAPH_TRACER
42 l %r2,100(%r15) 40 l %r2,100(%r15)
43 l %r3,152(%r15) 41 l %r3,152(%r15)
44 .globl ftrace_graph_caller 42ENTRY(ftrace_graph_caller)
45ftrace_graph_caller:
46# The bras instruction gets runtime patched to call prepare_ftrace_return. 43# The bras instruction gets runtime patched to call prepare_ftrace_return.
47# See ftrace_enable_ftrace_graph_caller. The patched instruction is: 44# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
48# bras %r14,prepare_ftrace_return 45# bras %r14,prepare_ftrace_return
@@ -56,8 +53,7 @@ ftrace_graph_caller:
56 53
57#ifdef CONFIG_FUNCTION_GRAPH_TRACER 54#ifdef CONFIG_FUNCTION_GRAPH_TRACER
58 55
59 .globl return_to_handler 56ENTRY(return_to_handler)
60return_to_handler:
61 stm %r2,%r5,16(%r15) 57 stm %r2,%r5,16(%r15)
62 st %r14,56(%r15) 58 st %r14,56(%r15)
63 lr %r0,%r15 59 lr %r0,%r15
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index e73667286ac0..f70cadec68fc 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -5,21 +5,19 @@
5 * 5 *
6 */ 6 */
7 7
8#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
9 10
10 .section .kprobes.text, "ax" 11 .section .kprobes.text, "ax"
11 12
12 .globl ftrace_stub 13ENTRY(ftrace_stub)
13ftrace_stub:
14 br %r14 14 br %r14
15 15
16 .globl _mcount 16ENTRY(_mcount)
17_mcount:
18#ifdef CONFIG_DYNAMIC_FTRACE 17#ifdef CONFIG_DYNAMIC_FTRACE
19 br %r14 18 br %r14
20 19
21 .globl ftrace_caller 20ENTRY(ftrace_caller)
22ftrace_caller:
23#endif 21#endif
24 larl %r1,function_trace_stop 22 larl %r1,function_trace_stop
25 icm %r1,0xf,0(%r1) 23 icm %r1,0xf,0(%r1)
@@ -37,8 +35,7 @@ ftrace_caller:
37#ifdef CONFIG_FUNCTION_GRAPH_TRACER 35#ifdef CONFIG_FUNCTION_GRAPH_TRACER
38 lg %r2,168(%r15) 36 lg %r2,168(%r15)
39 lg %r3,272(%r15) 37 lg %r3,272(%r15)
40 .globl ftrace_graph_caller 38ENTRY(ftrace_graph_caller)
41ftrace_graph_caller:
42# The bras instruction gets runtime patched to call prepare_ftrace_return. 39# The bras instruction gets runtime patched to call prepare_ftrace_return.
43# See ftrace_enable_ftrace_graph_caller. The patched instruction is: 40# See ftrace_enable_ftrace_graph_caller. The patched instruction is:
44# bras %r14,prepare_ftrace_return 41# bras %r14,prepare_ftrace_return
@@ -52,8 +49,7 @@ ftrace_graph_caller:
52 49
53#ifdef CONFIG_FUNCTION_GRAPH_TRACER 50#ifdef CONFIG_FUNCTION_GRAPH_TRACER
54 51
55 .globl return_to_handler 52ENTRY(return_to_handler)
56return_to_handler:
57 stmg %r2,%r5,32(%r15) 53 stmg %r2,%r5,32(%r15)
58 lgr %r1,%r15 54 lgr %r1,%r15
59 aghi %r15,-160 55 aghi %r15,-160
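The ftrace_graph_caller comment above refers to runtime patching of a bras instruction. The real code for that is ftrace_enable_ftrace_graph_caller() in arch/s390/kernel/ftrace.c; purely as orientation, a hedged reconstruction of the idea looks like this (the symbol declarations are simplified so the sketch stands alone, and only their addresses are used):

	#include <linux/uaccess.h>	/* probe_kernel_write() */

	/* real kernel symbols, declared with dummy signatures for the sketch */
	extern void ftrace_graph_caller(void);
	extern void prepare_ftrace_return(void);

	static long redirect_ftrace_graph_caller(void)
	{
		/* bras %r14,<target> is a 4-byte RI-format instruction whose
		 * last two bytes hold a signed offset counted in halfwords;
		 * rewriting those two bytes retargets the branch. */
		unsigned short offset;

		offset = ((unsigned long) prepare_ftrace_return -
			  (unsigned long) ftrace_graph_caller) / 2;
		return probe_kernel_write((char *) ftrace_graph_caller + 2,
					  &offset, sizeof(offset));
	}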
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index cb899d9f8505..303d961c3bb5 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,14 +6,15 @@
6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com) 6 * Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
7 */ 7 */
8 8
9#include <linux/linkage.h>
9#include <asm/asm-offsets.h> 10#include <asm/asm-offsets.h>
10 11
11# 12#
12# do_reipl_asm 13# do_reipl_asm
13# Parameter: r2 = schid of reipl device 14# Parameter: r2 = schid of reipl device
14# 15#
15 .globl do_reipl_asm 16ENTRY(do_reipl_asm)
16do_reipl_asm: basr %r13,0 17 basr %r13,0
17.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) 18.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
18.Lpg1: # do store status of all registers 19.Lpg1: # do store status of all registers
19 20
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 9eabbc90795d..78eb7cfbd3d1 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -4,6 +4,7 @@
4 * Denis Joseph Barrow, 4 * Denis Joseph Barrow,
5 */ 5 */
6 6
7#include <linux/linkage.h>
7#include <asm/asm-offsets.h> 8#include <asm/asm-offsets.h>
8 9
9# 10#
@@ -11,8 +12,8 @@
11# Parameter: r2 = schid of reipl device 12# Parameter: r2 = schid of reipl device
12# 13#
13 14
14 .globl do_reipl_asm 15ENTRY(do_reipl_asm)
15do_reipl_asm: basr %r13,0 16 basr %r13,0
16.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) 17.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
17.Lpg1: # do store status of all registers 18.Lpg1: # do store status of all registers
18 19
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index 3b456b80bcee..c91d70aede91 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -8,6 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/linkage.h>
12
11/* 13/*
12 * moves the new kernel to its destination... 14 * moves the new kernel to its destination...
13 * %r2 = pointer to first kimage_entry_t 15 * %r2 = pointer to first kimage_entry_t
@@ -22,8 +24,7 @@
22 */ 24 */
23 25
24 .text 26 .text
25 .globl relocate_kernel 27ENTRY(relocate_kernel)
26 relocate_kernel:
27 basr %r13,0 # base address 28 basr %r13,0 # base address
28 .base: 29 .base:
29 stnsm sys_msk-.base(%r13),0xfb # disable DAT 30 stnsm sys_msk-.base(%r13),0xfb # disable DAT
@@ -112,6 +113,7 @@
112 .byte 0 113 .byte 0
113 .align 8 114 .align 8
114 relocate_kernel_end: 115 relocate_kernel_end:
116 .align 8
115 .globl relocate_kernel_len 117 .globl relocate_kernel_len
116 relocate_kernel_len: 118 relocate_kernel_len:
117 .quad relocate_kernel_end - relocate_kernel 119 .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 1f9ea2067b59..7c3ce589a7f0 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -8,6 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/linkage.h>
12
11/* 13/*
12 * moves the new kernel to its destination... 14 * moves the new kernel to its destination...
13 * %r2 = pointer to first kimage_entry_t 15 * %r2 = pointer to first kimage_entry_t
@@ -23,8 +25,7 @@
23 */ 25 */
24 26
25 .text 27 .text
26 .globl relocate_kernel 28ENTRY(relocate_kernel)
27 relocate_kernel:
28 basr %r13,0 # base address 29 basr %r13,0 # base address
29 .base: 30 .base:
30 stnsm sys_msk-.base(%r13),0xfb # disable DAT 31 stnsm sys_msk-.base(%r13),0xfb # disable DAT
@@ -115,6 +116,7 @@
115 .byte 0 116 .byte 0
116 .align 8 117 .align 8
117 relocate_kernel_end: 118 relocate_kernel_end:
119 .align 8
118 .globl relocate_kernel_len 120 .globl relocate_kernel_len
119 relocate_kernel_len: 121 relocate_kernel_len:
120 .quad relocate_kernel_end - relocate_kernel 122 .quad relocate_kernel_end - relocate_kernel
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 656fcbb9bd83..57b536649b00 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,6 +1,10 @@
1#include <linux/module.h> 1#include <linux/module.h>
2#include <linux/kvm_host.h>
2#include <asm/ftrace.h> 3#include <asm/ftrace.h>
3 4
4#ifdef CONFIG_FUNCTION_TRACER 5#ifdef CONFIG_FUNCTION_TRACER
5EXPORT_SYMBOL(_mcount); 6EXPORT_SYMBOL(_mcount);
6#endif 7#endif
8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
9EXPORT_SYMBOL(sie64a);
10#endif
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 2e82fdd89320..95792d846bb6 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -8,6 +8,8 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/linkage.h>
12
11LC_EXT_NEW_PSW = 0x58 # addr of ext int handler 13LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
12LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit 14LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
13LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter 15LC_EXT_INT_PARAM = 0x80 # addr of ext int parameter
@@ -260,8 +262,7 @@ _sclp_print:
260# R2 = 0 on success, 1 on failure 262# R2 = 0 on success, 1 on failure
261# 263#
262 264
263 .globl _sclp_print_early 265ENTRY(_sclp_print_early)
264_sclp_print_early:
265 stm %r6,%r15,24(%r15) # save registers 266 stm %r6,%r15,24(%r15) # save registers
266 ahi %r15,-96 # create stack frame 267 ahi %r15,-96 # create stack frame
267#ifdef CONFIG_64BIT 268#ifdef CONFIG_64BIT
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1d55c95f617c..a6d85c0a7f20 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -654,7 +654,8 @@ int __cpu_disable(void)
654 /* disable all external interrupts */ 654 /* disable all external interrupts */
655 cr_parms.orvals[0] = 0; 655 cr_parms.orvals[0] = 0;
656 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 | 656 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
657 1 << 10 | 1 << 9 | 1 << 6 | 1 << 4); 657 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
658 1 << 4);
658 /* disable all I/O interrupts */ 659 /* disable all I/O interrupts */
659 cr_parms.orvals[6] = 0; 660 cr_parms.orvals[6] = 0;
660 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | 661 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
diff --git a/arch/s390/kernel/switch_cpu.S b/arch/s390/kernel/switch_cpu.S
index 20530dd2eab1..bfe070bc7659 100644
--- a/arch/s390/kernel/switch_cpu.S
+++ b/arch/s390/kernel/switch_cpu.S
@@ -5,6 +5,7 @@
5 * 5 *
6 */ 6 */
7 7
8#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
9#include <asm/ptrace.h> 10#include <asm/ptrace.h>
10 11
@@ -16,9 +17,7 @@
16# %r6 - destination cpu 17# %r6 - destination cpu
17 18
18 .section .text 19 .section .text
19 .align 4 20ENTRY(smp_switch_to_cpu)
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stm %r6,%r15,__SF_GPRS(%r15) 21 stm %r6,%r15,__SF_GPRS(%r15)
23 lr %r1,%r15 22 lr %r1,%r15
24 ahi %r15,-STACK_FRAME_OVERHEAD 23 ahi %r15,-STACK_FRAME_OVERHEAD
@@ -33,8 +32,7 @@ smp_switch_to_cpu:
33 brc 2,2b /* busy, try again */ 32 brc 2,2b /* busy, try again */
343: j 3b 333: j 3b
35 34
36 .globl smp_restart_cpu 35ENTRY(smp_restart_cpu)
37smp_restart_cpu:
38 basr %r13,0 36 basr %r13,0
390: la %r1,.gprregs_addr-0b(%r13) 370: la %r1,.gprregs_addr-0b(%r13)
40 l %r1,0(%r1) 38 l %r1,0(%r1)
diff --git a/arch/s390/kernel/switch_cpu64.S b/arch/s390/kernel/switch_cpu64.S
index 5be3f43898f9..fcc42d799e41 100644
--- a/arch/s390/kernel/switch_cpu64.S
+++ b/arch/s390/kernel/switch_cpu64.S
@@ -5,6 +5,7 @@
5 * 5 *
6 */ 6 */
7 7
8#include <linux/linkage.h>
8#include <asm/asm-offsets.h> 9#include <asm/asm-offsets.h>
9#include <asm/ptrace.h> 10#include <asm/ptrace.h>
10 11
@@ -16,9 +17,7 @@
16# %r6 - destination cpu 17# %r6 - destination cpu
17 18
18 .section .text 19 .section .text
19 .align 4 20ENTRY(smp_switch_to_cpu)
20 .globl smp_switch_to_cpu
21smp_switch_to_cpu:
22 stmg %r6,%r15,__SF_GPRS(%r15) 21 stmg %r6,%r15,__SF_GPRS(%r15)
23 lgr %r1,%r15 22 lgr %r1,%r15
24 aghi %r15,-STACK_FRAME_OVERHEAD 23 aghi %r15,-STACK_FRAME_OVERHEAD
@@ -31,8 +30,7 @@ smp_switch_to_cpu:
31 brc 2,2b /* busy, try again */ 30 brc 2,2b /* busy, try again */
323: j 3b 313: j 3b
33 32
34 .globl smp_restart_cpu 33ENTRY(smp_restart_cpu)
35smp_restart_cpu:
36 larl %r1,.gprregs 34 larl %r1,.gprregs
37 lmg %r0,%r15,0(%r1) 35 lmg %r0,%r15,0(%r1)
381: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */ 361: sigp %r0,%r5,__SIGP_SENSE /* Wait for calling CPU */
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index 1f066e46e83e..51bcdb50a230 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -7,6 +7,7 @@
7 * Michael Holzheu <holzheu@linux.vnet.ibm.com> 7 * Michael Holzheu <holzheu@linux.vnet.ibm.com>
8 */ 8 */
9 9
10#include <linux/linkage.h>
10#include <asm/page.h> 11#include <asm/page.h>
11#include <asm/ptrace.h> 12#include <asm/ptrace.h>
12#include <asm/thread_info.h> 13#include <asm/thread_info.h>
@@ -22,9 +23,7 @@
22 * This function runs with disabled interrupts. 23 * This function runs with disabled interrupts.
23 */ 24 */
24 .section .text 25 .section .text
25 .align 4 26ENTRY(swsusp_arch_suspend)
26 .globl swsusp_arch_suspend
27swsusp_arch_suspend:
28 stmg %r6,%r15,__SF_GPRS(%r15) 27 stmg %r6,%r15,__SF_GPRS(%r15)
29 lgr %r1,%r15 28 lgr %r1,%r15
30 aghi %r15,-STACK_FRAME_OVERHEAD 29 aghi %r15,-STACK_FRAME_OVERHEAD
@@ -112,8 +111,7 @@ swsusp_arch_suspend:
112 * Then we return to the function that called swsusp_arch_suspend(). 111 * Then we return to the function that called swsusp_arch_suspend().
113 * swsusp_arch_resume() runs with disabled interrupts. 112 * swsusp_arch_resume() runs with disabled interrupts.
114 */ 113 */
115 .globl swsusp_arch_resume 114ENTRY(swsusp_arch_resume)
116swsusp_arch_resume:
117 stmg %r6,%r15,__SF_GPRS(%r15) 115 stmg %r6,%r15,__SF_GPRS(%r15)
118 lgr %r1,%r15 116 lgr %r1,%r15
119 aghi %r15,-STACK_FRAME_OVERHEAD 117 aghi %r15,-STACK_FRAME_OVERHEAD
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a63d34c3611e..e9372c77cced 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -18,7 +18,7 @@
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/string.h> 19#include <linux/string.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/tracehook.h> 21#include <linux/ptrace.h>
22#include <linux/timer.h> 22#include <linux/timer.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/smp.h> 24#include <linux/smp.h>
@@ -43,14 +43,10 @@
43#include <asm/debug.h> 43#include <asm/debug.h>
44#include "entry.h" 44#include "entry.h"
45 45
46pgm_check_handler_t *pgm_check_table[128]; 46void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long);
47 47
48int show_unhandled_signals; 48int show_unhandled_signals;
49 49
50extern pgm_check_handler_t do_protection_exception;
51extern pgm_check_handler_t do_dat_exception;
52extern pgm_check_handler_t do_asce_exception;
53
54#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 50#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
55 51
56#ifndef CONFIG_64BIT 52#ifndef CONFIG_64BIT
@@ -329,10 +325,17 @@ static inline void __user *get_psw_address(struct pt_regs *regs,
329 325
330void __kprobes do_per_trap(struct pt_regs *regs) 326void __kprobes do_per_trap(struct pt_regs *regs)
331{ 327{
328 siginfo_t info;
329
332 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) 330 if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
333 return; 331 return;
334 if (current->ptrace) 332 if (!current->ptrace)
335 force_sig(SIGTRAP, current); 333 return;
334 info.si_signo = SIGTRAP;
335 info.si_errno = 0;
336 info.si_code = TRAP_HWBKPT;
337 info.si_addr = (void *) current->thread.per_event.address;
338 force_sig_info(SIGTRAP, &info, current);
336} 339}
337 340
338static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, 341static void default_trap_handler(struct pt_regs *regs, long pgm_int_code,
@@ -425,9 +428,13 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
425 if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) 428 if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
426 return; 429 return;
427 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { 430 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
428 if (current->ptrace) 431 if (current->ptrace) {
429 force_sig(SIGTRAP, current); 432 info.si_signo = SIGTRAP;
430 else 433 info.si_errno = 0;
434 info.si_code = TRAP_BRKPT;
435 info.si_addr = location;
436 force_sig_info(SIGTRAP, &info, current);
437 } else
431 signal = SIGILL; 438 signal = SIGILL;
432#ifdef CONFIG_MATHEMU 439#ifdef CONFIG_MATHEMU
433 } else if (opcode[0] == 0xb3) { 440 } else if (opcode[0] == 0xb3) {
@@ -489,9 +496,8 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
489 496
490 497
491#ifdef CONFIG_MATHEMU 498#ifdef CONFIG_MATHEMU
492asmlinkage void specification_exception(struct pt_regs *regs, 499void specification_exception(struct pt_regs *regs, long pgm_int_code,
493 long pgm_int_code, 500 unsigned long trans_exc_code)
494 unsigned long trans_exc_code)
495{ 501{
496 __u8 opcode[6]; 502 __u8 opcode[6];
497 __u16 __user *location = NULL; 503 __u16 __user *location = NULL;
@@ -648,7 +654,7 @@ static void space_switch_exception(struct pt_regs *regs, long pgm_int_code,
648 do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); 654 do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info);
649} 655}
650 656
651asmlinkage void __kprobes kernel_stack_overflow(struct pt_regs * regs) 657void __kprobes kernel_stack_overflow(struct pt_regs * regs)
652{ 658{
653 bust_spinlocks(1); 659 bust_spinlocks(1);
654 printk("Kernel stack overflow.\n"); 660 printk("Kernel stack overflow.\n");
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 860d26514c08..3975722bb19d 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -10,5 +10,5 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
10 10
11ccflags-y := -Ivirt/kvm -Iarch/s390/kvm 11ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
12 12
13kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o 13kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o diag.o
14obj-$(CONFIG_KVM) += kvm.o 14obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 03c716a0f01f..c86f6ae43f76 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * gaccess.h - access guest memory 2 * access.h - access guest memory
3 * 3 *
4 * Copyright IBM Corp. 2008,2009 4 * Copyright IBM Corp. 2008,2009
5 * 5 *
@@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
22 unsigned long guestaddr) 22 unsigned long guestaddr)
23{ 23{
24 unsigned long prefix = vcpu->arch.sie_block->prefix; 24 unsigned long prefix = vcpu->arch.sie_block->prefix;
25 unsigned long origin = vcpu->arch.sie_block->gmsor;
26 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
27 25
28 if (guestaddr < 2 * PAGE_SIZE) 26 if (guestaddr < 2 * PAGE_SIZE)
29 guestaddr += prefix; 27 guestaddr += prefix;
30 else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE)) 28 else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
31 guestaddr -= prefix; 29 guestaddr -= prefix;
32 30
33 if (guestaddr > memsize) 31 return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
34 return (void __user __force *) ERR_PTR(-EFAULT);
35
36 guestaddr += origin;
37
38 return (void __user *) guestaddr;
39} 32}
40 33
41static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr, 34static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
@@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
141 134
142static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, 135static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
143 unsigned long guestdest, 136 unsigned long guestdest,
144 const void *from, unsigned long n) 137 void *from, unsigned long n)
145{ 138{
146 int rc; 139 int rc;
147 unsigned long i; 140 unsigned long i;
148 const u8 *data = from; 141 u8 *data = from;
149 142
150 for (i = 0; i < n; i++) { 143 for (i = 0; i < n; i++) {
151 rc = put_guest_u8(vcpu, guestdest++, *(data++)); 144 rc = put_guest_u8(vcpu, guestdest++, *(data++));
@@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
155 return 0; 148 return 0;
156} 149}
157 150
151static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
152 unsigned long guestdest,
153 void *from, unsigned long n)
154{
155 int r;
156 void __user *uptr;
157 unsigned long size;
158
159 if (guestdest + n < guestdest)
160 return -EFAULT;
161
162 /* simple case: all within one segment table entry? */
163 if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
164 uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
165
166 if (IS_ERR((void __force *) uptr))
167 return PTR_ERR((void __force *) uptr);
168
169 r = copy_to_user(uptr, from, n);
170
171 if (r)
172 r = -EFAULT;
173
174 goto out;
175 }
176
177 /* copy first segment */
178 uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
179
180 if (IS_ERR((void __force *) uptr))
181 return PTR_ERR((void __force *) uptr);
182
183 size = PMD_SIZE - (guestdest & ~PMD_MASK);
184
185 r = copy_to_user(uptr, from, size);
186
187 if (r) {
188 r = -EFAULT;
189 goto out;
190 }
191 from += size;
192 n -= size;
193 guestdest += size;
194
195 /* copy full segments */
196 while (n >= PMD_SIZE) {
197 uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
198
199 if (IS_ERR((void __force *) uptr))
200 return PTR_ERR((void __force *) uptr);
201
202 r = copy_to_user(uptr, from, PMD_SIZE);
203
204 if (r) {
205 r = -EFAULT;
206 goto out;
207 }
208 from += PMD_SIZE;
209 n -= PMD_SIZE;
210 guestdest += PMD_SIZE;
211 }
212
213 /* copy the tail segment */
214 if (n) {
215 uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);
216
217 if (IS_ERR((void __force *) uptr))
218 return PTR_ERR((void __force *) uptr);
219
220 r = copy_to_user(uptr, from, n);
221
222 if (r)
223 r = -EFAULT;
224 }
225out:
226 return r;
227}
228
229static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
230 unsigned long guestdest,
231 void *from, unsigned long n)
232{
233 return __copy_to_guest_fast(vcpu, guestdest, from, n);
234}
235
158static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest, 236static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
159 const void *from, unsigned long n) 237 void *from, unsigned long n)
160{ 238{
161 unsigned long prefix = vcpu->arch.sie_block->prefix; 239 unsigned long prefix = vcpu->arch.sie_block->prefix;
162 unsigned long origin = vcpu->arch.sie_block->gmsor;
163 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
164 240
165 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE)) 241 if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
166 goto slowpath; 242 goto slowpath;
@@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
177 else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE)) 253 else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
178 guestdest -= prefix; 254 guestdest -= prefix;
179 255
180 if (guestdest + n > memsize) 256 return __copy_to_guest_fast(vcpu, guestdest, from, n);
181 return -EFAULT;
182
183 if (guestdest + n < guestdest)
184 return -EFAULT;
185
186 guestdest += origin;
187
188 return copy_to_user((void __user *) guestdest, from, n);
189slowpath: 257slowpath:
190 return __copy_to_guest_slow(vcpu, guestdest, from, n); 258 return __copy_to_guest_slow(vcpu, guestdest, from, n);
191} 259}
@@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
206 return 0; 274 return 0;
207} 275}
208 276
209static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to, 277static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
210 unsigned long guestsrc, unsigned long n) 278 unsigned long guestsrc,
279 unsigned long n)
211{ 280{
212 unsigned long prefix = vcpu->arch.sie_block->prefix; 281 int r;
213 unsigned long origin = vcpu->arch.sie_block->gmsor; 282 void __user *uptr;
214 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); 283 unsigned long size;
215 284
216 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE)) 285 if (guestsrc + n < guestsrc)
217 goto slowpath; 286 return -EFAULT;
218 287
219 if ((guestsrc < prefix) && (guestsrc + n > prefix)) 288 /* simple case: all within one segment table entry? */
220 goto slowpath; 289 if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
290 uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);
221 291
222 if ((guestsrc < prefix + 2 * PAGE_SIZE) 292 if (IS_ERR((void __force *) uptr))
223 && (guestsrc + n > prefix + 2 * PAGE_SIZE)) 293 return PTR_ERR((void __force *) uptr);
224 goto slowpath;
225 294
226 if (guestsrc < 2 * PAGE_SIZE) 295 r = copy_from_user(to, uptr, n);
227 guestsrc += prefix;
228 else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
229 guestsrc -= prefix;
230 296
231 if (guestsrc + n > memsize) 297 if (r)
232 return -EFAULT; 298 r = -EFAULT;
233 299
234 if (guestsrc + n < guestsrc) 300 goto out;
235 return -EFAULT; 301 }
236 302
237 guestsrc += origin; 303 /* copy first segment */
304 uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
238 305
239 return copy_from_user(to, (void __user *) guestsrc, n); 306 if (IS_ERR((void __force *) uptr))
240slowpath: 307 return PTR_ERR((void __force *) uptr);
241 return __copy_from_guest_slow(vcpu, to, guestsrc, n);
242}
243 308
244static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, 309 size = PMD_SIZE - (guestsrc & ~PMD_MASK);
245 unsigned long guestdest,
246 const void *from, unsigned long n)
247{
248 unsigned long origin = vcpu->arch.sie_block->gmsor;
249 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
250 310
251 if (guestdest + n > memsize) 311 r = copy_from_user(to, uptr, size);
252 return -EFAULT;
253 312
254 if (guestdest + n < guestdest) 313 if (r) {
255 return -EFAULT; 314 r = -EFAULT;
315 goto out;
316 }
317 to += size;
318 n -= size;
319 guestsrc += size;
320
321 /* copy full segments */
322 while (n >= PMD_SIZE) {
323 uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
324
325 if (IS_ERR((void __force *) uptr))
326 return PTR_ERR((void __force *) uptr);
327
328 r = copy_from_user(to, uptr, PMD_SIZE);
329
330 if (r) {
331 r = -EFAULT;
332 goto out;
333 }
334 to += PMD_SIZE;
335 n -= PMD_SIZE;
336 guestsrc += PMD_SIZE;
337 }
338
339 /* copy the tail segment */
340 if (n) {
341 uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);
256 342
257 guestdest += origin; 343 if (IS_ERR((void __force *) uptr))
344 return PTR_ERR((void __force *) uptr);
258 345
259 return copy_to_user((void __user *) guestdest, from, n); 346 r = copy_from_user(to, uptr, n);
347
348 if (r)
349 r = -EFAULT;
350 }
351out:
352 return r;
260} 353}
261 354
262static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to, 355static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
263 unsigned long guestsrc, 356 unsigned long guestsrc,
264 unsigned long n) 357 unsigned long n)
265{ 358{
266 unsigned long origin = vcpu->arch.sie_block->gmsor; 359 return __copy_from_guest_fast(vcpu, to, guestsrc, n);
267 unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu); 360}
268 361
269 if (guestsrc + n > memsize) 362static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
270 return -EFAULT; 363 unsigned long guestsrc, unsigned long n)
364{
365 unsigned long prefix = vcpu->arch.sie_block->prefix;
271 366
272 if (guestsrc + n < guestsrc) 367 if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
273 return -EFAULT; 368 goto slowpath;
274 369
275 guestsrc += origin; 370 if ((guestsrc < prefix) && (guestsrc + n > prefix))
371 goto slowpath;
372
373 if ((guestsrc < prefix + 2 * PAGE_SIZE)
374 && (guestsrc + n > prefix + 2 * PAGE_SIZE))
375 goto slowpath;
376
377 if (guestsrc < 2 * PAGE_SIZE)
378 guestsrc += prefix;
379 else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
380 guestsrc -= prefix;
276 381
277 return copy_from_user(to, (void __user *) guestsrc, n); 382 return __copy_from_guest_fast(vcpu, to, guestsrc, n);
383slowpath:
384 return __copy_from_guest_slow(vcpu, to, guestsrc, n);
278} 385}
279#endif 386#endif
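The new __copy_to_guest_fast()/__copy_from_guest_fast() helpers above resolve the guest range one segment table entry at a time: a leading partial segment, any number of full segments, then the tail, each translated through gmap_fault() before the ordinary copy_to_user()/copy_from_user(). Below is a minimal user-space sketch of that chunking, collapsed into a single loop; resolve_segment() and copy_chunk() are hypothetical stand-ins for gmap_fault() and copy_to_user(), and SEG_SIZE assumes the 1 MiB segment (PMD) size of 64-bit s390.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SEG_SIZE (1UL << 20)           /* assumed s390 segment/PMD size */
#define SEG_MASK (~(SEG_SIZE - 1))

/* Hypothetical stand-in for gmap_fault(): pretend identity translation. */
static long resolve_segment(uint64_t guest_addr)
{
	return (long) guest_addr;
}

/* Hypothetical stand-in for copy_to_user(): just report the chunk. */
static int copy_chunk(long host_addr, const char *from, size_t n)
{
	(void) from;
	printf("copy %zu bytes to host address %#lx\n", n, (unsigned long) host_addr);
	return 0;
}

/* Same chunking as __copy_to_guest_fast(): never cross a segment boundary
 * with a single copy, re-translate at every boundary. */
static int copy_to_guest_fast_sketch(uint64_t dest, const char *from, size_t n)
{
	while (n) {
		size_t chunk = SEG_SIZE - (dest & ~SEG_MASK); /* bytes left in segment */
		long host;

		if (chunk > n)
			chunk = n;
		host = resolve_segment(dest);   /* gmap_fault() in the kernel */
		if (host < 0)
			return -1;
		if (copy_chunk(host, from, chunk))
			return -1;
		dest += chunk;
		from += chunk;
		n -= chunk;
	}
	return 0;
}

int main(void)
{
	static char buf[4096];
	return copy_to_guest_fast_sketch(SEG_SIZE - 100, buf, sizeof(buf));
}

With the example input the first copy is limited to the 100 bytes left in the current segment and the remaining 3996 bytes are copied through a fresh translation of the next segment, which is exactly why the kernel version re-walks the gmap at each boundary.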
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index f7b6df45d8be..c7c51898984e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -105,6 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
105 [0xae] = kvm_s390_handle_sigp, 105 [0xae] = kvm_s390_handle_sigp,
106 [0xb2] = kvm_s390_handle_b2, 106 [0xb2] = kvm_s390_handle_b2,
107 [0xb7] = handle_lctl, 107 [0xb7] = handle_lctl,
108 [0xe5] = kvm_s390_handle_e5,
108 [0xeb] = handle_lctlg, 109 [0xeb] = handle_lctlg,
109}; 110};
110 111
@@ -159,22 +160,42 @@ static int handle_stop(struct kvm_vcpu *vcpu)
159 160
160static int handle_validity(struct kvm_vcpu *vcpu) 161static int handle_validity(struct kvm_vcpu *vcpu)
161{ 162{
163 unsigned long vmaddr;
162 int viwhy = vcpu->arch.sie_block->ipb >> 16; 164 int viwhy = vcpu->arch.sie_block->ipb >> 16;
163 int rc; 165 int rc;
164 166
165 vcpu->stat.exit_validity++; 167 vcpu->stat.exit_validity++;
166 if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix 168 if (viwhy == 0x37) {
167 <= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) { 169 vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
168 rc = fault_in_pages_writeable((char __user *) 170 vcpu->arch.gmap);
169 vcpu->arch.sie_block->gmsor + 171 if (IS_ERR_VALUE(vmaddr)) {
170 vcpu->arch.sie_block->prefix, 172 rc = -EOPNOTSUPP;
171 2*PAGE_SIZE); 173 goto out;
172 if (rc) 174 }
175 rc = fault_in_pages_writeable((char __user *) vmaddr,
176 PAGE_SIZE);
177 if (rc) {
178 /* user will receive sigsegv, exit to user */
179 rc = -EOPNOTSUPP;
180 goto out;
181 }
182 vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
183 vcpu->arch.gmap);
184 if (IS_ERR_VALUE(vmaddr)) {
185 rc = -EOPNOTSUPP;
186 goto out;
187 }
188 rc = fault_in_pages_writeable((char __user *) vmaddr,
189 PAGE_SIZE);
190 if (rc) {
173 /* user will receive sigsegv, exit to user */ 191 /* user will receive sigsegv, exit to user */
174 rc = -EOPNOTSUPP; 192 rc = -EOPNOTSUPP;
193 goto out;
194 }
175 } else 195 } else
176 rc = -EOPNOTSUPP; 196 rc = -EOPNOTSUPP;
177 197
198out:
178 if (rc) 199 if (rc)
179 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d", 200 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
180 viwhy); 201 viwhy);
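The reworked validity handler probes the two prefix pages separately because, under gmap, the page at prefix and the page at prefix + PAGE_SIZE may live in different host mappings, and gmap_fault() reports failures as negative errno values packed into an unsigned long, which is why the result is tested with IS_ERR_VALUE() rather than compared against NULL. A small sketch of that error convention, assuming the kernel's MAX_ERRNO of 4095; is_err_value() and translate() are illustrative stand-ins, not kernel interfaces.

#include <stdio.h>

#define MAX_ERRNO 4095UL
#define EFAULT    14

/* Stand-in for IS_ERR_VALUE(): the top MAX_ERRNO values of the address
 * space are reserved for negative errno codes. */
static int is_err_value(unsigned long x)
{
	return x >= (unsigned long) -MAX_ERRNO;
}

/* Hypothetical translator: fails for guest addresses above 1 GiB. */
static unsigned long translate(unsigned long guest_addr)
{
	if (guest_addr >= (1UL << 30))
		return (unsigned long) -EFAULT;
	return 0x80000000UL + guest_addr;   /* pretend host mapping */
}

int main(void)
{
	unsigned long addrs[] = { 0x2000UL, (1UL << 30) + 0x2000UL };

	for (int i = 0; i < 2; i++) {
		unsigned long host = translate(addrs[i]);

		if (is_err_value(host))
			printf("guest %#lx: error %ld\n", addrs[i], (long) host);
		else
			printf("guest %#lx -> host %#lx\n", addrs[i], host);
	}
	return 0;
}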
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 35c21bf910c5..c9aeb4b4d0b8 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -128,6 +128,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
128 if (rc == -EFAULT) 128 if (rc == -EFAULT)
129 exception = 1; 129 exception = 1;
130 130
131 rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, inti->emerg.code);
132 if (rc == -EFAULT)
133 exception = 1;
134
131 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW, 135 rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
132 &vcpu->arch.sie_block->gpsw, sizeof(psw_t)); 136 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
133 if (rc == -EFAULT) 137 if (rc == -EFAULT)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 67345ae7ce8d..f17296e4fc89 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -62,6 +62,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
62 { "instruction_chsc", VCPU_STAT(instruction_chsc) }, 62 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
63 { "instruction_stsi", VCPU_STAT(instruction_stsi) }, 63 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
64 { "instruction_stfl", VCPU_STAT(instruction_stfl) }, 64 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
65 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
65 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, 66 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
66 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 67 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
67 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 68 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
@@ -189,7 +190,13 @@ int kvm_arch_init_vm(struct kvm *kvm)
189 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
190 VM_EVENT(kvm, 3, "%s", "vm created"); 191 VM_EVENT(kvm, 3, "%s", "vm created");
191 192
193 kvm->arch.gmap = gmap_alloc(current->mm);
194 if (!kvm->arch.gmap)
195 goto out_nogmap;
196
192 return 0; 197 return 0;
198out_nogmap:
199 debug_unregister(kvm->arch.dbf);
193out_nodbf: 200out_nodbf:
194 free_page((unsigned long)(kvm->arch.sca)); 201 free_page((unsigned long)(kvm->arch.sca));
195out_err: 202out_err:
@@ -234,11 +241,13 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
234 kvm_free_vcpus(kvm); 241 kvm_free_vcpus(kvm);
235 free_page((unsigned long)(kvm->arch.sca)); 242 free_page((unsigned long)(kvm->arch.sca));
236 debug_unregister(kvm->arch.dbf); 243 debug_unregister(kvm->arch.dbf);
244 gmap_free(kvm->arch.gmap);
237} 245}
238 246
239/* Section: vcpu related */ 247/* Section: vcpu related */
240int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 248int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
241{ 249{
250 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
242 return 0; 251 return 0;
243} 252}
244 253
@@ -284,8 +293,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
284 293
285int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 294int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
286{ 295{
287 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH); 296 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
288 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
289 vcpu->arch.sie_block->ecb = 6; 297 vcpu->arch.sie_block->ecb = 6;
290 vcpu->arch.sie_block->eca = 0xC1002001U; 298 vcpu->arch.sie_block->eca = 0xC1002001U;
291 vcpu->arch.sie_block->fac = (int) (long) facilities; 299 vcpu->arch.sie_block->fac = (int) (long) facilities;
@@ -453,6 +461,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
453 local_irq_disable(); 461 local_irq_disable();
454 kvm_guest_enter(); 462 kvm_guest_enter();
455 local_irq_enable(); 463 local_irq_enable();
464 gmap_enable(vcpu->arch.gmap);
456 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 465 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
457 atomic_read(&vcpu->arch.sie_block->cpuflags)); 466 atomic_read(&vcpu->arch.sie_block->cpuflags));
458 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { 467 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
@@ -461,6 +470,7 @@ static void __vcpu_run(struct kvm_vcpu *vcpu)
461 } 470 }
462 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 471 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
463 vcpu->arch.sie_block->icptcode); 472 vcpu->arch.sie_block->icptcode);
473 gmap_disable(vcpu->arch.gmap);
464 local_irq_disable(); 474 local_irq_disable();
465 kvm_guest_exit(); 475 kvm_guest_exit();
466 local_irq_enable(); 476 local_irq_enable();
@@ -474,17 +484,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
474 sigset_t sigsaved; 484 sigset_t sigsaved;
475 485
476rerun_vcpu: 486rerun_vcpu:
477 if (vcpu->requests)
478 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
479 kvm_s390_vcpu_set_mem(vcpu);
480
481 /* verify, that memory has been registered */
482 if (!vcpu->arch.sie_block->gmslm) {
483 vcpu_put(vcpu);
484 VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
485 return -EINVAL;
486 }
487
488 if (vcpu->sigset_active) 487 if (vcpu->sigset_active)
489 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 488 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
490 489
@@ -545,7 +544,7 @@ rerun_vcpu:
545 return rc; 544 return rc;
546} 545}
547 546
548static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from, 547static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
549 unsigned long n, int prefix) 548 unsigned long n, int prefix)
550{ 549{
551 if (prefix) 550 if (prefix)
@@ -562,7 +561,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
562 */ 561 */
563int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 562int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
564{ 563{
565 const unsigned char archmode = 1; 564 unsigned char archmode = 1;
566 int prefix; 565 int prefix;
567 566
568 if (addr == KVM_S390_STORE_STATUS_NOADDR) { 567 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
@@ -680,10 +679,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
680 if (mem->guest_phys_addr) 679 if (mem->guest_phys_addr)
681 return -EINVAL; 680 return -EINVAL;
682 681
683 if (mem->userspace_addr & (PAGE_SIZE - 1)) 682 if (mem->userspace_addr & 0xffffful)
684 return -EINVAL; 683 return -EINVAL;
685 684
686 if (mem->memory_size & (PAGE_SIZE - 1)) 685 if (mem->memory_size & 0xffffful)
687 return -EINVAL; 686 return -EINVAL;
688 687
689 if (!user_alloc) 688 if (!user_alloc)
@@ -697,15 +696,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
697 struct kvm_memory_slot old, 696 struct kvm_memory_slot old,
698 int user_alloc) 697 int user_alloc)
699{ 698{
700 int i; 699 int rc;
701 struct kvm_vcpu *vcpu;
702 700
703 /* request update of sie control block for all available vcpus */ 701
704 kvm_for_each_vcpu(i, vcpu, kvm) { 702 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
705 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) 703 mem->guest_phys_addr, mem->memory_size);
706 continue; 704 if (rc)
707 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); 705 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
708 } 706 return;
709} 707}
710 708
711void kvm_arch_flush_shadow(struct kvm *kvm) 709void kvm_arch_flush_shadow(struct kvm *kvm)
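Note that the memory-region sanity checks above now mask with 0xfffff, i.e. they require 1 MiB (segment) alignment rather than page alignment, matching gmap_map_segment()'s requirement that source address, target address and length all be PMD_SIZE aligned; kvm_arch_commit_memory_region() then installs the whole slot with a single gmap_map_segment() call instead of forcing every vcpu to reload. A tiny standalone check of the mask arithmetic, with SEG_SIZE assumed to be the 1 MiB s390 segment size:

#include <stdio.h>

#define SEG_SIZE       (1UL << 20)   /* assumed s390 segment/PMD size */
#define SEG_ALIGN_MASK 0xffffful     /* the mask used in the patch: SEG_SIZE - 1 */

static int segment_aligned(unsigned long x)
{
	return (x & SEG_ALIGN_MASK) == 0;
}

int main(void)
{
	unsigned long vals[] = { 0, SEG_SIZE, 3 * SEG_SIZE, SEG_SIZE + 4096 };

	for (int i = 0; i < 4; i++)
		printf("%#lx -> %s\n", vals[i],
		       segment_aligned(vals[i]) ? "segment aligned"
						: "rejected (-EINVAL)");
	return 0;
}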
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index a7b7586626db..99b0b7597115 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -58,35 +58,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); 58int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); 59int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
60 60
61static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
62{
63 return vcpu->arch.sie_block->gmslm
64 - vcpu->arch.sie_block->gmsor
65 - VIRTIODESCSPACE + 1ul;
66}
67
68static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
69{
70 int idx;
71 struct kvm_memory_slot *mem;
72 struct kvm_memslots *memslots;
73
74 idx = srcu_read_lock(&vcpu->kvm->srcu);
75 memslots = kvm_memslots(vcpu->kvm);
76
77 mem = &memslots->memslots[0];
78
79 vcpu->arch.sie_block->gmsor = mem->userspace_addr;
80 vcpu->arch.sie_block->gmslm =
81 mem->userspace_addr +
82 (mem->npages << PAGE_SHIFT) +
83 VIRTIODESCSPACE - 1ul;
84
85 srcu_read_unlock(&vcpu->kvm->srcu, idx);
86}
87
88/* implemented in priv.c */ 61/* implemented in priv.c */
89int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); 62int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
63int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
90 64
91/* implemented in sigp.c */ 65/* implemented in sigp.c */
92int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); 66int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 73c47bd95db3..391626361084 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -326,3 +326,52 @@ int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
326 } 326 }
327 return -EOPNOTSUPP; 327 return -EOPNOTSUPP;
328} 328}
329
330static int handle_tprot(struct kvm_vcpu *vcpu)
331{
332 int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
333 int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
334 int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
335 int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
336 u64 address1 = disp1 + base1 ? vcpu->arch.guest_gprs[base1] : 0;
337 u64 address2 = disp2 + base2 ? vcpu->arch.guest_gprs[base2] : 0;
338 struct vm_area_struct *vma;
339
340 vcpu->stat.instruction_tprot++;
341
342 /* we only handle the Linux memory detection case:
343 * access key == 0
344 * guest DAT == off
345 * everything else goes to userspace. */
346 if (address2 & 0xf0)
347 return -EOPNOTSUPP;
348 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
349 return -EOPNOTSUPP;
350
351
352 down_read(&current->mm->mmap_sem);
353 vma = find_vma(current->mm,
354 (unsigned long) __guestaddr_to_user(vcpu, address1));
355 if (!vma) {
356 up_read(&current->mm->mmap_sem);
357 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
358 }
359
360 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
361 if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
362 vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
363 if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
364 vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
365
366 up_read(&current->mm->mmap_sem);
367 return 0;
368}
369
370int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
371{
372 /* For e5xx... instructions we only handle TPROT */
373 if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
374 return handle_tprot(vcpu);
375 return -EOPNOTSUPP;
376}
377
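handle_tprot() above reports its result through the guest condition code, which occupies bits 18-19 of the 64-bit PSW mask; that is what the shifts by 44 manipulate: cc 0 if the backing vma is writable, cc 1 if it is read-only, cc 2 if it is inaccessible. A user-space sketch of that mapping, with locally defined VM_READ/VM_WRITE flags standing in for the vma flag checks:

#include <stdio.h>
#include <stdint.h>

#define VM_READ  0x1
#define VM_WRITE 0x2

/* The cc field sits in bits 18-19 of the PSW mask, i.e. shifted by 44
 * when the mask is viewed as a 64-bit integer. */
#define PSW_CC_SHIFT 44
#define PSW_CC_MASK  (3UL << PSW_CC_SHIFT)

/* Mirror of the cc selection in handle_tprot(). */
static uint64_t set_tprot_cc(uint64_t psw_mask, unsigned int vm_flags)
{
	psw_mask &= ~PSW_CC_MASK;                      /* cc 0: fetch and store ok */
	if (!(vm_flags & VM_WRITE) && (vm_flags & VM_READ))
		psw_mask |= 1UL << PSW_CC_SHIFT;       /* cc 1: store protected */
	if (!(vm_flags & VM_WRITE) && !(vm_flags & VM_READ))
		psw_mask |= 2UL << PSW_CC_SHIFT;       /* cc 2: no access */
	return psw_mask;
}

int main(void)
{
	unsigned int cases[] = { VM_READ | VM_WRITE, VM_READ, 0 };

	for (int i = 0; i < 3; i++) {
		uint64_t mask = set_tprot_cc(0, cases[i]);

		printf("vm_flags %#x -> cc %lu\n", cases[i],
		       (unsigned long) ((mask & PSW_CC_MASK) >> PSW_CC_SHIFT));
	}
	return 0;
}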
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
deleted file mode 100644
index 5faa1b1b23fa..000000000000
--- a/arch/s390/kvm/sie64a.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * sie64a.S - low level sie call
3 *
4 * Copyright IBM Corp. 2008,2010
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
11 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
12 */
13
14#include <linux/errno.h>
15#include <asm/asm-offsets.h>
16#include <asm/setup.h>
17#include <asm/asm-offsets.h>
18#include <asm/ptrace.h>
19#include <asm/thread_info.h>
20
21_TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
22
23/*
24 * offsets into stackframe
25 * SP_ = offsets into stack sie64 is called with
26 * SPI_ = offsets into irq stack
27 */
28SP_GREGS = __SF_EMPTY
29SP_HOOK = __SF_EMPTY+8
30SP_GPP = __SF_EMPTY+16
31SPI_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
32
33
34 .macro SPP newpp
35 tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP
36 jz 0f
37 .insn s,0xb2800000,\newpp
380:
39 .endm
40
41sie_irq_handler:
42 SPP __LC_CMF_HPP # set host id
43 larl %r2,sie_inst
44 clg %r2,SPI_PSW+8(0,%r15) # intercepted sie
45 jne 1f
46 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
47 lg %r2,__LC_THREAD_INFO # pointer thread_info struct
48 tm __TI_flags+7(%r2),_TIF_EXIT_SIE
49 jz 0f
50 larl %r2,sie_exit # work pending, leave sie
51 stg %r2,SPI_PSW+8(0,%r15)
52 br %r14
530: larl %r2,sie_reenter # re-enter with guest id
54 stg %r2,SPI_PSW+8(0,%r15)
551: br %r14
56
57/*
58 * sie64a calling convention:
59 * %r2 pointer to sie control block
60 * %r3 guest register save area
61 */
62 .globl sie64a
63sie64a:
64 stg %r3,SP_GREGS(%r15) # save guest register save area
65 stmg %r6,%r14,__SF_GPRS(%r15) # save registers on entry
66 lgr %r14,%r2 # pointer to sie control block
67 larl %r5,sie_irq_handler
68 stg %r2,SP_GPP(%r15)
69 stg %r5,SP_HOOK(%r15) # save hook target
70 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
71sie_reenter:
72 mvc __LC_SIE_HOOK(8),SP_HOOK(%r15)
73 SPP SP_GPP(%r15) # set guest id
74sie_inst:
75 sie 0(%r14)
76 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
77 SPP __LC_CMF_HPP # set host id
78sie_exit:
79 lg %r14,SP_GREGS(%r15)
80 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
81 lghi %r2,0
82 lmg %r6,%r14,__SF_GPRS(%r15)
83 br %r14
84
85sie_err:
86 xc __LC_SIE_HOOK(8),__LC_SIE_HOOK
87 SPP __LC_CMF_HPP # set host id
88 lg %r14,SP_GREGS(%r15)
89 stmg %r0,%r13,0(%r14) # save guest gprs 0-13
90 lghi %r2,-EFAULT
91 lmg %r6,%r14,__SF_GPRS(%r15)
92 br %r14
93
94 .section __ex_table,"a"
95 .quad sie_inst,sie_err
96 .quad sie_exit,sie_err
97 .quad sie_reenter,sie_err
98 .previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 702276f5e2fa..d6a50c1fb2e6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -189,10 +189,8 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
189 189
190 /* make sure that the new value is valid memory */ 190 /* make sure that the new value is valid memory */
191 address = address & 0x7fffe000u; 191 address = address & 0x7fffe000u;
192 if ((copy_from_user(&tmp, (void __user *) 192 if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
193 (address + vcpu->arch.sie_block->gmsor) , 1)) || 193 copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
194 (copy_from_user(&tmp, (void __user *)(address +
195 vcpu->arch.sie_block->gmsor + PAGE_SIZE), 1))) {
196 *reg |= SIGP_STAT_INVALID_PARAMETER; 194 *reg |= SIGP_STAT_INVALID_PARAMETER;
197 return 1; /* invalid parameter */ 195 return 1; /* invalid parameter */
198 } 196 }
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
index eb1df632e749..d321329130ec 100644
--- a/arch/s390/lib/qrnnd.S
+++ b/arch/s390/lib/qrnnd.S
@@ -1,5 +1,7 @@
1# S/390 __udiv_qrnnd 1# S/390 __udiv_qrnnd
2 2
3#include <linux/linkage.h>
4
3# r2 : &__r 5# r2 : &__r
4# r3 : upper half of 64 bit word n 6# r3 : upper half of 64 bit word n
5# r4 : lower half of 64 bit word n 7# r4 : lower half of 64 bit word n
@@ -8,8 +10,7 @@
8# the quotient q is to be returned 10# the quotient q is to be returned
9 11
10 .text 12 .text
11 .globl __udiv_qrnnd 13ENTRY(__udiv_qrnnd)
12__udiv_qrnnd:
13 st %r2,24(%r15) # store pointer to reminder for later 14 st %r2,24(%r15) # store pointer to reminder for later
14 lr %r0,%r3 # reload n 15 lr %r0,%r3 # reload n
15 lr %r1,%r4 16 lr %r1,%r4
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 095f782a5512..9564fc779b27 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -303,9 +303,24 @@ static inline int do_exception(struct pt_regs *regs, int access,
303 flags = FAULT_FLAG_ALLOW_RETRY; 303 flags = FAULT_FLAG_ALLOW_RETRY;
304 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) 304 if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
305 flags |= FAULT_FLAG_WRITE; 305 flags |= FAULT_FLAG_WRITE;
306retry:
307 down_read(&mm->mmap_sem); 306 down_read(&mm->mmap_sem);
308 307
308#ifdef CONFIG_PGSTE
309 if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) {
310 address = gmap_fault(address,
311 (struct gmap *) S390_lowcore.gmap);
312 if (address == -EFAULT) {
313 fault = VM_FAULT_BADMAP;
314 goto out_up;
315 }
316 if (address == -ENOMEM) {
317 fault = VM_FAULT_OOM;
318 goto out_up;
319 }
320 }
321#endif
322
323retry:
309 fault = VM_FAULT_BADMAP; 324 fault = VM_FAULT_BADMAP;
310 vma = find_vma(mm, address); 325 vma = find_vma(mm, address);
311 if (!vma) 326 if (!vma)
@@ -356,6 +371,7 @@ retry:
356 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk 371 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
357 * of starvation. */ 372 * of starvation. */
358 flags &= ~FAULT_FLAG_ALLOW_RETRY; 373 flags &= ~FAULT_FLAG_ALLOW_RETRY;
374 down_read(&mm->mmap_sem);
359 goto retry; 375 goto retry;
360 } 376 }
361 } 377 }
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index a4d856db9154..597bb2d27c3c 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -35,7 +35,7 @@ int arch_prepare_hugepage(struct page *page)
35 if (MACHINE_HAS_HPAGE) 35 if (MACHINE_HAS_HPAGE)
36 return 0; 36 return 0;
37 37
38 ptep = (pte_t *) pte_alloc_one(&init_mm, address); 38 ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
39 if (!ptep) 39 if (!ptep)
40 return -ENOMEM; 40 return -ENOMEM;
41 41
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37a23c223705..2adb23938a7f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,6 +16,7 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/quicklist.h> 17#include <linux/quicklist.h>
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/slab.h>
19 20
20#include <asm/system.h> 21#include <asm/system.h>
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
@@ -133,30 +134,374 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
133} 134}
134#endif 135#endif
135 136
136static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) 137#ifdef CONFIG_PGSTE
138
139/**
140 * gmap_alloc - allocate a guest address space
141 * @mm: pointer to the parent mm_struct
142 *
143 * Returns a guest address space structure.
144 */
145struct gmap *gmap_alloc(struct mm_struct *mm)
137{ 146{
138 unsigned int old, new; 147 struct gmap *gmap;
148 struct page *page;
149 unsigned long *table;
139 150
140 do { 151 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
141 old = atomic_read(v); 152 if (!gmap)
142 new = old ^ bits; 153 goto out;
143 } while (atomic_cmpxchg(v, old, new) != old); 154 INIT_LIST_HEAD(&gmap->crst_list);
144 return new; 155 gmap->mm = mm;
156 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
157 if (!page)
158 goto out_free;
159 list_add(&page->lru, &gmap->crst_list);
160 table = (unsigned long *) page_to_phys(page);
161 crst_table_init(table, _REGION1_ENTRY_EMPTY);
162 gmap->table = table;
163 list_add(&gmap->list, &mm->context.gmap_list);
164 return gmap;
165
166out_free:
167 kfree(gmap);
168out:
169 return NULL;
145} 170}
171EXPORT_SYMBOL_GPL(gmap_alloc);
146 172
147/* 173static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
148 * page table entry allocation/free routines. 174{
175 struct gmap_pgtable *mp;
176 struct gmap_rmap *rmap;
177 struct page *page;
178
179 if (*table & _SEGMENT_ENTRY_INV)
180 return 0;
181 page = pfn_to_page(*table >> PAGE_SHIFT);
182 mp = (struct gmap_pgtable *) page->index;
183 list_for_each_entry(rmap, &mp->mapper, list) {
184 if (rmap->entry != table)
185 continue;
186 list_del(&rmap->list);
187 kfree(rmap);
188 break;
189 }
190 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
191 return 1;
192}
193
194static void gmap_flush_tlb(struct gmap *gmap)
195{
196 if (MACHINE_HAS_IDTE)
197 __tlb_flush_idte((unsigned long) gmap->table |
198 _ASCE_TYPE_REGION1);
199 else
200 __tlb_flush_global();
201}
202
203/**
204 * gmap_free - free a guest address space
205 * @gmap: pointer to the guest address space structure
149 */ 206 */
150#ifdef CONFIG_PGSTE 207void gmap_free(struct gmap *gmap)
151static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm) 208{
209 struct page *page, *next;
210 unsigned long *table;
211 int i;
212
213
214 /* Flush tlb. */
215 if (MACHINE_HAS_IDTE)
216 __tlb_flush_idte((unsigned long) gmap->table |
217 _ASCE_TYPE_REGION1);
218 else
219 __tlb_flush_global();
220
221 /* Free all segment & region tables. */
222 down_read(&gmap->mm->mmap_sem);
223 list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
224 table = (unsigned long *) page_to_phys(page);
225 if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
226 /* Remove gmap rmap structures for segment table. */
227 for (i = 0; i < PTRS_PER_PMD; i++, table++)
228 gmap_unlink_segment(gmap, table);
229 __free_pages(page, ALLOC_ORDER);
230 }
231 up_read(&gmap->mm->mmap_sem);
232 list_del(&gmap->list);
233 kfree(gmap);
234}
235EXPORT_SYMBOL_GPL(gmap_free);
236
237/**
238 * gmap_enable - switch primary space to the guest address space
239 * @gmap: pointer to the guest address space structure
240 */
241void gmap_enable(struct gmap *gmap)
242{
243 /* Load primary space page table origin. */
244 S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
245 _ASCE_USER_BITS | __pa(gmap->table);
246 asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
247 S390_lowcore.gmap = (unsigned long) gmap;
248}
249EXPORT_SYMBOL_GPL(gmap_enable);
250
251/**
252 * gmap_disable - switch back to the standard primary address space
253 * @gmap: pointer to the guest address space structure
254 */
255void gmap_disable(struct gmap *gmap)
256{
257 /* Load primary space page table origin. */
258 S390_lowcore.user_asce =
259 gmap->mm->context.asce_bits | __pa(gmap->mm->pgd);
260 asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) );
261 S390_lowcore.gmap = 0UL;
262}
263EXPORT_SYMBOL_GPL(gmap_disable);
264
265static int gmap_alloc_table(struct gmap *gmap,
266 unsigned long *table, unsigned long init)
267{
268 struct page *page;
269 unsigned long *new;
270
271 page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
272 if (!page)
273 return -ENOMEM;
274 new = (unsigned long *) page_to_phys(page);
275 crst_table_init(new, init);
276 down_read(&gmap->mm->mmap_sem);
277 if (*table & _REGION_ENTRY_INV) {
278 list_add(&page->lru, &gmap->crst_list);
279 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
280 (*table & _REGION_ENTRY_TYPE_MASK);
281 } else
282 __free_pages(page, ALLOC_ORDER);
283 up_read(&gmap->mm->mmap_sem);
284 return 0;
285}
286
287/**
288 * gmap_unmap_segment - unmap segment from the guest address space
289 * @gmap: pointer to the guest address space structure
290 * @addr: address in the guest address space
291 * @len: length of the memory area to unmap
292 *
293 * Returns 0 if the unmap succeeded, -EINVAL if not.
294 */
295int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
296{
297 unsigned long *table;
298 unsigned long off;
299 int flush;
300
301 if ((to | len) & (PMD_SIZE - 1))
302 return -EINVAL;
303 if (len == 0 || to + len < to)
304 return -EINVAL;
305
306 flush = 0;
307 down_read(&gmap->mm->mmap_sem);
308 for (off = 0; off < len; off += PMD_SIZE) {
309 /* Walk the guest addr space page table */
310 table = gmap->table + (((to + off) >> 53) & 0x7ff);
311 if (*table & _REGION_ENTRY_INV)
312 return 0;
313 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
314 table = table + (((to + off) >> 42) & 0x7ff);
315 if (*table & _REGION_ENTRY_INV)
316 return 0;
317 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
318 table = table + (((to + off) >> 31) & 0x7ff);
319 if (*table & _REGION_ENTRY_INV)
320 return 0;
321 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
322 table = table + (((to + off) >> 20) & 0x7ff);
323
324 /* Clear segment table entry in guest address space. */
325 flush |= gmap_unlink_segment(gmap, table);
326 *table = _SEGMENT_ENTRY_INV;
327 }
328 up_read(&gmap->mm->mmap_sem);
329 if (flush)
330 gmap_flush_tlb(gmap);
331 return 0;
332}
333EXPORT_SYMBOL_GPL(gmap_unmap_segment);
334
335/**
336 * gmap_mmap_segment - map a segment to the guest address space
337 * @gmap: pointer to the guest address space structure
338 * @from: source address in the parent address space
339 * @to: target address in the guest address space
340 *
341 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
342 */
343int gmap_map_segment(struct gmap *gmap, unsigned long from,
344 unsigned long to, unsigned long len)
345{
346 unsigned long *table;
347 unsigned long off;
348 int flush;
349
350 if ((from | to | len) & (PMD_SIZE - 1))
351 return -EINVAL;
352 if (len == 0 || from + len > PGDIR_SIZE ||
353 from + len < from || to + len < to)
354 return -EINVAL;
355
356 flush = 0;
357 down_read(&gmap->mm->mmap_sem);
358 for (off = 0; off < len; off += PMD_SIZE) {
359 /* Walk the gmap address space page table */
360 table = gmap->table + (((to + off) >> 53) & 0x7ff);
361 if ((*table & _REGION_ENTRY_INV) &&
362 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
363 goto out_unmap;
364 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
365 table = table + (((to + off) >> 42) & 0x7ff);
366 if ((*table & _REGION_ENTRY_INV) &&
367 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
368 goto out_unmap;
369 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
370 table = table + (((to + off) >> 31) & 0x7ff);
371 if ((*table & _REGION_ENTRY_INV) &&
372 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
373 goto out_unmap;
374 table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
375 table = table + (((to + off) >> 20) & 0x7ff);
376
377 /* Store 'from' address in an invalid segment table entry. */
378 flush |= gmap_unlink_segment(gmap, table);
379 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
380 }
381 up_read(&gmap->mm->mmap_sem);
382 if (flush)
383 gmap_flush_tlb(gmap);
384 return 0;
385
386out_unmap:
387 up_read(&gmap->mm->mmap_sem);
388 gmap_unmap_segment(gmap, to, len);
389 return -ENOMEM;
390}
391EXPORT_SYMBOL_GPL(gmap_map_segment);
392
393unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
394{
395 unsigned long *table, vmaddr, segment;
396 struct mm_struct *mm;
397 struct gmap_pgtable *mp;
398 struct gmap_rmap *rmap;
399 struct vm_area_struct *vma;
400 struct page *page;
401 pgd_t *pgd;
402 pud_t *pud;
403 pmd_t *pmd;
404
405 current->thread.gmap_addr = address;
406 mm = gmap->mm;
407 /* Walk the gmap address space page table */
408 table = gmap->table + ((address >> 53) & 0x7ff);
409 if (unlikely(*table & _REGION_ENTRY_INV))
410 return -EFAULT;
411 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
412 table = table + ((address >> 42) & 0x7ff);
413 if (unlikely(*table & _REGION_ENTRY_INV))
414 return -EFAULT;
415 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
416 table = table + ((address >> 31) & 0x7ff);
417 if (unlikely(*table & _REGION_ENTRY_INV))
418 return -EFAULT;
419 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
420 table = table + ((address >> 20) & 0x7ff);
421
422 /* Convert the gmap address to an mm address. */
423 segment = *table;
424 if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
425 page = pfn_to_page(segment >> PAGE_SHIFT);
426 mp = (struct gmap_pgtable *) page->index;
427 return mp->vmaddr | (address & ~PMD_MASK);
428 } else if (segment & _SEGMENT_ENTRY_RO) {
429 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
430 vma = find_vma(mm, vmaddr);
431 if (!vma || vma->vm_start > vmaddr)
432 return -EFAULT;
433
434 /* Walk the parent mm page table */
435 pgd = pgd_offset(mm, vmaddr);
436 pud = pud_alloc(mm, pgd, vmaddr);
437 if (!pud)
438 return -ENOMEM;
439 pmd = pmd_alloc(mm, pud, vmaddr);
440 if (!pmd)
441 return -ENOMEM;
442 if (!pmd_present(*pmd) &&
443 __pte_alloc(mm, vma, pmd, vmaddr))
444 return -ENOMEM;
445 /* pmd now points to a valid segment table entry. */
446 rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
447 if (!rmap)
448 return -ENOMEM;
449 /* Link gmap segment table entry location to page table. */
450 page = pmd_page(*pmd);
451 mp = (struct gmap_pgtable *) page->index;
452 rmap->entry = table;
453 list_add(&rmap->list, &mp->mapper);
454 /* Set gmap segment table entry to page table. */
455 *table = pmd_val(*pmd) & PAGE_MASK;
456 return vmaddr | (address & ~PMD_MASK);
457 }
458 return -EFAULT;
459
460}
461EXPORT_SYMBOL_GPL(gmap_fault);
462
463void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
464{
465 struct gmap_rmap *rmap, *next;
466 struct gmap_pgtable *mp;
467 struct page *page;
468 int flush;
469
470 flush = 0;
471 spin_lock(&mm->page_table_lock);
472 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
473 mp = (struct gmap_pgtable *) page->index;
474 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
475 *rmap->entry =
476 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
477 list_del(&rmap->list);
478 kfree(rmap);
479 flush = 1;
480 }
481 spin_unlock(&mm->page_table_lock);
482 if (flush)
483 __tlb_flush_global();
484}
485
486static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
487 unsigned long vmaddr)
152{ 488{
153 struct page *page; 489 struct page *page;
154 unsigned long *table; 490 unsigned long *table;
491 struct gmap_pgtable *mp;
155 492
156 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 493 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
157 if (!page) 494 if (!page)
158 return NULL; 495 return NULL;
496 mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
497 if (!mp) {
498 __free_page(page);
499 return NULL;
500 }
159 pgtable_page_ctor(page); 501 pgtable_page_ctor(page);
502 mp->vmaddr = vmaddr & PMD_MASK;
503 INIT_LIST_HEAD(&mp->mapper);
504 page->index = (unsigned long) mp;
160 atomic_set(&page->_mapcount, 3); 505 atomic_set(&page->_mapcount, 3);
161 table = (unsigned long *) page_to_phys(page); 506 table = (unsigned long *) page_to_phys(page);
162 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); 507 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
@@ -167,24 +512,57 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
167static inline void page_table_free_pgste(unsigned long *table) 512static inline void page_table_free_pgste(unsigned long *table)
168{ 513{
169 struct page *page; 514 struct page *page;
515 struct gmap_pgtable *mp;
170 516
171 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 517 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
518 mp = (struct gmap_pgtable *) page->index;
519 BUG_ON(!list_empty(&mp->mapper));
172 pgtable_page_ctor(page); 520 pgtable_page_ctor(page);
173 atomic_set(&page->_mapcount, -1); 521 atomic_set(&page->_mapcount, -1);
522 kfree(mp);
174 __free_page(page); 523 __free_page(page);
175} 524}
176#endif
177 525
178unsigned long *page_table_alloc(struct mm_struct *mm) 526#else /* CONFIG_PGSTE */
527
528static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
529 unsigned long vmaddr)
530{
531}
532
533static inline void page_table_free_pgste(unsigned long *table)
534{
535}
536
537static inline void gmap_unmap_notifier(struct mm_struct *mm,
538 unsigned long *table)
539{
540}
541
542#endif /* CONFIG_PGSTE */
543
544static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
545{
546 unsigned int old, new;
547
548 do {
549 old = atomic_read(v);
550 new = old ^ bits;
551 } while (atomic_cmpxchg(v, old, new) != old);
552 return new;
553}
554
555/*
556 * page table entry allocation/free routines.
557 */
558unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
179{ 559{
180 struct page *page; 560 struct page *page;
181 unsigned long *table; 561 unsigned long *table;
182 unsigned int mask, bit; 562 unsigned int mask, bit;
183 563
184#ifdef CONFIG_PGSTE
185 if (mm_has_pgste(mm)) 564 if (mm_has_pgste(mm))
186 return page_table_alloc_pgste(mm); 565 return page_table_alloc_pgste(mm, vmaddr);
187#endif
188 /* Allocate fragments of a 4K page as 1K/2K page table */ 566 /* Allocate fragments of a 4K page as 1K/2K page table */
189 spin_lock_bh(&mm->context.list_lock); 567 spin_lock_bh(&mm->context.list_lock);
190 mask = FRAG_MASK; 568 mask = FRAG_MASK;
@@ -222,10 +600,10 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
222 struct page *page; 600 struct page *page;
223 unsigned int bit, mask; 601 unsigned int bit, mask;
224 602
225#ifdef CONFIG_PGSTE 603 if (mm_has_pgste(mm)) {
226 if (mm_has_pgste(mm)) 604 gmap_unmap_notifier(mm, table);
227 return page_table_free_pgste(table); 605 return page_table_free_pgste(table);
228#endif 606 }
229 /* Free 1K/2K page table fragment of a 4K page */ 607 /* Free 1K/2K page table fragment of a 4K page */
230 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 608 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
231 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); 609 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
@@ -249,10 +627,8 @@ static void __page_table_free_rcu(void *table, unsigned bit)
249{ 627{
250 struct page *page; 628 struct page *page;
251 629
252#ifdef CONFIG_PGSTE
253 if (bit == FRAG_MASK) 630 if (bit == FRAG_MASK)
254 return page_table_free_pgste(table); 631 return page_table_free_pgste(table);
255#endif
256 /* Free 1K/2K page table fragment of a 4K page */ 632 /* Free 1K/2K page table fragment of a 4K page */
257 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 633 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
258 if (atomic_xor_bits(&page->_mapcount, bit) == 0) { 634 if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
@@ -269,13 +645,12 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
269 unsigned int bit, mask; 645 unsigned int bit, mask;
270 646
271 mm = tlb->mm; 647 mm = tlb->mm;
272#ifdef CONFIG_PGSTE
273 if (mm_has_pgste(mm)) { 648 if (mm_has_pgste(mm)) {
649 gmap_unmap_notifier(mm, table);
274 table = (unsigned long *) (__pa(table) | FRAG_MASK); 650 table = (unsigned long *) (__pa(table) | FRAG_MASK);
275 tlb_remove_table(tlb, table); 651 tlb_remove_table(tlb, table);
276 return; 652 return;
277 } 653 }
278#endif
279 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); 654 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
280 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 655 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
281 spin_lock_bh(&mm->context.list_lock); 656 spin_lock_bh(&mm->context.list_lock);
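gmap_map_segment(), gmap_unmap_segment() and gmap_fault() above all walk the same four-level structure rooted at a region-1 table: each level is a 2048-entry table, indexed by the guest address bits at shifts 53, 42, 31 and 20. A standalone sketch of just that index arithmetic (shifts and mask taken from the code above; no real tables are allocated or walked):

#include <stdio.h>

#define ENTRY_MASK 0x7ffUL   /* 2048 entries per region/segment table */

/* Region-1, region-2, region-3 and segment index of a guest address,
 * mirroring the shifts used in gmap_fault(). */
static void gmap_indices(unsigned long addr, unsigned long idx[4])
{
	idx[0] = (addr >> 53) & ENTRY_MASK;   /* region-1 table */
	idx[1] = (addr >> 42) & ENTRY_MASK;   /* region-2 table */
	idx[2] = (addr >> 31) & ENTRY_MASK;   /* region-3 table */
	idx[3] = (addr >> 20) & ENTRY_MASK;   /* segment table  */
}

int main(void)
{
	unsigned long idx[4];
	unsigned long addr = 0x00a5123456789abcUL;

	gmap_indices(addr, idx);
	printf("addr %#lx: r1 %lu, r2 %lu, r3 %lu, sx %lu\n",
	       addr, idx[0], idx[1], idx[2], idx[3]);
	return 0;
}

Each step of the kernel walk masks the entry with _REGION_ENTRY_ORIGIN to find the next-level table, so the final segment entry is what gmap_fault() converts to a host address (or marks invalid with the original vmaddr stored in it).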
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8c1970d1dd91..781ff5169560 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -61,12 +61,12 @@ static inline pmd_t *vmem_pmd_alloc(void)
61 return pmd; 61 return pmd;
62} 62}
63 63
64static pte_t __ref *vmem_pte_alloc(void) 64static pte_t __ref *vmem_pte_alloc(unsigned long address)
65{ 65{
66 pte_t *pte; 66 pte_t *pte;
67 67
68 if (slab_is_available()) 68 if (slab_is_available())
69 pte = (pte_t *) page_table_alloc(&init_mm); 69 pte = (pte_t *) page_table_alloc(&init_mm, address);
70 else 70 else
71 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 71 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
72 if (!pte) 72 if (!pte)
@@ -120,7 +120,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
120 } 120 }
121#endif 121#endif
122 if (pmd_none(*pm_dir)) { 122 if (pmd_none(*pm_dir)) {
123 pt_dir = vmem_pte_alloc(); 123 pt_dir = vmem_pte_alloc(address);
124 if (!pt_dir) 124 if (!pt_dir)
125 goto out; 125 goto out;
126 pmd_populate(&init_mm, pm_dir, pt_dir); 126 pmd_populate(&init_mm, pm_dir, pt_dir);
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
205 205
206 pm_dir = pmd_offset(pu_dir, address); 206 pm_dir = pmd_offset(pu_dir, address);
207 if (pmd_none(*pm_dir)) { 207 if (pmd_none(*pm_dir)) {
208 pt_dir = vmem_pte_alloc(); 208 pt_dir = vmem_pte_alloc(address);
209 if (!pt_dir) 209 if (!pt_dir)
210 goto out; 210 goto out;
211 pmd_populate(&init_mm, pm_dir, pt_dir); 211 pmd_populate(&init_mm, pm_dir, pt_dir);
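The vmem changes simply thread the mapped virtual address down into page_table_alloc(); with PGSTE enabled, that address (rounded to the segment boundary) is recorded in the gmap_pgtable descriptor hung off page->index so that gmap_fault() can later link guest segment entries back to the owning page table. A rough user-space sketch of that bookkeeping, with malloc() standing in for the page allocator and a fake page structure instead of struct page; the names here are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

#define SEG_SIZE (1UL << 20)           /* assumed s390 segment/PMD size */
#define SEG_MASK (~(SEG_SIZE - 1))

/* Simplified stand-in for struct gmap_pgtable: the segment-aligned
 * address this page table maps (the kernel also keeps an rmap list). */
struct pgtable_desc {
	unsigned long vmaddr;
};

struct fake_page {
	unsigned long *table;          /* the page table itself */
	struct pgtable_desc *desc;     /* kernel stores this in page->index */
};

static struct fake_page *page_table_alloc_sketch(unsigned long vmaddr)
{
	struct fake_page *page = malloc(sizeof(*page));

	if (!page)
		return NULL;
	page->table = calloc(256, sizeof(unsigned long)); /* 256 ptes, as on s390 */
	page->desc = malloc(sizeof(*page->desc));
	if (!page->table || !page->desc) {
		free(page->table);
		free(page->desc);
		free(page);
		return NULL;
	}
	page->desc->vmaddr = vmaddr & SEG_MASK;  /* like mp->vmaddr = vmaddr & PMD_MASK */
	return page;
}

int main(void)
{
	struct fake_page *p = page_table_alloc_sketch(0x12345678UL);

	if (!p)
		return 1;
	printf("page table covers segment at %#lx\n", p->desc->vmaddr);
	free(p->table);
	free(p->desc);
	free(p);
	return 0;
}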