Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/Kbuild        |   5
-rw-r--r--  arch/s390/include/asm/airq.h        |  14
-rw-r--r--  arch/s390/include/asm/bitops.h      |   8
-rw-r--r--  arch/s390/include/asm/ccwdev.h      |   4
-rw-r--r--  arch/s390/include/asm/ccwgroup.h    |   1
-rw-r--r--  arch/s390/include/asm/checksum.h    |  11
-rw-r--r--  arch/s390/include/asm/compat.h      |   6
-rw-r--r--  arch/s390/include/asm/futex.h       |  13
-rw-r--r--  arch/s390/include/asm/kvm_host.h    |   5
-rw-r--r--  arch/s390/include/asm/mmu_context.h |  39
-rw-r--r--  arch/s390/include/asm/pgalloc.h     |  18
-rw-r--r--  arch/s390/include/asm/pgtable.h     | 105
-rw-r--r--  arch/s390/include/asm/ptrace.h      |   1
-rw-r--r--  arch/s390/include/asm/sclp.h        |   1
-rw-r--r--  arch/s390/include/asm/setup.h       |   3
-rw-r--r--  arch/s390/include/asm/thread_info.h |   3
-rw-r--r--  arch/s390/include/asm/uaccess.h     | 171
-rw-r--r--  arch/s390/include/uapi/asm/ptrace.h |   6
18 files changed, 243 insertions(+), 171 deletions(-)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 8386a4a1f19a..57892a8a9055 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,7 @@
 
 
 generic-y += clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 4bbb5957ed1b..bd93ff6661b8 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -44,11 +44,21 @@ struct airq_iv {
 
 struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
 void airq_iv_release(struct airq_iv *iv);
-unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
-void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num);
+void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
 			   unsigned long end);
 
+static inline unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+	return airq_iv_alloc(iv, 1);
+}
+
+static inline void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+	airq_iv_free(iv, bit, 1);
+}
+
 static inline unsigned long airq_iv_end(struct airq_iv *iv)
 {
 	return iv->end;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 6e6ad0680829..ec5ef891db6b 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -13,9 +13,9 @@
  *
  * The bitop functions are defined to work on unsigned longs, so for an
  * s390x system the bits end up numbered:
- *   |63..............0|127............64|191...........128|255...........196|
+ *   |63..............0|127............64|191...........128|255...........192|
  * and on s390:
- *   |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
  *
  * There are a few little-endian macros used mostly for filesystem
  * bitmaps, these work on similar bit arrays layouts, but
@@ -30,7 +30,7 @@
  * on an s390x system the bits are numbered:
  *   |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  *
  * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
  * number field needs to be reversed compared to the LSB0 encoded bit
@@ -304,7 +304,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
  * On an s390x system the bits are numbered:
  *   |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- *   |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  */
 unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
 unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
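
The two comment fixes above are off-by-one corrections: the fourth 64-bit word spans MSB0 bits 192..255 (not 196), and the second 32-bit word starts at 32, not 31. A small user-space sketch of the MSB0-to-LSB0 conversion these comments describe — the XOR trick the *_inv helpers rely on; the helper name here is hypothetical, not kernel code:

	#include <assert.h>
	#include <stdio.h>

	#define BITS_PER_LONG 64

	/* MSB0 bit nr maps to LSB0 bit nr ^ 63; the word index nr/64 is unchanged. */
	static unsigned long msb0_to_lsb0(unsigned long nr)
	{
		return nr ^ (BITS_PER_LONG - 1);
	}

	int main(void)
	{
		/* Word 3 covers MSB0 bits 192..255, matching the fixed comment. */
		assert(msb0_to_lsb0(192) / BITS_PER_LONG == 3);
		assert(msb0_to_lsb0(255) / BITS_PER_LONG == 3);
		/* MSB0 bit 0 is the most significant bit of word 0. */
		assert(msb0_to_lsb0(0) == 63);
		printf("MSB0 192 -> LSB0 %lu, MSB0 255 -> LSB0 %lu\n",
		       msb0_to_lsb0(192), msb0_to_lsb0(255));
		return 0;
	}
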
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f201af8be580..a9c2c0686177 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -219,7 +219,9 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
 
-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
+extern void ccw_device_destroy_console(struct ccw_device *);
+extern int ccw_device_enable_console(struct ccw_device *);
 extern void ccw_device_wait_idle(struct ccw_device *);
 extern int ccw_device_force_console(struct ccw_device *);
 
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 23723ce5ca7a..6e670f88d125 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -23,6 +23,7 @@ struct ccwgroup_device {
 	unsigned int count;
 	struct device	dev;
 	struct ccw_device *cdev[0];
+	struct work_struct ungroup_work;
 };
 
 /**
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 4f57a4f3909a..740364856355 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -44,22 +44,15 @@ csum_partial(const void *buff, int len, __wsum sum)
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  *
- * Copy from userspace and compute checksum.  If we catch an exception
- * then zero the rest of the buffer.
+ * Copy from userspace and compute checksum.
  */
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src, void *dst,
                             int len, __wsum sum,
                             int *err_ptr)
 {
-	int missing;
-
-	missing = copy_from_user(dst, src, len);
-	if (missing) {
-		memset(dst + len - missing, 0, missing);
+	if (unlikely(copy_from_user(dst, src, len)))
 		*err_ptr = -EFAULT;
-	}
-
 	return csum_partial(dst, len, sum);
 }
 
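
The memset() can go because __copy_from_user() now zero-pads the destination on a fault (see the kernel-doc added in uaccess.h below), and zero bytes are neutral in a one's-complement sum. A minimal user-space model of that property — csum16() here is a stand-in for illustration, not the s390 csum_partial():

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* 16-bit one's-complement sum over a byte buffer. */
	static uint16_t csum16(const uint8_t *buf, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)buf[i] << 8 | buf[i + 1];
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;
		while (sum >> 16)	/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	int main(void)
	{
		uint8_t a[4] = { 0x12, 0x34, 0x56, 0x78 };
		uint8_t b[8] = { 0x12, 0x34, 0x56, 0x78, 0, 0, 0, 0 };

		/* Trailing zero padding leaves the sum unchanged. */
		assert(csum16(a, 4) == csum16(b, 8));
		printf("sum = 0x%04x\n", csum16(a, 4));
		return 0;
	}
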
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 5d7e8cf83bd6..d350ed9d0fbb 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -8,7 +8,11 @@
 #include <linux/thread_info.h>
 
 #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
-#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v))
+
+#define __SC_DELOUSE(t,v) ({ \
+	BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
+	(t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+})
 
 #define PSW32_MASK_PER		0x40000000UL
 #define PSW32_MASK_DAT		0x04000000UL
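
The new BUILD_BUG_ON() rejects compat syscall arguments wider than 4 bytes that are not pointers, since only pointers are deloused to 31 bits. A standalone GCC/Clang demo of the __TYPE_IS_PTR() trick: a null pointer constant (0ULL) against a pointer operand yields the pointer type in a conditional expression, while an integer operand yields the promoted integer type (u64):

	#include <stdio.h>

	typedef unsigned long long u64;

	#define __TYPE_IS_PTR(t) \
		(!__builtin_types_compatible_p(typeof(0 ? (t)0 : 0ULL), u64))

	int main(void)
	{
		printf("int:    %d\n", __TYPE_IS_PTR(int));    /* prints 0 */
		printf("char *: %d\n", __TYPE_IS_PTR(char *)); /* prints 1 */
		return 0;
	}
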
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 51bcaa0fdeef..fda46bd38c99 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -5,7 +5,10 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
+int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		oparg = 1 << oparg;
 
 	pagefault_disable();
-	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
+	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
 	pagefault_enable();
 
 	if (!ret) {
@@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-						u32 oldval, u32 newval)
-{
-	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
-}
-
 #endif /* _ASM_S390_FUTEX_H */
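
futex_atomic_op_inuser() stays inline; only the uaccess-backed primitives move out of line. For reference, a user-space sketch of the conventional FUTEX_OP encoding it unpacks — the op/cmp shifts match the inline body above, and the sign-extending decode of the 12-bit arguments is assumed from the standard Linux encoding:

	#include <stdio.h>

	/* Standard FUTEX_OP packing: 4-bit op/cmp, 12-bit oparg/cmparg. */
	#define FUTEX_OP(op, oparg, cmp, cmparg) \
		((((op) & 0xf) << 28) | (((cmp) & 0xf) << 24) | \
		 (((oparg) & 0xfff) << 12) | ((cmparg) & 0xfff))

	int main(void)
	{
		int encoded_op = FUTEX_OP(1 /* FUTEX_OP_ADD */, 10,
					  3 /* FUTEX_OP_CMP_GT */, 5);
		int op = (encoded_op >> 28) & 7;
		int cmp = (encoded_op >> 24) & 15;
		int oparg = (encoded_op << 8) >> 20;   /* sign-extend 12 bits */
		int cmparg = (encoded_op << 20) >> 20; /* sign-extend 12 bits */

		printf("op=%d cmp=%d oparg=%d cmparg=%d\n",
		       op, cmp, oparg, cmparg);	/* op=1 cmp=3 oparg=10 cmparg=5 */
		return 0;
	}
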
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index eef3dd3fd9a9..9bf95bb30f1a 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -106,7 +106,9 @@ struct kvm_s390_sie_block {
 	__u64	gbea;			/* 0x0180 */
 	__u8	reserved188[24];	/* 0x0188 */
 	__u32	fac;			/* 0x01a0 */
-	__u8	reserved1a4[68];	/* 0x01a4 */
+	__u8	reserved1a4[20];	/* 0x01a4 */
+	__u64	cbrlo;			/* 0x01b8 */
+	__u8	reserved1c0[40];	/* 0x01c0 */
 	__u64	itdba;			/* 0x01e8 */
 	__u8	reserved1f0[16];	/* 0x01f0 */
 } __attribute__((packed));
@@ -155,6 +157,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stsi;
 	u32 instruction_stfl;
 	u32 instruction_tprot;
+	u32 instruction_essa;
 	u32 instruction_sigp_sense;
 	u32 instruction_sigp_sense_running;
 	u32 instruction_sigp_external_call;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5d1f950704dc..38149b63dc44 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -48,13 +48,42 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-	update_mm(next, tsk);
+	int cpu = smp_processor_id();
+
+	if (prev == next)
+		return;
+	if (atomic_inc_return(&next->context.attach_count) >> 16) {
+		/* Delay update_mm until all TLB flushes are done. */
+		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+	} else {
+		cpumask_set_cpu(cpu, mm_cpumask(next));
+		update_mm(next, tsk);
+		if (next->context.flush_mm)
+			/* Flush pending TLBs */
+			__tlb_flush_mm(next);
+	}
 	atomic_dec(&prev->context.attach_count);
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
-	atomic_inc(&next->context.attach_count);
-	/* Check for TLBs not flushed yet */
-	__tlb_flush_mm_lazy(next);
+}
+
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+
+	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+		return;
+	preempt_disable();
+	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+	while (atomic_read(&mm->context.attach_count) >> 16)
+		cpu_relax();
+
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+	update_mm(mm, tsk);
+	if (mm->context.flush_mm)
+		__tlb_flush_mm(mm);
+	preempt_enable();
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
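
attach_count is now a packed counter: the low 16 bits count CPUs the mm is attached to, the high 16 bits count CPUs inside a lazy-flush section. That is why switch_mm() tests atomic_inc_return() >> 16 and the flush paths add 0x10000. A hypothetical user-space model of the handshake (single-threaded, just to show the arithmetic):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int attach_count;	/* low 16: attachers, high 16: flushers */

	static void attach(void) { atomic_fetch_add(&attach_count, 1); }
	static void detach(void) { atomic_fetch_sub(&attach_count, 1); }

	/* Returns 1 if the flush may be deferred (no other attacher). */
	static int flush_begin(void)
	{
		int count = atomic_fetch_add(&attach_count, 0x10000) + 0x10000;
		return (count & 0xffff) <= 1;	/* only ourselves attached */
	}

	static void flush_end(void) { atomic_fetch_sub(&attach_count, 0x10000); }

	int main(void)
	{
		attach();			/* this CPU uses the mm */
		assert(flush_begin() == 1);	/* alone: lazy flush is safe */
		/* A CPU attaching now sees the flusher in the top half ... */
		int seen = atomic_fetch_add(&attach_count, 1) + 1;
		assert(seen >> 16 == 1);	/* ... and would set TIF_TLB_WAIT */
		flush_end();
		detach(); detach();
		printf("final count = %d\n", atomic_load(&attach_count));
		return 0;
	}
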
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index e1408ddb94f8..884017cbfa9f 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,6 +22,7 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
+void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
@@ -91,11 +92,22 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
 	unsigned long *table = crst_table_alloc(mm);
-	if (table)
-		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+
+	if (!table)
+		return NULL;
+	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+		crst_table_free(mm, table);
+		return NULL;
+	}
 	return (pmd_t *) table;
 }
-#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
+	crst_table_free(mm, (unsigned long *) pmd);
+}
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 2204400d0bd5..1ab75eaacbd4 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -229,6 +229,7 @@ extern unsigned long MODULES_END;
 #define _PAGE_READ	0x010		/* SW pte read bit */
 #define _PAGE_WRITE	0x020		/* SW pte write bit */
 #define _PAGE_SPECIAL	0x040		/* SW associated with special page */
+#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
@@ -394,6 +395,12 @@ extern unsigned long MODULES_END;
 
 #endif /* CONFIG_64BIT */
 
+/* Guest Page State used for virtualization */
+#define _PGSTE_GPS_ZERO		0x0000000080000000UL
+#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
+#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
+#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
+
 /*
  * A user page table pointer has the space-switch-event bit, the
  * private-space-control bit and the storage-alteration-event-control
@@ -617,6 +624,14 @@ static inline int pte_none(pte_t pte)
 	return pte_val(pte) == _PAGE_INVALID;
 }
 
+static inline int pte_swap(pte_t pte)
+{
+	/* Bit pattern: (pte & 0x603) == 0x402 */
+	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
+				_PAGE_TYPE | _PAGE_PRESENT))
+		== (_PAGE_INVALID | _PAGE_TYPE);
+}
+
 static inline int pte_file(pte_t pte)
 {
 	/* Bit pattern: (pte & 0x601) == 0x600 */
@@ -821,20 +836,20 @@ unsigned long gmap_translate(unsigned long address, struct gmap *);
 unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
+void __gmap_zap(unsigned long address, struct gmap *);
 
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
+void gmap_do_ipte_notify(struct mm_struct *, pte_t *);
 
 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
-					unsigned long addr,
 					pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
 	if (pgste_val(pgste) & PGSTE_IN_BIT) {
 		pgste_val(pgste) &= ~PGSTE_IN_BIT;
-		gmap_do_ipte_notify(mm, addr, ptep);
+		gmap_do_ipte_notify(mm, ptep);
 	}
 #endif
 	return pgste;
@@ -852,6 +867,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
+		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 		pgste_set_key(ptep, pgste, entry);
 		pgste_set_pte(ptep, entry);
 		pgste_set_unlock(ptep, pgste);
@@ -881,6 +897,12 @@ static inline int pte_young(pte_t pte)
 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
 }
 
+#define __HAVE_ARCH_PTE_UNUSED
+static inline int pte_unused(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_UNUSED;
+}
+
 /*
  * pgd/pmd/pte modification functions
  */
@@ -1034,30 +1056,41 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
-	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+	unsigned long pto = (unsigned long) ptep;
+
 #ifndef CONFIG_64BIT
-		/* pto must point to the start of the segment table */
-		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
-		/* ipte in zarch mode can do the math */
-		pte_t *pto = ptep;
+	/* pto in ESA mode must point to the start of the segment table */
+	pto &= 0x7ffffc00;
 #endif
-		asm volatile(
-			"	ipte	%2,%3"
-			: "=m" (*ptep) : "m" (*ptep),
-			  "a" (pto), "a" (address));
-	}
+	/* Invalidation + global TLB flush for the pte */
+	asm volatile(
+		"	ipte	%2,%3"
+		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
+static inline void ptep_flush_direct(struct mm_struct *mm,
+				     unsigned long address, pte_t *ptep)
+{
+	if (pte_val(*ptep) & _PAGE_INVALID)
+		return;
+	__ptep_ipte(address, ptep);
 }
 
 static inline void ptep_flush_lazy(struct mm_struct *mm,
 				   unsigned long address, pte_t *ptep)
 {
-	int active = (mm == current->active_mm) ? 1 : 0;
+	int active, count;
 
-	if (atomic_read(&mm->context.attach_count) > active)
-		__ptep_ipte(address, ptep);
-	else
+	if (pte_val(*ptep) & _PAGE_INVALID)
+		return;
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pte_val(*ptep) |= _PAGE_INVALID;
 		mm->context.flush_mm = 1;
+	} else
+		__ptep_ipte(address, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -1070,11 +1103,11 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
 	}
 
 	pte = *ptep;
-	__ptep_ipte(addr, ptep);
+	ptep_flush_direct(vma->vm_mm, addr, ptep);
 	young = pte_young(pte);
 	pte = pte_mkold(pte);
 
@@ -1116,7 +1149,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1140,12 +1173,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste_ipte_notify(mm, ptep, pgste);
 	}
 
 	pte = *ptep;
 	ptep_flush_lazy(mm, address, ptep);
-	pte_val(*ptep) |= _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1178,14 +1210,17 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
 	}
 
 	pte = *ptep;
-	__ptep_ipte(address, ptep);
+	ptep_flush_direct(vma->vm_mm, address, ptep);
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(vma->vm_mm)) {
+		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
+		    _PGSTE_GPS_USAGE_UNUSED)
+			pte_val(pte) |= _PAGE_UNUSED;
 		pgste = pgste_update_all(&pte, pgste);
 		pgste_set_unlock(ptep, pgste);
 	}
@@ -1209,7 +1244,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
 	if (!full && mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1234,7 +1269,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	if (pte_write(pte)) {
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
-			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+			pgste = pgste_ipte_notify(mm, ptep, pgste);
 		}
 
 		ptep_flush_lazy(mm, address, ptep);
@@ -1260,10 +1295,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 		return 0;
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
 	}
 
-	__ptep_ipte(address, ptep);
+	ptep_flush_direct(vma->vm_mm, address, ptep);
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste_set_pte(ptep, entry);
@@ -1447,12 +1482,16 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 static inline void pmdp_flush_lazy(struct mm_struct *mm,
 				   unsigned long address, pmd_t *pmdp)
 {
-	int active = (mm == current->active_mm) ? 1 : 0;
+	int active, count;
 
-	if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
-		__pmd_idte(address, pmdp);
-	else
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 		mm->context.flush_mm = 1;
+	} else
+		__pmd_idte(address, pmdp);
+	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
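
The pte_swap() comment claims (pte & 0x603) == 0x402. A quick standalone check of that arithmetic, assuming the era's software pte bit values for the four bits involved — only READ/WRITE/SPECIAL/UNUSED appear in this diff, so the other constants here are assumptions:

	#include <assert.h>
	#include <stdio.h>

	#define _PAGE_PRESENT	0x001	/* assumed value */
	#define _PAGE_TYPE	0x002	/* assumed value */
	#define _PAGE_PROTECT	0x200	/* assumed value */
	#define _PAGE_INVALID	0x400	/* assumed value */

	int main(void)
	{
		unsigned long mask = _PAGE_INVALID | _PAGE_PROTECT |
				     _PAGE_TYPE | _PAGE_PRESENT;
		unsigned long pattern = _PAGE_INVALID | _PAGE_TYPE;

		assert(mask == 0x603 && pattern == 0x402);
		printf("mask=0x%lx pattern=0x%lx\n", mask, pattern);
		return 0;
	}
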
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 9c82cebddabd..f4783c0b7b43 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -83,6 +83,7 @@ struct per_struct_kernel {
  * These are defined as per linux/ptrace.h, which see.
  */
 #define arch_has_single_step()	(1)
+#define arch_has_block_step()	(1)
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index abaca2275c7a..2f5e9932b4de 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,6 +46,7 @@ int sclp_cpu_configure(u8 cpu);
 int sclp_cpu_deconfigure(u8 cpu);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
+unsigned int sclp_get_max_cpu(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 94cfbe442f12..406f3a1e63ef 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -59,7 +59,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_FLAG_DIAG44	(1UL << 4)
 #define MACHINE_FLAG_IDTE	(1UL << 5)
 #define MACHINE_FLAG_DIAG9C	(1UL << 6)
-#define MACHINE_FLAG_MVCOS	(1UL << 7)
 #define MACHINE_FLAG_KVM	(1UL << 8)
 #define MACHINE_FLAG_ESOP	(1UL << 9)
 #define MACHINE_FLAG_EDAT1	(1UL << 10)
@@ -85,7 +84,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_IDTE	(0)
 #define MACHINE_HAS_DIAG44	(1)
 #define MACHINE_HAS_MVPG	(S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
-#define MACHINE_HAS_MVCOS	(0)
 #define MACHINE_HAS_EDAT1	(0)
 #define MACHINE_HAS_EDAT2	(0)
 #define MACHINE_HAS_LPP		(0)
@@ -98,7 +96,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_IDTE	(S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
 #define MACHINE_HAS_DIAG44	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
 #define MACHINE_HAS_MVPG	(1)
-#define MACHINE_HAS_MVCOS	(S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
 #define MACHINE_HAS_EDAT1	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
 #define MACHINE_HAS_LPP		(S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 10e0fcd3633d..3ccd71b90345 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -81,6 +81,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -91,11 +92,13 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP	20	/* This task is single stepped */
+#define TIF_BLOCK_STEP		21	/* This task is block stepped */
 
 #define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING	(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 79330af9a5f8..4133b3f72fb0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-struct uaccess_ops {
-	size_t (*copy_from_user)(size_t, const void __user *, void *);
-	size_t (*copy_to_user)(size_t, void __user *, const void *);
-	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
-	size_t (*clear_user)(size_t, void __user *);
-	size_t (*strnlen_user)(size_t, const char __user *);
-	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
-};
+int __handle_fault(unsigned long, unsigned long, int);
 
-extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_pt;
-
-extern int __handle_fault(unsigned long, unsigned long, int);
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from user space to kernel space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long __must_check __copy_from_user(void *to, const void __user *from,
+					    unsigned long n);
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+					  unsigned long n);
 
-static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
-	size = uaccess.copy_to_user(size, ptr, x);
-	return size ? -EFAULT : size;
+	size = __copy_to_user(ptr, x, size);
+	return size ? -EFAULT : 0;
 }
 
-static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
-	size = uaccess.copy_from_user(size, ptr, x);
-	return size ? -EFAULT : size;
+	size = __copy_from_user(x, ptr, size);
+	return size ? -EFAULT : 0;
 }
 
 /*
@@ -135,8 +160,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 	case 2:							\
 	case 4:							\
 	case 8:							\
-		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__pu_err = __put_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		break;						\
 	default:						\
 		__put_user_bad();				\
@@ -152,7 +177,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 })
 
 
-extern int __put_user_bad(void) __attribute__((noreturn));
+int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr)					\
 ({								\
@@ -161,29 +186,29 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
 		unsigned char __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
 		unsigned short __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
 		unsigned int __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
 		unsigned long long __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
@@ -200,35 +225,12 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	__get_user(x, ptr);					\
 })
 
-extern int __get_user_bad(void) __attribute__((noreturn));
+int __get_user_bad(void) __attribute__((noreturn));
 
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
 /**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	return uaccess.copy_to_user(n, to, from);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
  * @from: Source address, in kernel space.
@@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user(to, from, n);
 }
 
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	return uaccess.copy_from_user(n, from, to);
-}
-
-extern void copy_from_user_overflow(void)
+void copy_from_user_overflow(void)
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 __compiletime_warning("copy_from_user() buffer size is not provably correct")
 #endif
@@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user(to, from, n);
 }
 
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	return uaccess.copy_in_user(n, to, from);
-}
+unsigned long __must_check
+__copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 /*
  * Copy a null terminated string from userspace.
  */
+
+long __strncpy_from_user(char *dst, const char __user *src, long count);
+
 static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	might_fault();
-	return uaccess.strncpy_from_user(count, src, dst);
+	return __strncpy_from_user(dst, src, count);
 }
 
-static inline unsigned long
-strnlen_user(const char __user * src, unsigned long n)
+unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
+
+static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
 {
 	might_fault();
-	return uaccess.strnlen_user(n, src);
+	return __strnlen_user(src, n);
 }
 
 /**
@@ -355,21 +335,14 @@ strnlen_user(const char __user * src, unsigned long n)
 /*
  * Zero Userspace
  */
+unsigned long __must_check __clear_user(void __user *to, unsigned long size);
 
-static inline unsigned long __must_check
-__clear_user(void __user *to, unsigned long n)
-{
-	return uaccess.clear_user(n, to);
-}
-
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
+static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
 	might_fault();
 	return __clear_user(to, n);
 }
 
-extern int copy_to_user_real(void __user *dest, void *src, size_t count);
-extern int copy_from_user_real(void *dest, void __user *src, size_t count);
+int copy_to_user_real(void __user *dest, void *src, unsigned long count);
 
 #endif /* __S390_UACCESS_H */
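
With the uaccess_ops indirection gone, __get_user() dispatches on sizeof(*(ptr)) through a correctly sized temporary. A user-space model of that dispatch pattern — memcpy() stands in for __copy_from_user(), the macro name is hypothetical, and the statement-expression is a GCC/Clang extension as in the kernel:

	#include <stdio.h>
	#include <string.h>

	#define get_user_model(x, ptr) ({				\
		int __err = 0;						\
		switch (sizeof(*(ptr))) {				\
		case 1: { unsigned char __x;				\
			memcpy(&__x, (ptr), 1);				\
			(x) = *(__typeof__(*(ptr)) *)&__x; break; }	\
		case 4: { unsigned int __x;				\
			memcpy(&__x, (ptr), 4);				\
			(x) = *(__typeof__(*(ptr)) *)&__x; break; }	\
		default: __err = -1;	/* sizes elided for brevity */	\
		}							\
		__err;							\
	})

	int main(void)
	{
		unsigned int src = 0xdeadbeef, dst = 0;

		if (get_user_model(dst, &src) == 0)
			printf("dst = 0x%x\n", dst);	/* dst = 0xdeadbeef */
		return 0;
	}
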
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 7e0b498a2c2b..a150f4fabe43 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -403,6 +403,12 @@ typedef struct
 #define PTRACE_TE_ABORT_RAND	      0x5011
 
 /*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
+
+/*
  * PT_PROT definition is loosely based on hppa bsd definition in
  * gdb/hppab-nat.c
  */