Diffstat (limited to 'arch/s390/include')
 arch/s390/include/asm/Kbuild         |   5
 arch/s390/include/asm/bitops.h       |   8
 arch/s390/include/asm/ccwdev.h       |   4
 arch/s390/include/asm/ccwgroup.h     |   1
 arch/s390/include/asm/checksum.h     |  11
 arch/s390/include/asm/compat.h       |   6
 arch/s390/include/asm/futex.h        |  13
 arch/s390/include/asm/kvm_host.h     |   5
 arch/s390/include/asm/mmu_context.h  |  39
 arch/s390/include/asm/pgalloc.h      |  18
 arch/s390/include/asm/pgtable.h      | 105
 arch/s390/include/asm/ptrace.h       |   1
 arch/s390/include/asm/sclp.h         |   1
 arch/s390/include/asm/setup.h        |   3
 arch/s390/include/asm/thread_info.h  |   3
 arch/s390/include/asm/uaccess.h      | 171
 arch/s390/include/uapi/asm/ptrace.h  |   6
 17 files changed, 231 insertions(+), 169 deletions(-)
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 8386a4a1f19a..57892a8a9055 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -1,6 +1,7 @@
 
 
 generic-y += clkdev.h
-generic-y += trace_clock.h
-generic-y += preempt.h
 generic-y += hash.h
+generic-y += mcs_spinlock.h
+generic-y += preempt.h
+generic-y += trace_clock.h
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 6e6ad0680829..ec5ef891db6b 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -13,9 +13,9 @@
  *
  * The bitop functions are defined to work on unsigned longs, so for an
  * s390x system the bits end up numbered:
- * |63..............0|127............64|191...........128|255...........196|
+ * |63..............0|127............64|191...........128|255...........192|
  * and on s390:
- * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224|
+ * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
  *
  * There are a few little-endian macros used mostly for filesystem
  * bitmaps, these work on similar bit arrays layouts, but
@@ -30,7 +30,7 @@
  * on an s390x system the bits are numbered:
  * |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  *
  * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit
  * number field needs to be reversed compared to the LSB0 encoded bit
@@ -304,7 +304,7 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
  * On an s390x system the bits are numbered:
  * |0..............63|64............127|128...........191|192...........255|
  * and on s390:
- * |0.....31|31....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  */
 unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
 unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
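
The corrected diagrams fix off-by-one labels in the bit-numbering tables (63....31 should be 63....32, and 255...196 should be 255...192). A minimal sketch, assuming 64-bit longs, of the MSB0/LSB0 mapping that the *_inv helpers rely on; to_msb0() is an illustrative name, not kernel API:

#include <assert.h>

#define BITS_PER_LONG 64

/* Within one 64-bit word, MSB0 and LSB0 bit numbers are related by
 * XOR with (BITS_PER_LONG - 1); the mapping is its own inverse. */
static unsigned long to_msb0(unsigned long nr)
{
        return nr ^ (BITS_PER_LONG - 1);
}

int main(void)
{
        assert(to_msb0(0) == 63);               /* LSB0 bit 0 == MSB0 bit 63 */
        assert(to_msb0(63) == 0);
        assert(to_msb0(to_msb0(17)) == 17);     /* involution */
        return 0;
}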
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index f201af8be580..a9c2c0686177 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -219,7 +219,9 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *);
 #define to_ccwdev(n) container_of(n, struct ccw_device, dev)
 #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver)
 
-extern struct ccw_device *ccw_device_probe_console(void);
+extern struct ccw_device *ccw_device_create_console(struct ccw_driver *);
+extern void ccw_device_destroy_console(struct ccw_device *);
+extern int ccw_device_enable_console(struct ccw_device *);
 extern void ccw_device_wait_idle(struct ccw_device *);
 extern int ccw_device_force_console(struct ccw_device *);
 
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 23723ce5ca7a..6e670f88d125 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -23,6 +23,7 @@ struct ccwgroup_device {
 	unsigned int count;
 	struct device	dev;
 	struct ccw_device *cdev[0];
+	struct work_struct ungroup_work;
 };
 
 /**
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 4f57a4f3909a..740364856355 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -44,22 +44,15 @@ csum_partial(const void *buff, int len, __wsum sum)
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  *
- * Copy from userspace and compute checksum. If we catch an exception
- * then zero the rest of the buffer.
+ * Copy from userspace and compute checksum.
  */
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src, void *dst,
 			    int len, __wsum sum,
 			    int *err_ptr)
 {
-	int missing;
-
-	missing = copy_from_user(dst, src, len);
-	if (missing) {
-		memset(dst + len - missing, 0, missing);
+	if (unlikely(copy_from_user(dst, src, len)))
 		*err_ptr = -EFAULT;
-	}
-
 	return csum_partial(dst, len, sum);
 }
 
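
The memset() can go away because copy_from_user() is expected to zero-pad whatever it failed to copy, so csum_partial() still runs over a fully initialized buffer and the caller only has to flag -EFAULT. A toy model of that contract; toy_copy() is a stand-in, not the kernel primitive:

#include <assert.h>
#include <string.h>

/* The copier moves n - uncopied bytes, zero-pads the tail it could not
 * copy, and returns the uncopied count - just the shape of the contract. */
static unsigned long toy_copy(void *dst, const void *src,
                              unsigned long n, unsigned long uncopied)
{
        memcpy(dst, src, n - uncopied);
        memset((char *)dst + (n - uncopied), 0, uncopied);
        return uncopied;
}

int main(void)
{
        char src[4] = { 1, 2, 3, 4 }, dst[4] = { 9, 9, 9, 9 };

        assert(toy_copy(dst, src, 4, 2) == 2);  /* simulated partial fault */
        assert(dst[0] == 1 && dst[1] == 2);     /* copied head */
        assert(dst[2] == 0 && dst[3] == 0);     /* zeroed tail: csum-safe */
        return 0;
}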
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 5d7e8cf83bd6..d350ed9d0fbb 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -8,7 +8,11 @@
 #include <linux/thread_info.h>
 
 #define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p(typeof(0?(t)0:0ULL), u64))
-#define __SC_DELOUSE(t,v) (t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v))
+
+#define __SC_DELOUSE(t,v) ({ \
+	BUILD_BUG_ON(sizeof(t) > 4 && !__TYPE_IS_PTR(t)); \
+	(t)(__TYPE_IS_PTR(t) ? ((v) & 0x7fffffff) : (v)); \
+})
 
 #define PSW32_MASK_PER		0x40000000UL
 #define PSW32_MASK_DAT		0x04000000UL
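
__SC_DELOUSE() narrows 31-bit compat pointers arriving in 64-bit registers; the added BUILD_BUG_ON rejects any non-pointer argument wider than 4 bytes, which the old one-liner would have truncated silently. A user-space illustration of the masking step only; the helper name is local to the example:

#include <assert.h>
#include <stdint.h>

/* A 31-bit compat pointer: everything from bit 31 upward is discarded. */
static uint64_t delouse_ptr(uint64_t v)
{
        return v & 0x7fffffffULL;
}

int main(void)
{
        assert(delouse_ptr(0xdeadbeef80001000ULL) == 0x1000ULL);
        assert(delouse_ptr(0x7ffffffeULL) == 0x7ffffffeULL);    /* unchanged */
        return 0;
}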
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 51bcaa0fdeef..fda46bd38c99 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -5,7 +5,10 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
+int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
 	int op = (encoded_op >> 28) & 7;
 	int cmp = (encoded_op >> 24) & 15;
@@ -17,7 +20,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		oparg = 1 << oparg;
 
 	pagefault_disable();
-	ret = uaccess.futex_atomic_op(op, uaddr, oparg, &oldval);
+	ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
 	pagefault_enable();
 
 	if (!ret) {
@@ -34,10 +37,4 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-						u32 oldval, u32 newval)
-{
-	return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
-}
-
 #endif /* _ASM_S390_FUTEX_H */
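
futex_atomic_op_inuser() first unpacks the packed encoded_op word — op and cmp in the top byte, then two signed 12-bit arguments — before calling the now out-of-line __futex_atomic_op_inuser(). A sketch of that decoding with the sign extension written out explicitly; the field layout matches the generic kernel futex encoding, and the helper names are illustrative:

#include <assert.h>

static int sext12(int v)                /* sign-extend a 12-bit field */
{
        return (v & 0x800) ? v - 0x1000 : v;
}

/* encoded_op: op in bits 28-31 (the top bit flags an oparg shift, hence
 * the "& 7" above), cmp in bits 24-27, then oparg:12 and cmparg:12. */
static void decode_op(unsigned int encoded_op,
                      int *op, int *cmp, int *oparg, int *cmparg)
{
        *op = (encoded_op >> 28) & 7;
        *cmp = (encoded_op >> 24) & 15;
        *oparg = sext12((encoded_op >> 12) & 0xfff);
        *cmparg = sext12(encoded_op & 0xfff);
}

int main(void)
{
        int op, cmp, oparg, cmparg;

        decode_op(0x50000001u, &op, &cmp, &oparg, &cmparg);
        assert(op == 5 && cmp == 0 && oparg == 0 && cmparg == 1);
        return 0;
}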
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 68897fc65950..154b60089be9 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -116,7 +116,9 @@ struct kvm_s390_sie_block {
 	__u64	gbea;			/* 0x0180 */
 	__u8	reserved188[24];	/* 0x0188 */
 	__u32	fac;			/* 0x01a0 */
-	__u8	reserved1a4[58];	/* 0x01a4 */
+	__u8	reserved1a4[20];	/* 0x01a4 */
+	__u64	cbrlo;			/* 0x01b8 */
+	__u8	reserved1c0[30];	/* 0x01c0 */
 	__u64	pp;			/* 0x01de */
 	__u8	reserved1e6[2];		/* 0x01e6 */
 	__u64	itdba;			/* 0x01e8 */
@@ -167,6 +169,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_stsi;
 	u32 instruction_stfl;
 	u32 instruction_tprot;
+	u32 instruction_essa;
 	u32 instruction_sigp_sense;
 	u32 instruction_sigp_sense_running;
 	u32 instruction_sigp_external_call;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5d1f950704dc..38149b63dc44 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -48,13 +48,42 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
-	update_mm(next, tsk);
+	int cpu = smp_processor_id();
+
+	if (prev == next)
+		return;
+	if (atomic_inc_return(&next->context.attach_count) >> 16) {
+		/* Delay update_mm until all TLB flushes are done. */
+		set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+	} else {
+		cpumask_set_cpu(cpu, mm_cpumask(next));
+		update_mm(next, tsk);
+		if (next->context.flush_mm)
+			/* Flush pending TLBs */
+			__tlb_flush_mm(next);
+	}
 	atomic_dec(&prev->context.attach_count);
 	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
-	atomic_inc(&next->context.attach_count);
-	/* Check for TLBs not flushed yet */
-	__tlb_flush_mm_lazy(next);
+}
+
+#define finish_arch_post_lock_switch finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+
+	if (!test_tsk_thread_flag(tsk, TIF_TLB_WAIT))
+		return;
+	preempt_disable();
+	clear_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+	while (atomic_read(&mm->context.attach_count) >> 16)
+		cpu_relax();
+
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+	update_mm(mm, tsk);
+	if (mm->context.flush_mm)
+		__tlb_flush_mm(mm);
+	preempt_enable();
 }
 
 #define enter_lazy_tlb(mm,tsk)	do { } while (0)
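
The rework packs two counters into context.attach_count: the low halfword counts CPUs with the mm attached, and a non-zero high halfword means a TLB flush is in flight, in which case switch_mm() defers the attach via TIF_TLB_WAIT until finish_arch_post_lock_switch(). A minimal sketch of that packing, assuming the 16/16 split implied above; the helpers are illustrative, not kernel API:

#include <assert.h>

#define TLB_FLUSHER     0x10000         /* one unit in the high halfword */

/* attach_count layout: [31:16] CPUs flushing the mm, [15:0] CPUs attached */
static int flushers(int count) { return count >> 16; }
static int attached(int count) { return count & 0xffff; }

int main(void)
{
        int count = 0;

        count += 1;                     /* switch_mm(): this CPU attaches */
        count += TLB_FLUSHER;           /* ptep_flush_lazy(): flush in flight */
        assert(attached(count) == 1 && flushers(count) == 1);
        count -= TLB_FLUSHER;           /* flush done: deferred attach may run */
        assert(flushers(count) == 0 && attached(count) == 1);
        return 0;
}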
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index e1408ddb94f8..884017cbfa9f 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,6 +22,7 @@ unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *);
 
+void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
@@ -91,11 +92,22 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
 	unsigned long *table = crst_table_alloc(mm);
-	if (table)
-		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+
+	if (!table)
+		return NULL;
+	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
+	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
+		crst_table_free(mm, table);
+		return NULL;
+	}
 	return (pmd_t *) table;
 }
-#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
+	crst_table_free(mm, (unsigned long *) pmd);
+}
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
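
pmd_alloc_one() now has to unwind the crst table allocation when pgtable_pmd_page_ctor() fails (the ctor sets up the split page-table lock and can itself fail), and pmd_free() gains the matching dtor call. The same acquire-in-order, release-in-reverse shape in a toy sketch; malloc() and a flag stand in for crst_table_alloc() and the ctor, nothing here is kernel API:

#include <stdlib.h>

static void *table_alloc(int ctor_ok)
{
        void *table = malloc(2048);     /* crst_table_alloc() stand-in */

        if (!table)
                return NULL;
        /* crst_table_init() equivalent would run here */
        if (!ctor_ok) {                 /* constructor failed ... */
                free(table);            /* ... so undo the allocation */
                return NULL;
        }
        return table;
}

int main(void)
{
        void *t = table_alloc(1);

        free(t);
        return 0;
}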
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 66101f6c6d81..50a75d96f939 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -229,6 +229,7 @@ extern unsigned long MODULES_END;
 #define _PAGE_READ	0x010		/* SW pte read bit */
 #define _PAGE_WRITE	0x020		/* SW pte write bit */
 #define _PAGE_SPECIAL	0x040		/* SW associated with special page */
+#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
@@ -394,6 +395,12 @@ extern unsigned long MODULES_END;
 
 #endif /* CONFIG_64BIT */
 
+/* Guest Page State used for virtualization */
+#define _PGSTE_GPS_ZERO		0x0000000080000000UL
+#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
+#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
+#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL
+
 /*
  * A user page table pointer has the space-switch-event bit, the
  * private-space-control bit and the storage-alteration-event-control
@@ -617,6 +624,14 @@ static inline int pte_none(pte_t pte)
 	return pte_val(pte) == _PAGE_INVALID;
 }
 
+static inline int pte_swap(pte_t pte)
+{
+	/* Bit pattern: (pte & 0x603) == 0x402 */
+	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
+				_PAGE_TYPE | _PAGE_PRESENT))
+		== (_PAGE_INVALID | _PAGE_TYPE);
+}
+
 static inline int pte_file(pte_t pte)
 {
 	/* Bit pattern: (pte & 0x601) == 0x600 */
@@ -823,20 +838,20 @@ unsigned long gmap_translate(unsigned long address, struct gmap *);
 unsigned long __gmap_fault(unsigned long address, struct gmap *);
 unsigned long gmap_fault(unsigned long address, struct gmap *);
 void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
+void __gmap_zap(unsigned long address, struct gmap *);
 
 void gmap_register_ipte_notifier(struct gmap_notifier *);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *);
 int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
-void gmap_do_ipte_notify(struct mm_struct *, unsigned long addr, pte_t *);
+void gmap_do_ipte_notify(struct mm_struct *, pte_t *);
 
 static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
-					unsigned long addr,
 					pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
 	if (pgste_val(pgste) & PGSTE_IN_BIT) {
 		pgste_val(pgste) &= ~PGSTE_IN_BIT;
-		gmap_do_ipte_notify(mm, addr, ptep);
+		gmap_do_ipte_notify(mm, ptep);
 	}
 #endif
 	return pgste;
@@ -854,6 +869,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
+		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 		pgste_set_key(ptep, pgste, entry);
 		pgste_set_pte(ptep, entry);
 		pgste_set_unlock(ptep, pgste);
@@ -883,6 +899,12 @@ static inline int pte_young(pte_t pte)
 	return (pte_val(pte) & _PAGE_YOUNG) != 0;
 }
 
+#define __HAVE_ARCH_PTE_UNUSED
+static inline int pte_unused(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_UNUSED;
+}
+
 /*
  * pgd/pmd/pte modification functions
  */
@@ -1036,30 +1058,41 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 
 static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
-	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+	unsigned long pto = (unsigned long) ptep;
+
 #ifndef CONFIG_64BIT
-		/* pto must point to the start of the segment table */
-		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
-		/* ipte in zarch mode can do the math */
-		pte_t *pto = ptep;
+	/* pto in ESA mode must point to the start of the segment table */
+	pto &= 0x7ffffc00;
 #endif
-		asm volatile(
-			"	ipte	%2,%3"
-			: "=m" (*ptep) : "m" (*ptep),
-			  "a" (pto), "a" (address));
-	}
+	/* Invalidation + global TLB flush for the pte */
+	asm volatile(
+		"	ipte	%2,%3"
+		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
+static inline void ptep_flush_direct(struct mm_struct *mm,
+				     unsigned long address, pte_t *ptep)
+{
+	if (pte_val(*ptep) & _PAGE_INVALID)
+		return;
+	__ptep_ipte(address, ptep);
 }
 
 static inline void ptep_flush_lazy(struct mm_struct *mm,
 				   unsigned long address, pte_t *ptep)
 {
-	int active = (mm == current->active_mm) ? 1 : 0;
+	int active, count;
 
-	if (atomic_read(&mm->context.attach_count) > active)
-		__ptep_ipte(address, ptep);
-	else
+	if (pte_val(*ptep) & _PAGE_INVALID)
+		return;
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pte_val(*ptep) |= _PAGE_INVALID;
 		mm->context.flush_mm = 1;
+	} else
+		__ptep_ipte(address, ptep);
+	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -1072,11 +1105,11 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
 	}
 
 	pte = *ptep;
-	__ptep_ipte(addr, ptep);
+	ptep_flush_direct(vma->vm_mm, addr, ptep);
 	young = pte_young(pte);
 	pte = pte_mkold(pte);
 
@@ -1118,7 +1151,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1142,12 +1175,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste_ipte_notify(mm, ptep, pgste);
 	}
 
 	pte = *ptep;
 	ptep_flush_lazy(mm, address, ptep);
-	pte_val(*ptep) |= _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1180,14 +1212,17 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
 	}
 
 	pte = *ptep;
-	__ptep_ipte(address, ptep);
+	ptep_flush_direct(vma->vm_mm, address, ptep);
 	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(vma->vm_mm)) {
+		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
+		    _PGSTE_GPS_USAGE_UNUSED)
+			pte_val(pte) |= _PAGE_UNUSED;
 		pgste = pgste_update_all(&pte, pgste);
 		pgste_set_unlock(ptep, pgste);
 	}
@@ -1211,7 +1246,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 
 	if (!full && mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, ptep, pgste);
 	}
 
 	pte = *ptep;
@@ -1236,7 +1271,7 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	if (pte_write(pte)) {
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
-			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+			pgste = pgste_ipte_notify(mm, ptep, pgste);
 		}
 
 		ptep_flush_lazy(mm, address, ptep);
@@ -1262,10 +1297,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 		return 0;
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_ipte_notify(vma->vm_mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
 	}
 
-	__ptep_ipte(address, ptep);
+	ptep_flush_direct(vma->vm_mm, address, ptep);
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste_set_pte(ptep, entry);
@@ -1449,12 +1484,16 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
 static inline void pmdp_flush_lazy(struct mm_struct *mm,
 				   unsigned long address, pmd_t *pmdp)
 {
-	int active = (mm == current->active_mm) ? 1 : 0;
+	int active, count;
 
-	if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
-		__pmd_idte(address, pmdp);
-	else
+	active = (mm == current->active_mm) ? 1 : 0;
+	count = atomic_add_return(0x10000, &mm->context.attach_count);
+	if ((count & 0xffff) <= active) {
+		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 		mm->context.flush_mm = 1;
+	} else
+		__pmd_idte(address, pmdp);
+	atomic_sub(0x10000, &mm->context.attach_count);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
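
The new pte_swap() comment asserts (pte & 0x603) == 0x402. That arithmetic is consistent with the software pte bits this header defines elsewhere — _PAGE_PRESENT 0x001, _PAGE_TYPE 0x002, _PAGE_PROTECT 0x200, _PAGE_INVALID 0x400, restated below as an assumption from the surrounding file. A sketch checking both this and the pte_file() pattern quoted in the diff:

#include <assert.h>

#define _PAGE_PRESENT   0x001
#define _PAGE_TYPE      0x002
#define _PAGE_PROTECT   0x200
#define _PAGE_INVALID   0x400

int main(void)
{
        /* pte_swap(): mask 0x603, pattern 0x402 */
        assert((_PAGE_INVALID | _PAGE_PROTECT | _PAGE_TYPE | _PAGE_PRESENT) == 0x603);
        assert((_PAGE_INVALID | _PAGE_TYPE) == 0x402);
        /* pte_file(): mask 0x601, pattern 0x600 */
        assert((_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT) == 0x601);
        assert((_PAGE_INVALID | _PAGE_PROTECT) == 0x600);
        return 0;
}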
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 9c82cebddabd..f4783c0b7b43 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -83,6 +83,7 @@ struct per_struct_kernel {
  * These are defined as per linux/ptrace.h, which see.
  */
 #define arch_has_single_step()	(1)
+#define arch_has_block_step()	(1)
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index abaca2275c7a..2f5e9932b4de 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -46,6 +46,7 @@ int sclp_cpu_configure(u8 cpu);
 int sclp_cpu_deconfigure(u8 cpu);
 unsigned long long sclp_get_rnmax(void);
 unsigned long long sclp_get_rzm(void);
+unsigned int sclp_get_max_cpu(void);
 int sclp_sdias_blk_count(void);
 int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
 int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 94cfbe442f12..406f3a1e63ef 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -59,7 +59,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_FLAG_DIAG44	(1UL << 4)
 #define MACHINE_FLAG_IDTE	(1UL << 5)
 #define MACHINE_FLAG_DIAG9C	(1UL << 6)
-#define MACHINE_FLAG_MVCOS	(1UL << 7)
 #define MACHINE_FLAG_KVM	(1UL << 8)
 #define MACHINE_FLAG_ESOP	(1UL << 9)
 #define MACHINE_FLAG_EDAT1	(1UL << 10)
@@ -85,7 +84,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_IDTE	(0)
 #define MACHINE_HAS_DIAG44	(1)
 #define MACHINE_HAS_MVPG	(S390_lowcore.machine_flags & MACHINE_FLAG_MVPG)
-#define MACHINE_HAS_MVCOS	(0)
 #define MACHINE_HAS_EDAT1	(0)
 #define MACHINE_HAS_EDAT2	(0)
 #define MACHINE_HAS_LPP		(0)
@@ -98,7 +96,6 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 #define MACHINE_HAS_IDTE	(S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
 #define MACHINE_HAS_DIAG44	(S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
 #define MACHINE_HAS_MVPG	(1)
-#define MACHINE_HAS_MVCOS	(S390_lowcore.machine_flags & MACHINE_FLAG_MVCOS)
 #define MACHINE_HAS_EDAT1	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
 #define MACHINE_HAS_EDAT2	(S390_lowcore.machine_flags & MACHINE_FLAG_EDAT2)
 #define MACHINE_HAS_LPP		(S390_lowcore.machine_flags & MACHINE_FLAG_LPP)
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 10e0fcd3633d..3ccd71b90345 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -81,6 +81,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_TLB_WAIT		4	/* wait for TLB flush completion */
 #define TIF_PER_TRAP		6	/* deliver sigtrap on return to user */
 #define TIF_MCCK_PENDING	7	/* machine check handling is pending */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
@@ -91,11 +92,13 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	19	/* restore signal mask in do_signal() */
 #define TIF_SINGLE_STEP		20	/* This task is single stepped */
+#define TIF_BLOCK_STEP		21	/* This task is block stepped */
 
 #define _TIF_SYSCALL		(1<<TIF_SYSCALL)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_TLB_WAIT		(1<<TIF_TLB_WAIT)
 #define _TIF_PER_TRAP		(1<<TIF_PER_TRAP)
 #define _TIF_MCCK_PENDING	(1<<TIF_MCCK_PENDING)
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 79330af9a5f8..4133b3f72fb0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,33 +92,58 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 #define ARCH_HAS_SORT_EXTABLE
 #define ARCH_HAS_SEARCH_EXTABLE
 
-struct uaccess_ops {
-	size_t (*copy_from_user)(size_t, const void __user *, void *);
-	size_t (*copy_to_user)(size_t, void __user *, const void *);
-	size_t (*copy_in_user)(size_t, void __user *, const void __user *);
-	size_t (*clear_user)(size_t, void __user *);
-	size_t (*strnlen_user)(size_t, const char __user *);
-	size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-	int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
-	int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
-};
+int __handle_fault(unsigned long, unsigned long, int);
 
-extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_pt;
+/**
+ * __copy_from_user: - Copy a block of data from user space, with less checking.
+ * @to:   Destination address, in kernel space.
+ * @from: Source address, in user space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from user space to kernel space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ *
+ * If some data could not be copied, this function will pad the copied
+ * data to the requested size using zero bytes.
+ */
+unsigned long __must_check __copy_from_user(void *to, const void __user *from,
+					    unsigned long n);
+
+/**
+ * __copy_to_user: - Copy a block of data into user space, with less checking.
+ * @to:   Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n:    Number of bytes to copy.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be copied.
+ * On success, this will be zero.
+ */
+unsigned long __must_check __copy_to_user(void __user *to, const void *from,
+					  unsigned long n);
 
-extern int __handle_fault(unsigned long, unsigned long, int);
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
 
-static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
-	size = uaccess.copy_to_user(size, ptr, x);
-	return size ? -EFAULT : size;
+	size = __copy_to_user(ptr, x, size);
+	return size ? -EFAULT : 0;
 }
 
-static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
-	size = uaccess.copy_from_user(size, ptr, x);
-	return size ? -EFAULT : size;
+	size = __copy_from_user(x, ptr, size);
+	return size ? -EFAULT : 0;
 }
 
 /*
@@ -135,8 +160,8 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 	case 2:							\
 	case 4:							\
 	case 8:							\
-		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__pu_err = __put_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		break;						\
 	default:						\
 		__put_user_bad();				\
@@ -152,7 +177,7 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 })
 
 
-extern int __put_user_bad(void) __attribute__((noreturn));
+int __put_user_bad(void) __attribute__((noreturn));
 
 #define __get_user(x, ptr)					\
 ({								\
@@ -161,29 +186,29 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	switch (sizeof(*(ptr))) {				\
 	case 1: {						\
 		unsigned char __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 2: {						\
 		unsigned short __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 4: {						\
 		unsigned int __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
 	case 8: {						\
 		unsigned long long __x;				\
-		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
-					 ptr, &__x);		\
+		__gu_err = __get_user_fn(&__x, ptr,		\
+					 sizeof(*(ptr)));	\
 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
 		break;						\
 	};							\
@@ -200,35 +225,12 @@ extern int __put_user_bad(void) __attribute__((noreturn));
 	__get_user(x, ptr);					\
 })
 
-extern int __get_user_bad(void) __attribute__((noreturn));
+int __get_user_bad(void) __attribute__((noreturn));
 
 #define __put_user_unaligned __put_user
 #define __get_user_unaligned __get_user
 
 /**
- * __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to:   Destination address, in user space.
- * @from: Source address, in kernel space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- */
-static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	return uaccess.copy_to_user(n, to, from);
-}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-/**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
  * @from: Source address, in kernel space.
@@ -248,30 +250,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return __copy_to_user(to, from, n);
 }
 
-/**
- * __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to:   Destination address, in kernel space.
- * @from: Source address, in user space.
- * @n:    Number of bytes to copy.
- *
- * Context: User context only. This function may sleep.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
- *
- * Returns number of bytes that could not be copied.
- * On success, this will be zero.
- *
- * If some data could not be copied, this function will pad the copied
- * data to the requested size using zero bytes.
- */
-static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	return uaccess.copy_from_user(n, from, to);
-}
-
-extern void copy_from_user_overflow(void)
+void copy_from_user_overflow(void)
 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
 __compiletime_warning("copy_from_user() buffer size is not provably correct")
 #endif
@@ -306,11 +285,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user(to, from, n);
 }
 
-static inline unsigned long __must_check
-__copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
-	return uaccess.copy_in_user(n, to, from);
-}
+unsigned long __must_check
+__copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
 static inline unsigned long __must_check
 copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -322,18 +298,22 @@ copy_in_user(void __user *to, const void __user *from, unsigned long n)
 /*
  * Copy a null terminated string from userspace.
  */
+
+long __strncpy_from_user(char *dst, const char __user *src, long count);
+
 static inline long __must_check
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	might_fault();
-	return uaccess.strncpy_from_user(count, src, dst);
+	return __strncpy_from_user(dst, src, count);
 }
 
-static inline unsigned long
-strnlen_user(const char __user * src, unsigned long n)
+unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);
+
+static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
 {
 	might_fault();
-	return uaccess.strnlen_user(n, src);
+	return __strnlen_user(src, n);
 }
 
 /**
@@ -355,21 +335,14 @@ strnlen_user(const char __user * src, unsigned long n)
 /*
  * Zero Userspace
  */
+unsigned long __must_check __clear_user(void __user *to, unsigned long size);
 
-static inline unsigned long __must_check
-__clear_user(void __user *to, unsigned long n)
-{
-	return uaccess.clear_user(n, to);
-}
-
-static inline unsigned long __must_check
-clear_user(void __user *to, unsigned long n)
+static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
 {
 	might_fault();
-	return uaccess.clear_user(n, to);
+	return __clear_user(to, n);
 }
 
-extern int copy_to_user_real(void __user *dest, void *src, size_t count);
-extern int copy_from_user_real(void *dest, void __user *src, size_t count);
+int copy_to_user_real(void __user *dest, void *src, unsigned long count);
 
 #endif /* __S390_UACCESS_H */
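
With struct uaccess_ops gone, __copy_from_user()/__copy_to_user() and friends are ordinary functions whose implementation is selected inside the uaccess code itself, and __get_user_fn()/__put_user_fn() just translate a non-zero "bytes not copied" result into -EFAULT. A toy model of that return-convention translation; toy_get_user_fn() is illustrative, not the kernel function:

#include <assert.h>
#include <string.h>

#define EFAULT 14

/* Whatever the copy leaves uncopied becomes -EFAULT; success is plain 0. */
static int toy_get_user_fn(void *x, const void *ptr, unsigned long size,
                           unsigned long uncopied /* injected fault */)
{
        memcpy(x, ptr, size - uncopied);
        return uncopied ? -EFAULT : 0;
}

int main(void)
{
        unsigned int src = 42, dst = 0;

        assert(toy_get_user_fn(&dst, &src, sizeof(src), 0) == 0 && dst == 42);
        assert(toy_get_user_fn(&dst, &src, sizeof(src), 2) == -EFAULT);
        return 0;
}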
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 7e0b498a2c2b..a150f4fabe43 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -403,6 +403,12 @@ typedef struct
 #define PTRACE_TE_ABORT_RAND	      0x5011
 
 /*
+ * The numbers chosen here are somewhat arbitrary but absolutely MUST
+ * not overlap with any of the number assigned in <linux/ptrace.h>.
+ */
+#define PTRACE_SINGLEBLOCK	12	/* resume execution until next branch */
+
+/*
  * PT_PROT definition is loosely based on hppa bsd definition in
  * gdb/hppab-nat.c
  */
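
PTRACE_SINGLEBLOCK pairs with the arch_has_block_step() definition added in asm/ptrace.h above: the tracee resumes and stops at the next taken branch instead of the next instruction. A hedged user-space sketch, assuming the s390 request value and with error handling trimmed:

#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_SINGLEBLOCK
#define PTRACE_SINGLEBLOCK 12           /* s390 value from this patch */
#endif

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {                         /* tracee */
                ptrace(PTRACE_TRACEME, 0, 0, 0);
                execlp("true", "true", (char *)0);
                _exit(1);
        }
        waitpid(pid, 0, 0);                     /* stopped at exec */
        /* Resume until the next branch is taken, then stop again. */
        ptrace(PTRACE_SINGLEBLOCK, pid, 0, 0);
        waitpid(pid, 0, 0);
        ptrace(PTRACE_CONT, pid, 0, 0);
        return 0;
}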