Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/Kconfig                          2
-rw-r--r--   arch/s390/crypto/aes_s390.c               19
-rw-r--r--   arch/s390/include/asm/page.h              38
-rw-r--r--   arch/s390/include/asm/vdso.h               5
-rw-r--r--   arch/s390/kernel/asm-offsets.c             3
-rw-r--r--   arch/s390/kernel/compat_signal.c           2
-rw-r--r--   arch/s390/kernel/pgm_check.S               2
-rw-r--r--   arch/s390/kernel/signal.c                  2
-rw-r--r--   arch/s390/kernel/time.c                   46
-rw-r--r--   arch/s390/kernel/vdso32/clock_gettime.S   30
-rw-r--r--   arch/s390/kernel/vdso32/gettimeofday.S     9
-rw-r--r--   arch/s390/kernel/vdso64/clock_gettime.S   22
-rw-r--r--   arch/s390/kernel/vdso64/gettimeofday.S     9
-rw-r--r--   arch/s390/lib/uaccess_pt.c                 3
14 files changed, 97 insertions(+), 95 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced4fc14..5877e71901b3 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
 	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46cae138ece2..4363528dc8fd 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -35,7 +35,6 @@ static u8 *ctrblk;
 static char keylen_flag;
 
 struct s390_aes_ctx {
-	u8 iv[AES_BLOCK_SIZE];
 	u8 key[AES_MAX_KEY_SIZE];
 	long enc;
 	long dec;
@@ -441,30 +440,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }
 
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct blkcipher_walk *walk)
 {
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[AES_BLOCK_SIZE];
+		u8 key[AES_MAX_KEY_SIZE];
+	} param;
 
 	if (!nbytes)
 		goto out;
 
-	memcpy(param, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.key, sctx->key, sctx->key_len);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, param, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, param, AES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
 
 out:
 	return ret;
@@ -481,7 +486,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		return fallback_blk_enc(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+	return cbc_aes_crypt(desc, sctx->enc, &walk);
 }
 
 static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +500,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 		return fallback_blk_dec(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+	return cbc_aes_crypt(desc, sctx->dec, &walk);
 }
 
 static struct crypto_alg cbc_aes_alg = {
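
Note on the aes_s390.c hunks above: the IV is dropped from the shared struct s390_aes_ctx and cbc_aes_crypt() now builds the KMC parameter block (chaining value followed by the key) on the stack for every request. The following is a minimal illustrative C sketch of that layout, not the kernel code; the names kmc_aes_cbc_param and build_kmc_param are invented, only the sizes come from the patch.

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE   16
#define AES_MAX_KEY_SIZE 32

/* Hypothetical stand-in for the anonymous on-stack struct in cbc_aes_crypt(). */
struct kmc_aes_cbc_param {
	uint8_t iv[AES_BLOCK_SIZE];	/* chaining value, updated by the operation */
	uint8_t key[AES_MAX_KEY_SIZE];	/* cipher key; only key_len bytes are used */
};

static void build_kmc_param(struct kmc_aes_cbc_param *p, const uint8_t *iv,
			    const uint8_t *key, size_t key_len)
{
	memcpy(p->iv, iv, AES_BLOCK_SIZE);	/* IV now comes from the walk, per request */
	memcpy(p->key, key, key_len);		/* key still comes from s390_aes_ctx */
}

Keeping the chaining value per request rather than in the shared tfm context appears to be the point of removing the iv member from s390_aes_ctx.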
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 316c8503a3b4..114258eeaacd 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
 		: "memory", "cc");
 }
 
+/*
+ * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
+ * bypass caches when copying a page. Especially when copying huge pages
+ * this keeps L1 and L2 data caches alive.
+ */
 static inline void copy_page(void *to, void *from)
 {
-	if (MACHINE_HAS_MVPG) {
-		register unsigned long reg0 asm ("0") = 0;
-		asm volatile(
-			"	mvpg	%0,%1"
-			: : "a" (to), "a" (from), "d" (reg0)
-			: "memory", "cc");
-	} else
-		asm volatile(
-			"	mvc	0(256,%0),0(%1)\n"
-			"	mvc	256(256,%0),256(%1)\n"
-			"	mvc	512(256,%0),512(%1)\n"
-			"	mvc	768(256,%0),768(%1)\n"
-			"	mvc	1024(256,%0),1024(%1)\n"
-			"	mvc	1280(256,%0),1280(%1)\n"
-			"	mvc	1536(256,%0),1536(%1)\n"
-			"	mvc	1792(256,%0),1792(%1)\n"
-			"	mvc	2048(256,%0),2048(%1)\n"
-			"	mvc	2304(256,%0),2304(%1)\n"
-			"	mvc	2560(256,%0),2560(%1)\n"
-			"	mvc	2816(256,%0),2816(%1)\n"
-			"	mvc	3072(256,%0),3072(%1)\n"
-			"	mvc	3328(256,%0),3328(%1)\n"
-			"	mvc	3584(256,%0),3584(%1)\n"
-			"	mvc	3840(256,%0),3840(%1)\n"
-			: : "a" (to), "a" (from) : "memory");
+	register void *reg2 asm ("2") = to;
+	register unsigned long reg3 asm ("3") = 0x1000;
+	register void *reg4 asm ("4") = from;
+	register unsigned long reg5 asm ("5") = 0xb0001000;
+	asm volatile(
+		"	mvcl	2,4"
+		: "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
+		: : "memory", "cc");
 }
 
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
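
Note on the copy_page() rewrite above: MVCL takes two even/odd register pairs, with the operand address in the even register and the length in the odd one, and the padding byte sits above the 24-bit source length in the odd register of the source pair. The reg5 value 0xb0001000 used by the patch therefore encodes pad byte 0xb0 and source length 0x1000 (one 4 KiB page), while reg3 = 0x1000 is the destination length. A small illustrative helper for that packing (not kernel code):

#include <stdint.h>

/* Pack an MVCL source-length operand: padding byte above a 24-bit length. */
static inline uint32_t mvcl_src_len(uint8_t pad, uint32_t len)
{
	return ((uint32_t)pad << 24) | (len & 0x00ffffffu);
}

/* mvcl_src_len(0xb0, 0x1000) == 0xb0001000, matching reg5 in copy_page(). */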
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a73eb2e1e918..bc9746a7d47c 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,8 +26,9 @@ struct vdso_data {
 	__u64 wtom_clock_nsec;		/*				0x28 */
 	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
 	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
-	__u32 ectg_available;
-	__u32 ntp_mult;			/* NTP adjusted multiplier	0x3C */
+	__u32 ectg_available;		/* ECTG instruction present	0x38 */
+	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x3c */
+	__u32 tk_shift;			/* Shift used for xtime_nsec	0x40 */
 };
 
 struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2416138ebd3e..496116cd65ec 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -65,7 +65,8 @@ int main(void)
 	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
 	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
-	DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
+	DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
+	DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
 	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
 	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
 	/* constants used by the vdso */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6e2442978409..95e7ba0fbb7e 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
 		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
 		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
 		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 4a460c44e17e..813ec7260878 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
 PGM_CHECK_DEFAULT			/* 35 */
 PGM_CHECK_DEFAULT			/* 36 */
 PGM_CHECK_DEFAULT			/* 37 */
-PGM_CHECK_DEFAULT			/* 38 */
+PGM_CHECK_64BIT(do_dat_exception)	/* 38 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 39 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3a */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3b */
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index fb535874a246..d8fd508ccd1e 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
 		(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
 	/* Check for invalid user address space control. */
 	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 064c3082ab33..dd95f1631621 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
 	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-static int s390_next_ktime(ktime_t expires,
+static int s390_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
-	struct timespec ts;
-	u64 nsecs;
-
-	ts.tv_sec = ts.tv_nsec = 0;
-	monotonic_to_bootbased(&ts);
-	nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
-	do_div(nsecs, 125);
-	S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
-	/* Program the maximum value if we have an overflow (== year 2042) */
-	if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
-		S390_lowcore.clock_comparator = -1ULL;
+	S390_lowcore.clock_comparator = get_tod_clock() + delta;
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	return 0;
 }
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
 	cpu = smp_processor_id();
 	cd = &per_cpu(comparators, cpu);
 	cd->name		= "comparator";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
-				  CLOCK_EVT_FEAT_KTIME;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
 	cd->mult		= 16777;
 	cd->shift		= 12;
 	cd->min_delta_ns	= 1;
 	cd->max_delta_ns	= LONG_MAX;
 	cd->rating		= 400;
 	cd->cpumask		= cpumask_of(cpu);
-	cd->set_next_ktime	= s390_next_ktime;
+	cd->set_next_event	= s390_next_event;
 	cd->set_mode		= s390_set_mode;
 
 	clockevents_register_device(cd);
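
Note on the clock event setup above: cd->mult = 16777 and cd->shift = 12 describe the nanosecond-to-TOD-unit conversion, ticks = ns * 16777 >> 12, i.e. roughly 4.096 TOD units per nanosecond, which matches the TOD clock format (bit 51 corresponds to one microsecond, so 4096 units per microsecond). With CLOCK_EVT_FEAT_ONESHOT the core hands s390_next_event() a delta already expressed in these units, and the handler simply adds it to the current TOD value. Rough sketch of the conversion the clockevents core performs with these values (illustrative only):

/* Illustrative ns -> TOD-unit conversion implied by cd->mult/cd->shift. */
static inline unsigned long ns_to_tod_ticks(unsigned long ns)
{
	return (ns * 16777UL) >> 12;	/* ~4.096 TOD units per nanosecond */
}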
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
 	return &clocksource_tod;
 }
 
-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
-	if (clock != &clocksource_tod)
+	u64 nsecps;
+
+	if (tk->clock != &clocksource_tod)
 		return;
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time->tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
-	vdso_data->wtom_clock_sec = wtm->tv_sec;
-	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->ntp_mult = mult;
+	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->wtom_clock_sec =
+		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+		+ (tk->wall_to_monotonic.tv_nsec << tk->shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	while (vdso_data->wtom_clock_nsec >= nsecps) {
+		vdso_data->wtom_clock_nsec -= nsecps;
+		vdso_data->wtom_clock_sec++;
+	}
+	vdso_data->tk_mult = tk->mult;
+	vdso_data->tk_shift = tk->shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
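
For orientation, the realtime arithmetic that the vDSO assembly below performs with the fields exported above can be sketched in C roughly as follows. This is an illustrative sketch, not the kernel code: the struct and function names are invented, tod stands for the value obtained with STCK, and the field meanings follow the vdso_data layout in this patch (xtime_clock_nsec is tk->xtime_nsec, which is already shifted left by tk->shift).

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors the vdso_data fields used by the realtime path (names illustrative). */
struct vdso_time_data {
	uint64_t xtime_tod_stamp;	/* tk->clock->cycle_last */
	uint64_t xtime_clock_sec;	/* tk->xtime_sec */
	uint64_t xtime_clock_nsec;	/* tk->xtime_nsec, pre-shifted by tk_shift */
	uint32_t tk_mult;		/* tk->mult */
	uint32_t tk_shift;		/* tk->shift */
};

/* CLOCK_REALTIME as the vDSO computes it: scale the TOD delta by tk_mult,
 * add the pre-shifted xtime_nsec, shift down, then normalize into seconds. */
static void realtime_from_tod(const struct vdso_time_data *d, uint64_t tod,
			      uint64_t *sec, uint64_t *nsec)
{
	uint64_t ns;

	ns = (tod - d->xtime_tod_stamp) * d->tk_mult + d->xtime_clock_nsec;
	ns >>= d->tk_shift;
	*sec = d->xtime_clock_sec;
	while (ns >= NSEC_PER_SEC) {	/* same normalization loop as the asm */
		ns -= NSEC_PER_SEC;
		(*sec)++;
	}
	*nsec = ns;
}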
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index b2224e0b974c..5be8e472f57d 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,25 +38,26 @@ __kernel_clock_gettime:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,2f
 	ahi	%r0,-1
-2:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+2:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	lr	%r2,%r0
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	3f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 3:	alr	%r0,%r2
-	srdl	%r0,12
-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,4f
 	ahi	%r0,1
-4:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
-	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
+4:	al	%r0,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic.nsec */
 	al	%r1,__VDSO_WTOM_NSEC+4(%r5)
 	brc	12,5f
 	ahi	%r0,1
-5:	al	%r2,__VDSO_WTOM_SEC+4(%r5)
+5:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r2)			/*  >> tk->shift */
+	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+	al	%r2,__VDSO_WTOM_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	1b
 	basr	%r5,0
@@ -86,20 +87,21 @@ __kernel_clock_gettime:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,12f
 	ahi	%r0,-1
-12:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+12:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	lr	%r2,%r0
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	13f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 13:	alr	%r0,%r2
-	srdl	%r0,12
-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
+	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,14f
 	ahi	%r0,1
-14:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+14:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r2)			/*  >> tk->shift */
+	l	%r2,__VDSO_XTIME_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	11b
 	basr	%r5,0
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 2d3633175e3b..fd621a950f7c 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,3f
 	ahi	%r0,-1
-3:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+3:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	st	%r0,24(%r15)
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	4f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 4:	al	%r0,24(%r15)
-	srdl	%r0,12
 	al	%r0,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
 5:	mvc	24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	1b
+	l	%r4,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r4)			/*  >> tk->shift */
 	l	%r4,24(%r15)			/* get tv_sec from stack */
 	basr	%r5,0
 6:	ltr	%r0,%r0
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index d46c95ed5f19..0add1072ba30 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -34,14 +34,15 @@ __kernel_clock_gettime:
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	0b
 	stck	48(%r15)			/* Store TOD clock */
+	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
+	alg	%r0,__VDSO_WTOM_SEC(%r5)	/*  + wall_to_monotonic.sec */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)
-	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic */
-	alg	%r0,__VDSO_WTOM_SEC(%r5)
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/*  + wall_to_monotonic.nsec */
+	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
 	larl	%r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	5b
 	stck	48(%r15)			/* Store TOD clock */
+	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	5b
 	larl	%r5,13f
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 36ee674722ec..d0860d1d0ccc 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
 	stck	48(%r15)			/* Store TOD clock */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + xtime.tv_nsec */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* xtime.tv_sec */
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
+	lgf	%r5,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srlg	%r1,%r1,0(%r5)			/*  >> tk->shift */
 	larl	%r5,5f
 2:	clg	%r1,0(%r5)
 	jl	3f
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 97e03caf7825..dbdab3e7a1a6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
  * contains the (negative) exception code.
  */
 #ifdef CONFIG_64BIT
+
 static unsigned long follow_table(struct mm_struct *mm,
 				  unsigned long address, int write)
 {
 	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
+	if (unlikely(address > mm->context.asce_limit - 1))
+		return -0x38UL;
 	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		table = table + ((address >> 53) & 0x7ff);