Diffstat (limited to 'arch/s390')
 arch/s390/Kconfig                       |  8
 arch/s390/crypto/aes_s390.c             | 50
 arch/s390/include/asm/page.h            | 38
 arch/s390/include/asm/sclp.h            |  3
 arch/s390/include/asm/vdso.h            |  5
 arch/s390/kernel/asm-offsets.c          |  4
 arch/s390/kernel/compat_signal.c        |  2
 arch/s390/kernel/pgm_check.S            |  2
 arch/s390/kernel/signal.c               |  2
 arch/s390/kernel/time.c                 | 46
 arch/s390/kernel/vdso.c                 |  2
 arch/s390/kernel/vdso32/clock_gettime.S | 31
 arch/s390/kernel/vdso32/gettimeofday.S  |  9
 arch/s390/kernel/vdso64/clock_getres.S  |  4
 arch/s390/kernel/vdso64/clock_gettime.S | 24
 arch/s390/kernel/vdso64/gettimeofday.S  |  9
 arch/s390/lib/uaccess_pt.c              |  3
 17 files changed, 123 insertions(+), 119 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 314fced4fc14..1e1a03d2d19f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -101,7 +101,7 @@ config S390
 	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
 	select HAVE_ARCH_SECCOMP_FILTER
@@ -347,14 +347,14 @@ config SMP
 	  Even if you don't know what to do here, say Y.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-64)"
-	range 2 64
+	int "Maximum number of CPUs (2-256)"
+	range 2 256
 	depends on SMP
 	default "32" if !64BIT
 	default "64" if 64BIT
 	help
 	  This allows you to specify the maximum number of CPUs which this
-	  kernel will support. The maximum supported value is 64 and the
+	  kernel will support. The maximum supported value is 256 and the
 	  minimum value which makes sense is 2.
 
 	  This is purely to save memory - each supported CPU adds
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 46cae138ece2..b3feabd39f31 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -35,7 +35,6 @@ static u8 *ctrblk;
 static char keylen_flag;
 
 struct s390_aes_ctx {
-	u8 iv[AES_BLOCK_SIZE];
 	u8 key[AES_MAX_KEY_SIZE];
 	long enc;
 	long dec;
@@ -56,8 +55,7 @@ struct pcc_param {
 
 struct s390_xts_ctx {
 	u8 key[32];
-	u8 xts_param[16];
-	struct pcc_param pcc;
+	u8 pcc_key[32];
 	long enc;
 	long dec;
 	int key_len;
@@ -441,30 +439,36 @@ static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 	return aes_set_key(tfm, in_key, key_len);
 }
 
-static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
+static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
 			 struct blkcipher_walk *walk)
 {
+	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
 	int ret = blkcipher_walk_virt(desc, walk);
 	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[AES_BLOCK_SIZE];
+		u8 key[AES_MAX_KEY_SIZE];
+	} param;
 
 	if (!nbytes)
 		goto out;
 
-	memcpy(param, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
+	memcpy(param.key, sctx->key, sctx->key_len);
 	do {
 		/* only use complete blocks */
 		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		u8 *out = walk->dst.virt.addr;
 		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, param, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
 	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, param, AES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
 
 out:
 	return ret;
@@ -481,7 +485,7 @@ static int cbc_aes_encrypt(struct blkcipher_desc *desc,
 		return fallback_blk_enc(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->enc, sctx->iv, &walk);
+	return cbc_aes_crypt(desc, sctx->enc, &walk);
 }
 
 static int cbc_aes_decrypt(struct blkcipher_desc *desc,
@@ -495,7 +499,7 @@ static int cbc_aes_decrypt(struct blkcipher_desc *desc,
 		return fallback_blk_dec(desc, dst, src, nbytes);
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_aes_crypt(desc, sctx->dec, sctx->iv, &walk);
+	return cbc_aes_crypt(desc, sctx->dec, &walk);
 }
 
 static struct crypto_alg cbc_aes_alg = {
@@ -586,7 +590,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		xts_ctx->enc = KM_XTS_128_ENCRYPT;
 		xts_ctx->dec = KM_XTS_128_DECRYPT;
 		memcpy(xts_ctx->key + 16, in_key, 16);
-		memcpy(xts_ctx->pcc.key + 16, in_key + 16, 16);
+		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
 		break;
 	case 48:
 		xts_ctx->enc = 0;
@@ -597,7 +601,7 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
 		xts_ctx->enc = KM_XTS_256_ENCRYPT;
 		xts_ctx->dec = KM_XTS_256_DECRYPT;
 		memcpy(xts_ctx->key, in_key, 32);
-		memcpy(xts_ctx->pcc.key, in_key + 32, 32);
+		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
 		break;
 	default:
 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
@@ -616,29 +620,33 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 	unsigned int nbytes = walk->nbytes;
 	unsigned int n;
 	u8 *in, *out;
-	void *param;
+	struct pcc_param pcc_param;
+	struct {
+		u8 key[32];
+		u8 init[16];
+	} xts_param;
 
 	if (!nbytes)
 		goto out;
 
-	memset(xts_ctx->pcc.block, 0, sizeof(xts_ctx->pcc.block));
-	memset(xts_ctx->pcc.bit, 0, sizeof(xts_ctx->pcc.bit));
-	memset(xts_ctx->pcc.xts, 0, sizeof(xts_ctx->pcc.xts));
-	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
-	param = xts_ctx->pcc.key + offset;
-	ret = crypt_s390_pcc(func, param);
+	memset(pcc_param.block, 0, sizeof(pcc_param.block));
+	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
+	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
+	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
+	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
+	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
 	if (ret < 0)
 		return -EIO;
 
-	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
-	param = xts_ctx->key + offset;
+	memcpy(xts_param.key, xts_ctx->key, 32);
+	memcpy(xts_param.init, pcc_param.xts, 16);
 	do {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
 		out = walk->dst.virt.addr;
 		in = walk->src.virt.addr;
 
-		ret = crypt_s390_km(func, param, out, in, n);
+		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
 		if (ret < 0 || ret != n)
 			return -EIO;
 
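The pattern in the two hunks above is the same for CBC and XTS: the complete parameter block handed to the CP Assist instruction (IV or tweak plus the key) is now built on the caller's stack for each request instead of living in the shared tfm context, so concurrent users of one tfm can no longer clobber each other's state. A minimal C sketch of that idea, using made-up names (aes_ctx, kmc_param, build_kmc_param) that are not part of the kernel code:

#include <string.h>

#define AES_BLOCK_SIZE   16
#define AES_MAX_KEY_SIZE 32

struct aes_ctx {                    /* long-lived, shared per-tfm state: key only */
	unsigned char key[AES_MAX_KEY_SIZE];
	unsigned int key_len;
};

struct kmc_param {                  /* per-request block: IV followed by key */
	unsigned char iv[AES_BLOCK_SIZE];
	unsigned char key[AES_MAX_KEY_SIZE];
};

/* Build the parameter block on the caller's stack for one request. */
void build_kmc_param(struct kmc_param *p, const struct aes_ctx *ctx,
		     const unsigned char *iv)
{
	memcpy(p->iv, iv, AES_BLOCK_SIZE);        /* chaining value for this request */
	memcpy(p->key, ctx->key, ctx->key_len);   /* cipher key from the shared context */
}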
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 316c8503a3b4..114258eeaacd 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -48,33 +48,21 @@ static inline void clear_page(void *page)
48 : "memory", "cc"); 48 : "memory", "cc");
49} 49}
50 50
51/*
52 * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
53 * bypass caches when copying a page. Especially when copying huge pages
54 * this keeps L1 and L2 data caches alive.
55 */
51static inline void copy_page(void *to, void *from) 56static inline void copy_page(void *to, void *from)
52{ 57{
53 if (MACHINE_HAS_MVPG) { 58 register void *reg2 asm ("2") = to;
54 register unsigned long reg0 asm ("0") = 0; 59 register unsigned long reg3 asm ("3") = 0x1000;
55 asm volatile( 60 register void *reg4 asm ("4") = from;
56 " mvpg %0,%1" 61 register unsigned long reg5 asm ("5") = 0xb0001000;
57 : : "a" (to), "a" (from), "d" (reg0) 62 asm volatile(
58 : "memory", "cc"); 63 " mvcl 2,4"
59 } else 64 : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5)
60 asm volatile( 65 : : "memory", "cc");
61 " mvc 0(256,%0),0(%1)\n"
62 " mvc 256(256,%0),256(%1)\n"
63 " mvc 512(256,%0),512(%1)\n"
64 " mvc 768(256,%0),768(%1)\n"
65 " mvc 1024(256,%0),1024(%1)\n"
66 " mvc 1280(256,%0),1280(%1)\n"
67 " mvc 1536(256,%0),1536(%1)\n"
68 " mvc 1792(256,%0),1792(%1)\n"
69 " mvc 2048(256,%0),2048(%1)\n"
70 " mvc 2304(256,%0),2304(%1)\n"
71 " mvc 2560(256,%0),2560(%1)\n"
72 " mvc 2816(256,%0),2816(%1)\n"
73 " mvc 3072(256,%0),3072(%1)\n"
74 " mvc 3328(256,%0),3328(%1)\n"
75 " mvc 3584(256,%0),3584(%1)\n"
76 " mvc 3840(256,%0),3840(%1)\n"
77 : : "a" (to), "a" (from) : "memory");
78} 66}
79 67
80#define clear_user_page(page, vaddr, pg) clear_page(page) 68#define clear_user_page(page, vaddr, pg) clear_page(page)
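The register setup in the new copy_page() follows the MVCL convention of packing the padding byte into the top byte of the source-length register: 0xb0001000 loaded into register 5 is pad byte 0xb0 plus source length 0x1000, and 0x1000 in register 3 is the 4 KB destination length. A standalone C sketch (illustrative only, not kernel code) decoding that value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_len_reg = 0xb0001000;             /* value loaded into %r5 above */
	uint8_t  pad         = src_len_reg >> 24;      /* 0xb0 padding byte */
	uint32_t src_len     = src_len_reg & 0xffffff; /* 0x1000 = 4096 bytes */

	printf("pad=0x%02x, source length=%u bytes\n", pad, src_len);
	return 0;
}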
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 30ef748bc161..2f390956c7c1 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include <asm/chpid.h>
+#include <asm/cpu.h>
 
 #define SCLP_CHP_INFO_MASK_SIZE	32
 
@@ -37,7 +38,7 @@ struct sclp_cpu_info {
 	unsigned int standby;
 	unsigned int combined;
 	int has_cpu_type;
-	struct sclp_cpu_entry cpu[255];
+	struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
 };
 
 int sclp_get_cpu_info(struct sclp_cpu_info *info);
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a73eb2e1e918..bc9746a7d47c 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -26,8 +26,9 @@ struct vdso_data {
 	__u64 wtom_clock_nsec;		/*				0x28 */
 	__u32 tz_minuteswest;		/* Minutes west of Greenwich	0x30 */
 	__u32 tz_dsttime;		/* Type of dst correction	0x34 */
-	__u32 ectg_available;
-	__u32 ntp_mult;			/* NTP adjusted multiplier	0x3C */
+	__u32 ectg_available;		/* ECTG instruction present	0x38 */
+	__u32 tk_mult;			/* Mult. used for xtime_nsec	0x3c */
+	__u32 tk_shift;			/* Shift used for xtime_nsec	0x40 */
 };
 
 struct vdso_per_cpu_data {
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 2416138ebd3e..e4c99a183651 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -65,12 +65,14 @@ int main(void)
 	DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 	DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
 	DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
-	DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult));
+	DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
+	DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
 	DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
 	DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
 	/* constants used by the vdso */
 	DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
 	DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+	DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
 	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
 	/* idle data offsets */
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 6e2442978409..95e7ba0fbb7e 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -194,7 +194,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
 		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
 		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
 		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
index 4a460c44e17e..813ec7260878 100644
--- a/arch/s390/kernel/pgm_check.S
+++ b/arch/s390/kernel/pgm_check.S
@@ -78,7 +78,7 @@ PGM_CHECK_DEFAULT /* 34 */
 PGM_CHECK_DEFAULT			/* 35 */
 PGM_CHECK_DEFAULT			/* 36 */
 PGM_CHECK_DEFAULT			/* 37 */
-PGM_CHECK_DEFAULT			/* 38 */
+PGM_CHECK_64BIT(do_dat_exception)	/* 38 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 39 */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3a */
 PGM_CHECK_64BIT(do_dat_exception)	/* 3b */
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index fb535874a246..d8fd508ccd1e 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -94,7 +94,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
-	regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
+	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
 		(user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
 	/* Check for invalid user address space control. */
 	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 064c3082ab33..dd95f1631621 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -108,20 +108,10 @@ static void fixup_clock_comparator(unsigned long long delta)
 	set_clock_comparator(S390_lowcore.clock_comparator);
 }
 
-static int s390_next_ktime(ktime_t expires,
+static int s390_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
 {
-	struct timespec ts;
-	u64 nsecs;
-
-	ts.tv_sec = ts.tv_nsec = 0;
-	monotonic_to_bootbased(&ts);
-	nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires));
-	do_div(nsecs, 125);
-	S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9);
-	/* Program the maximum value if we have an overflow (== year 2042) */
-	if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc))
-		S390_lowcore.clock_comparator = -1ULL;
+	S390_lowcore.clock_comparator = get_tod_clock() + delta;
 	set_clock_comparator(S390_lowcore.clock_comparator);
 	return 0;
 }
@@ -146,15 +136,14 @@ void init_cpu_timer(void)
 	cpu = smp_processor_id();
 	cd = &per_cpu(comparators, cpu);
 	cd->name = "comparator";
-	cd->features = CLOCK_EVT_FEAT_ONESHOT |
-		       CLOCK_EVT_FEAT_KTIME;
+	cd->features = CLOCK_EVT_FEAT_ONESHOT;
 	cd->mult = 16777;
 	cd->shift = 12;
 	cd->min_delta_ns = 1;
 	cd->max_delta_ns = LONG_MAX;
 	cd->rating = 400;
 	cd->cpumask = cpumask_of(cpu);
-	cd->set_next_ktime = s390_next_ktime;
+	cd->set_next_event = s390_next_event;
 	cd->set_mode = s390_set_mode;
 
 	clockevents_register_device(cd);
@@ -221,21 +210,30 @@ struct clocksource * __init clocksource_default_clock(void)
 	return &clocksource_tod;
 }
 
-void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
 {
-	if (clock != &clocksource_tod)
+	u64 nsecps;
+
+	if (tk->clock != &clocksource_tod)
 		return;
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time->tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
-	vdso_data->wtom_clock_sec = wtm->tv_sec;
-	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->ntp_mult = mult;
+	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->wtom_clock_sec =
+		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
+		+ (tk->wall_to_monotonic.tv_nsec << tk->shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	while (vdso_data->wtom_clock_nsec >= nsecps) {
+		vdso_data->wtom_clock_nsec -= nsecps;
+		vdso_data->wtom_clock_sec++;
+	}
+	vdso_data->tk_mult = tk->mult;
+	vdso_data->tk_shift = tk->shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
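With GENERIC_TIME_VSYSCALL, update_vsyscall() now publishes the raw timekeeper values (xtime_nsec still left-shifted by tk->shift, together with tk_mult and tk_shift), and the vdso assembly in the following files is reworked to do the final conversion itself. As a rough C sketch of what the 64-bit CLOCK_REALTIME path computes, using the vdso_data field names from the header above; the struct and helper names here are made up for illustration only:

#include <stdint.h>

struct vdso_data_view {            /* subset of struct vdso_data used in this sketch */
	uint64_t xtime_tod_stamp;  /* TOD clock value at the last update (cycle_last) */
	uint64_t xtime_clock_sec;  /* tk->xtime_sec */
	uint64_t xtime_clock_nsec; /* tk->xtime_nsec, still shifted left by tk_shift */
	uint32_t tk_mult;          /* tk->mult */
	uint32_t tk_shift;         /* tk->shift */
};

/* Sketch only: mirrors the CLOCK_REALTIME path in the vdso64 assembly below. */
void vdso_realtime(const struct vdso_data_view *vd, uint64_t tod,
		   uint64_t *sec, uint64_t *nsec)
{
	uint64_t ns;

	ns = (tod - vd->xtime_tod_stamp) * vd->tk_mult; /* (TOD - cycle_last) * tk->mult */
	ns += vd->xtime_clock_nsec;                     /* + tk->xtime_nsec */
	ns >>= vd->tk_shift;                            /* >> tk->shift */
	*sec = vd->xtime_clock_sec;                     /* tk->xtime_sec */
	while (ns >= 1000000000ULL) {                   /* carry whole seconds */
		ns -= 1000000000ULL;
		(*sec)++;
	}
	*nsec = ns;
}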
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a84476f2a9bb..613649096783 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 		psal[i] = 0x80000000;
 
 	lowcore->paste[4] = (u32)(addr_t) psal;
-	psal[0] = 0x20000000;
+	psal[0] = 0x02000000;
 	psal[2] = (u32)(addr_t) aste;
 	*(unsigned long *) (aste + 2) = segment_table +
 		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index b2224e0b974c..65fc3979c2f1 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -38,25 +38,21 @@ __kernel_clock_gettime:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,2f
 	ahi	%r0,-1
-2:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+2:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	lr	%r2,%r0
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	3f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 3:	alr	%r0,%r2
-	srdl	%r0,12
-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/* + xtime */
-	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
-	brc	12,4f
-	ahi	%r0,1
-4:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
-	al	%r0,__VDSO_WTOM_NSEC(%r5)	/* + wall_to_monotonic */
+	al	%r0,__VDSO_WTOM_NSEC(%r5)
 	al	%r1,__VDSO_WTOM_NSEC+4(%r5)
 	brc	12,5f
 	ahi	%r0,1
-5:	al	%r2,__VDSO_WTOM_SEC+4(%r5)
+5:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r2)			/* >> tk->shift */
+	l	%r2,__VDSO_WTOM_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	1b
 	basr	%r5,0
@@ -86,20 +82,21 @@ __kernel_clock_gettime:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,12f
 	ahi	%r0,-1
-12:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+12:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	lr	%r2,%r0
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	13f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 13:	alr	%r0,%r2
-	srdl	%r0,12
-	al	%r0,__VDSO_XTIME_NSEC(%r5)	/* + xtime */
+	al	%r0,__VDSO_XTIME_NSEC(%r5)	/* + tk->xtime_nsec */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,14f
 	ahi	%r0,1
-14:	l	%r2,__VDSO_XTIME_SEC+4(%r5)
+14:	l	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r2)			/* >> tk->shift */
+	l	%r2,__VDSO_XTIME_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	11b
 	basr	%r5,0
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 2d3633175e3b..fd621a950f7c 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -35,15 +35,14 @@ __kernel_gettimeofday:
 	sl	%r1,__VDSO_XTIME_STAMP+4(%r5)
 	brc	3,3f
 	ahi	%r0,-1
-3:	ms	%r0,__VDSO_NTP_MULT(%r5)	/* cyc2ns(clock,cycle_delta) */
+3:	ms	%r0,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
 	st	%r0,24(%r15)
-	l	%r0,__VDSO_NTP_MULT(%r5)
+	l	%r0,__VDSO_TK_MULT(%r5)
 	ltr	%r1,%r1
 	mr	%r0,%r0
 	jnm	4f
-	a	%r0,__VDSO_NTP_MULT(%r5)
+	a	%r0,__VDSO_TK_MULT(%r5)
 4:	al	%r0,24(%r15)
-	srdl	%r0,12
 	al	%r0,__VDSO_XTIME_NSEC(%r5)	/* + xtime */
 	al	%r1,__VDSO_XTIME_NSEC+4(%r5)
 	brc	12,5f
@@ -51,6 +50,8 @@ __kernel_gettimeofday:
 5:	mvc	24(4,%r15),__VDSO_XTIME_SEC+4(%r5)
 	cl	%r4,__VDSO_UPD_COUNT+4(%r5)	/* check update counter */
 	jne	1b
+	l	%r4,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srdl	%r0,0(%r4)			/* >> tk->shift */
 	l	%r4,24(%r15)			/* get tv_sec from stack */
 	basr	%r5,0
 6:	ltr	%r0,%r0
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 176e1f75f9aa..34deba7c7ed1 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -23,7 +23,9 @@ __kernel_clock_getres:
 	je	0f
 	cghi	%r2,__CLOCK_MONOTONIC
 	je	0f
-	cghi	%r2,-2		/* CLOCK_THREAD_CPUTIME_ID for this thread */
+	cghi	%r2,__CLOCK_THREAD_CPUTIME_ID
+	je	0f
+	cghi	%r2,-2		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
 	jne	2f
 	larl	%r5,_vdso_data
 	icm	%r0,15,__LC_ECTG_OK(%r5)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index d46c95ed5f19..91940ed33a4a 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,7 +22,9 @@ __kernel_clock_gettime:
 	larl	%r5,_vdso_data
 	cghi	%r2,__CLOCK_REALTIME
 	je	4f
-	cghi	%r2,-2		/* CLOCK_THREAD_CPUTIME_ID for this thread */
+	cghi	%r2,__CLOCK_THREAD_CPUTIME_ID
+	je	9f
+	cghi	%r2,-2		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
 	je	9f
 	cghi	%r2,__CLOCK_MONOTONIC
 	jne	12f
@@ -34,14 +36,13 @@ __kernel_clock_gettime:
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	0b
 	stck	48(%r15)			/* Store TOD clock */
+	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	lg	%r0,__VDSO_WTOM_SEC(%r5)
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/* + xtime */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)
-	alg	%r1,__VDSO_WTOM_NSEC(%r5)	/* + wall_to_monotonic */
-	alg	%r0,__VDSO_WTOM_SEC(%r5)
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_WTOM_NSEC(%r5)
+	srlg	%r1,%r1,0(%r2)			/* >> tk->shift */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
 	larl	%r5,13f
@@ -62,12 +63,13 @@ __kernel_clock_gettime:
 	tmll	%r4,0x0001			/* pending update ? loop */
 	jnz	5b
 	stck	48(%r15)			/* Store TOD clock */
+	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/* + xtime */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/* + tk->xtime_nsec */
+	srlg	%r1,%r1,0(%r2)			/* >> tk->shift */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	5b
 	larl	%r5,13f
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 36ee674722ec..d0860d1d0ccc 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -31,12 +31,13 @@ __kernel_gettimeofday:
 	stck	48(%r15)			/* Store TOD clock */
 	lg	%r1,48(%r15)
 	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
-	msgf	%r1,__VDSO_NTP_MULT(%r5)	/*  * NTP adjustment */
-	srlg	%r1,%r1,12			/* cyc2ns(clock,cycle_delta) */
-	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/* + xtime.tv_nsec */
-	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* xtime.tv_sec */
+	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
+	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/* + tk->xtime_nsec */
+	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
 	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
 	jne	0b
+	lgf	%r5,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
+	srlg	%r1,%r1,0(%r5)			/* >> tk->shift */
 	larl	%r5,5f
 2:	clg	%r1,0(%r5)
 	jl	3f
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 97e03caf7825..dbdab3e7a1a6 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t count, void __user *to,
  * contains the (negative) exception code.
  */
 #ifdef CONFIG_64BIT
+
 static unsigned long follow_table(struct mm_struct *mm,
 				  unsigned long address, int write)
 {
 	unsigned long *table = (unsigned long *)__pa(mm->pgd);
 
+	if (unlikely(address > mm->context.asce_limit - 1))
+		return -0x38UL;
 	switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
 		table = table + ((address >> 53) & 0x7ff);