-rw-r--r--  arch/s390/Kbuild | 1
-rw-r--r--  arch/s390/Kconfig | 70
-rw-r--r--  arch/s390/Makefile | 1
-rw-r--r--  arch/s390/crypto/aes_s390.c | 18
-rw-r--r--  arch/s390/crypto/des_s390.c | 12
-rw-r--r--  arch/s390/crypto/ghash_s390.c | 21
-rw-r--r--  arch/s390/crypto/sha_common.c | 9
-rw-r--r--  arch/s390/include/asm/bitops.h | 81
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 6
-rw-r--r--  arch/s390/include/asm/ccwgroup.h | 3
-rw-r--r--  arch/s390/include/asm/clp.h | 28
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 76
-rw-r--r--  arch/s390/include/asm/dma.h | 19
-rw-r--r--  arch/s390/include/asm/hw_irq.h | 22
-rw-r--r--  arch/s390/include/asm/io.h | 55
-rw-r--r--  arch/s390/include/asm/irq.h | 12
-rw-r--r--  arch/s390/include/asm/isc.h | 1
-rw-r--r--  arch/s390/include/asm/page.h | 2
-rw-r--r--  arch/s390/include/asm/pci.h | 156
-rw-r--r--  arch/s390/include/asm/pci_clp.h | 182
-rw-r--r--  arch/s390/include/asm/pci_dma.h | 196
-rw-r--r--  arch/s390/include/asm/pci_insn.h | 280
-rw-r--r--  arch/s390/include/asm/pci_io.h | 194
-rw-r--r--  arch/s390/include/asm/pgtable.h | 11
-rw-r--r--  arch/s390/include/asm/sclp.h | 2
-rw-r--r--  arch/s390/include/asm/topology.h | 34
-rw-r--r--  arch/s390/include/asm/vga.h | 6
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/dis.c | 578
-rw-r--r--  arch/s390/kernel/entry.S | 7
-rw-r--r--  arch/s390/kernel/entry.h | 21
-rw-r--r--  arch/s390/kernel/entry64.S | 36
-rw-r--r--  arch/s390/kernel/head.S | 74
-rw-r--r--  arch/s390/kernel/irq.c | 2
-rw-r--r--  arch/s390/kernel/pgm_check.S | 152
-rw-r--r--  arch/s390/kernel/setup.c | 39
-rw-r--r--  arch/s390/kernel/signal.c | 2
-rw-r--r--  arch/s390/kernel/topology.c | 113
-rw-r--r--  arch/s390/kernel/traps.c | 52
-rw-r--r--  arch/s390/mm/Makefile | 12
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 7
-rw-r--r--  arch/s390/mm/fault.c | 31
-rw-r--r--  arch/s390/mm/init.c | 29
-rw-r--r--  arch/s390/mm/pageattr.c | 82
-rw-r--r--  arch/s390/mm/pgtable.c | 16
-rw-r--r--  arch/s390/mm/vmem.c | 46
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 28
-rw-r--r--  arch/s390/pci/Makefile | 6
-rw-r--r--  arch/s390/pci/pci.c | 1103
-rw-r--r--  arch/s390/pci/pci_clp.c | 324
-rw-r--r--  arch/s390/pci/pci_dma.c | 506
-rw-r--r--  arch/s390/pci/pci_event.c | 93
-rw-r--r--  arch/s390/pci/pci_msi.c | 141
-rw-r--r--  arch/s390/pci/pci_sysfs.c | 86
-rw-r--r--  drivers/gpu/vga/Kconfig | 2
-rw-r--r--  drivers/pci/hotplug/Kconfig | 11
-rw-r--r--  drivers/pci/hotplug/Makefile | 1
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 252
-rw-r--r--  drivers/pci/msi.c | 6
-rw-r--r--  drivers/s390/block/dasd.c | 97
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 34
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 92
-rw-r--r--  drivers/s390/block/dasd_fba.c | 23
-rw-r--r--  drivers/s390/block/dasd_int.h | 2
-rw-r--r--  drivers/s390/block/dasd_ioctl.c | 11
-rw-r--r--  drivers/s390/char/sclp.h | 3
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 81
-rw-r--r--  drivers/s390/cio/ccwgroup.c | 26
-rw-r--r--  drivers/s390/cio/chsc.c | 156
-rw-r--r--  drivers/s390/cio/device.c | 11
-rw-r--r--  drivers/s390/cio/device.h | 2
-rw-r--r--  drivers/s390/cio/device_ops.c | 17
-rw-r--r--  drivers/s390/cio/device_pgid.c | 10
-rw-r--r--  drivers/s390/cio/qdio_main.c | 52
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 9
-rw-r--r--  drivers/s390/cio/qdio_thinint.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c | 68
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h | 2
-rw-r--r--  include/asm-generic/io.h | 21
-rw-r--r--  include/linux/irq.h | 10
80 files changed, 5313 insertions, 774 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index cc45d25487b0..647c3eccc3d0 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -6,3 +6,4 @@ obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
 obj-$(CONFIG_APPLDATA_BASE) += appldata/
 obj-$(CONFIG_MATHEMU) += math-emu/
 obj-y += net/
+obj-$(CONFIG_PCI) += pci/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3cbb8757704e..32425af9d68d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -34,12 +34,6 @@ config GENERIC_BUG
 config GENERIC_BUG_RELATIVE_POINTERS
 	def_bool y

-config NO_IOMEM
-	def_bool y
-
-config NO_DMA
-	def_bool y
-
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool 64BIT

@@ -58,6 +52,12 @@ config KEXEC
 config AUDIT_ARCH
 	def_bool y

+config NO_IOPORT
+	def_bool y
+
+config PCI_QUIRKS
+	def_bool n
+
 config S390
 	def_bool y
 	select USE_GENERIC_SMP_HELPERS if SMP
@@ -171,6 +171,10 @@ config HAVE_MARCH_Z196_FEATURES
 	def_bool n
 	select HAVE_MARCH_Z10_FEATURES

+config HAVE_MARCH_ZEC12_FEATURES
+	def_bool n
+	select HAVE_MARCH_Z196_FEATURES
+
 choice
 	prompt "Processor type"
 	default MARCH_G5
@@ -222,6 +226,13 @@ config MARCH_Z196
 	  (2818 and 2817 series). The kernel will be slightly faster but will
 	  not work on older machines.

+config MARCH_ZEC12
+	bool "IBM zEC12"
+	select HAVE_MARCH_ZEC12_FEATURES if 64BIT
+	help
+	  Select this to enable optimizations for IBM zEC12 (2827 series). The
+	  kernel will be slightly faster but will not work on older machines.
+
 endchoice

 config 64BIT
@@ -426,6 +437,53 @@ config QDIO

 	  If unsure, say Y.

+menuconfig PCI
+	bool "PCI support"
+	default n
+	depends on 64BIT
+	select ARCH_SUPPORTS_MSI
+	select PCI_MSI
+	help
+	  Enable PCI support.
+
+if PCI
+
+config PCI_NR_FUNCTIONS
+	int "Maximum number of PCI functions (1-4096)"
+	range 1 4096
+	default "64"
+	help
+	  This allows you to specify the maximum number of PCI functions which
+	  this kernel will support.
+
+source "drivers/pci/Kconfig"
+source "drivers/pci/pcie/Kconfig"
+source "drivers/pci/hotplug/Kconfig"
+
+endif	# PCI
+
+config PCI_DOMAINS
+	def_bool PCI
+
+config HAS_IOMEM
+	def_bool PCI
+
+config IOMMU_HELPER
+	def_bool PCI
+
+config HAS_DMA
+	def_bool PCI
+	select HAVE_DMA_API_DEBUG
+
+config NEED_SG_DMA_LENGTH
+	def_bool PCI
+
+config HAVE_DMA_ATTRS
+	def_bool PCI
+
+config NEED_DMA_MAP_STATE
+	def_bool PCI
+
 config CHSC_SCH
 	def_tristate m
 	prompt "Support for CHSC subchannels"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 49e76e8b477d..4b8e08b56f49 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -41,6 +41,7 @@ cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
 cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
 cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
 cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
+cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)

 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE :=arch/s390/boot/image
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index da3c1a7dcd8e..b4dbade8ca24 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -325,7 +325,8 @@ static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
 		u8 *in = walk->src.virt.addr;

 		ret = crypt_s390_km(func, param, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;

 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -457,7 +458,8 @@ static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
 		u8 *in = walk->src.virt.addr;

 		ret = crypt_s390_kmc(func, param, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;

 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -625,7 +627,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 	memcpy(xts_ctx->pcc.tweak, walk->iv, sizeof(xts_ctx->pcc.tweak));
 	param = xts_ctx->pcc.key + offset;
 	ret = crypt_s390_pcc(func, param);
-	BUG_ON(ret < 0);
+	if (ret < 0)
+		return -EIO;

 	memcpy(xts_ctx->xts_param, xts_ctx->pcc.xts, 16);
 	param = xts_ctx->key + offset;
@@ -636,7 +639,8 @@ static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
 		in = walk->src.virt.addr;

 		ret = crypt_s390_km(func, param, out, in, n);
-		BUG_ON(ret < 0 || ret != n);
+		if (ret < 0 || ret != n)
+			return -EIO;

 		nbytes &= AES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -769,7 +773,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 			crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
 		}
 		ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-		BUG_ON(ret < 0 || ret != n);
+		if (ret < 0 || ret != n)
+			return -EIO;
 		if (n > AES_BLOCK_SIZE)
 			memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
 			       AES_BLOCK_SIZE);
@@ -788,7 +793,8 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
 				       AES_BLOCK_SIZE, ctrblk);
-		BUG_ON(ret < 0 || ret != AES_BLOCK_SIZE);
+		if (ret < 0 || ret != AES_BLOCK_SIZE)
+			return -EIO;
 		memcpy(out, buf, nbytes);
 		crypto_inc(ctrblk, AES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index b49fb96f4207..bcca01c9989d 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -94,7 +94,8 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 		u8 *in = walk->src.virt.addr;

 		ret = crypt_s390_km(func, key, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;

 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -120,7 +121,8 @@ static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
 		u8 *in = walk->src.virt.addr;

 		ret = crypt_s390_kmc(func, iv, out, in, n);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;

 		nbytes &= DES_BLOCK_SIZE - 1;
 		ret = blkcipher_walk_done(desc, walk, nbytes);
@@ -386,7 +388,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
 			crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
 		}
 		ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-		BUG_ON((ret < 0) || (ret != n));
+		if (ret < 0 || ret != n)
+			return -EIO;
 		if (n > DES_BLOCK_SIZE)
 			memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
 			       DES_BLOCK_SIZE);
@@ -404,7 +407,8 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
 		in = walk->src.virt.addr;
 		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
 				       DES_BLOCK_SIZE, ctrblk);
-		BUG_ON(ret < 0 || ret != DES_BLOCK_SIZE);
+		if (ret < 0 || ret != DES_BLOCK_SIZE)
+			return -EIO;
 		memcpy(out, buf, nbytes);
 		crypto_inc(ctrblk, DES_BLOCK_SIZE);
 		ret = blkcipher_walk_done(desc, walk, 0);
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index 1ebd3a15cca4..d43485d142e9 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -72,14 +72,16 @@ static int ghash_update(struct shash_desc *desc,
 		if (!dctx->bytes) {
 			ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
 					      GHASH_BLOCK_SIZE);
-			BUG_ON(ret != GHASH_BLOCK_SIZE);
+			if (ret != GHASH_BLOCK_SIZE)
+				return -EIO;
 		}
 	}

 	n = srclen & ~(GHASH_BLOCK_SIZE - 1);
 	if (n) {
 		ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
-		BUG_ON(ret != n);
+		if (ret != n)
+			return -EIO;
 		src += n;
 		srclen -= n;
 	}
@@ -92,7 +94,7 @@ static int ghash_update(struct shash_desc *desc,
 	return 0;
 }

-static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 {
 	u8 *buf = dctx->buffer;
 	int ret;
@@ -103,21 +105,24 @@ static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 		memset(pos, 0, dctx->bytes);

 		ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
-		BUG_ON(ret != GHASH_BLOCK_SIZE);
+		if (ret != GHASH_BLOCK_SIZE)
+			return -EIO;
 	}

 	dctx->bytes = 0;
+	return 0;
 }

 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
 	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
+	int ret;

-	ghash_flush(ctx, dctx);
-	memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
-
-	return 0;
+	ret = ghash_flush(ctx, dctx);
+	if (!ret)
+		memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+	return ret;
 }

 static struct shash_alg ghash_alg = {
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index bd37d09b9d3c..8620b0ec9c42 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -36,7 +36,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	if (index) {
 		memcpy(ctx->buf + index, data, bsize - index);
 		ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
-		BUG_ON(ret != bsize);
+		if (ret != bsize)
+			return -EIO;
 		data += bsize - index;
 		len -= bsize - index;
 		index = 0;
@@ -46,7 +47,8 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 	if (len >= bsize) {
 		ret = crypt_s390_kimd(ctx->func, ctx->state, data,
 				      len & ~(bsize - 1));
-		BUG_ON(ret != (len & ~(bsize - 1)));
+		if (ret != (len & ~(bsize - 1)))
+			return -EIO;
 		data += ret;
 		len -= ret;
 	}
@@ -88,7 +90,8 @@ int s390_sha_final(struct shash_desc *desc, u8 *out)
 	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));

 	ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
-	BUG_ON(ret != end);
+	if (ret != end)
+		return -EIO;

 	/* copy digest to out */
 	memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 6f573890fb28..15422933c60b 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -640,6 +640,87 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
640} 640}
641#define find_first_bit find_first_bit 641#define find_first_bit find_first_bit
642 642
643/*
644 * Big endian variant whichs starts bit counting from left using
645 * the flogr (find leftmost one) instruction.
646 */
647static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
648{
649 register unsigned long bit asm("2") = val;
650 register unsigned long out asm("3");
651
652 asm volatile (
653 " .insn rre,0xb9830000,%[bit],%[bit]\n"
654 : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
655 return nr + bit;
656}
657
658/*
659 * 64 bit special left bitops format:
660 * order in memory:
661 * 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
662 * 10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
663 * 20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
664 * 30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
665 * after that follows the next long with bit numbers
666 * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
667 * 50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
668 * 60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
669 * 70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
670 * The reason for this bit ordering is the fact that
671 * the hardware sets bits in a bitmap starting at bit 0
672 * and we don't want to scan the bitmap from the 'wrong
673 * end'.
674 */
675static inline unsigned long find_first_bit_left(const unsigned long *addr,
676 unsigned long size)
677{
678 unsigned long bytes, bits;
679
680 if (!size)
681 return 0;
682 bytes = __ffs_word_loop(addr, size);
683 bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
684 return (bits < size) ? bits : size;
685}
686
687static inline int find_next_bit_left(const unsigned long *addr,
688 unsigned long size,
689 unsigned long offset)
690{
691 const unsigned long *p;
692 unsigned long bit, set;
693
694 if (offset >= size)
695 return size;
696 bit = offset & (__BITOPS_WORDSIZE - 1);
697 offset -= bit;
698 size -= offset;
699 p = addr + offset / __BITOPS_WORDSIZE;
700 if (bit) {
701 set = __flo_word(0, *p & (~0UL << bit));
702 if (set >= size)
703 return size + offset;
704 if (set < __BITOPS_WORDSIZE)
705 return set + offset;
706 offset += __BITOPS_WORDSIZE;
707 size -= __BITOPS_WORDSIZE;
708 p++;
709 }
710 return offset + find_first_bit_left(p, size);
711}
712
713#define for_each_set_bit_left(bit, addr, size) \
714 for ((bit) = find_first_bit_left((addr), (size)); \
715 (bit) < (size); \
716 (bit) = find_next_bit_left((addr), (size), (bit) + 1))
717
718/* same as for_each_set_bit() but use bit as value to start with */
719#define for_each_set_bit_left_cont(bit, addr, size) \
720 for ((bit) = find_next_bit_left((addr), (size), (bit)); \
721 (bit) < (size); \
722 (bit) = find_next_bit_left((addr), (size), (bit) + 1))
723
643/** 724/**
644 * find_next_zero_bit - find the first zero bit in a memory region 725 * find_next_zero_bit - find the first zero bit in a memory region
645 * @addr: The address to base the search on 726 * @addr: The address to base the search on
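The for_each_set_bit_left() iterator added above is meant for bitmaps that the hardware fills starting at the leftmost bit, as the comment in the hunk explains. A minimal usage sketch (not part of the patch; handle_one() is a hypothetical per-bit callback):

static void scan_summary_bits(unsigned long *bv, unsigned long bits)
{
	unsigned long bit;

	/* bit 0 is the leftmost bit of bv[0], per the ordering described above */
	for_each_set_bit_left(bit, bv, bits)
		handle_one(bit);	/* hypothetical handler */
}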
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 1cb4bb3f32d9..6d1f3573f0df 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -18,6 +18,9 @@ struct irb;
 struct ccw1;
 struct ccw_dev_id;

+/* from asm/schid.h */
+struct subchannel_id;
+
 /* simplified initializers for struct ccw_device:
  * CCW_DEVICE and CCW_DEVICE_DEVTYPE initialize one
  * entry in your MODULE_DEVICE_TABLE and set the match_flag correctly */
@@ -223,8 +226,7 @@ extern int ccw_device_force_console(void);

 int ccw_device_siosl(struct ccw_device *);

-// FIXME: these have to go
-extern int _ccw_device_get_subchannel_number(struct ccw_device *);
+extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);

 extern void *ccw_device_get_chp_desc(struct ccw_device *, int);
 #endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 01a905eb11e0..23723ce5ca7a 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -59,6 +59,9 @@ extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
 int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
 			int num_devices, const char *buf);

+extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
+extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);

diff --git a/arch/s390/include/asm/clp.h b/arch/s390/include/asm/clp.h
new file mode 100644
index 000000000000..6c3aecc245ff
--- /dev/null
+++ b/arch/s390/include/asm/clp.h
@@ -0,0 +1,28 @@
1#ifndef _ASM_S390_CLP_H
2#define _ASM_S390_CLP_H
3
4/* CLP common request & response block size */
5#define CLP_BLK_SIZE (PAGE_SIZE * 2)
6
7struct clp_req_hdr {
8 u16 len;
9 u16 cmd;
10} __packed;
11
12struct clp_rsp_hdr {
13 u16 len;
14 u16 rsp;
15} __packed;
16
17/* CLP Response Codes */
18#define CLP_RC_OK 0x0010 /* Command request successfully */
19#define CLP_RC_CMD 0x0020 /* Command code not recognized */
20#define CLP_RC_PERM 0x0030 /* Command not authorized */
21#define CLP_RC_FMT 0x0040 /* Invalid command request format */
22#define CLP_RC_LEN 0x0050 /* Invalid command request length */
23#define CLP_RC_8K 0x0060 /* Command requires 8K LPCB */
24#define CLP_RC_RESNOT0 0x0070 /* Reserved field not zero */
25#define CLP_RC_NODATA 0x0080 /* No data available */
26#define CLP_RC_FC_UNKNOWN 0x0100 /* Function code not recognized */
27
28#endif
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..8a32f7dfd3af
--- /dev/null
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -0,0 +1,76 @@
1#ifndef _ASM_S390_DMA_MAPPING_H
2#define _ASM_S390_DMA_MAPPING_H
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/mm.h>
7#include <linux/scatterlist.h>
8#include <linux/dma-attrs.h>
9#include <linux/dma-debug.h>
10#include <linux/io.h>
11
12#define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
13
14extern struct dma_map_ops s390_dma_ops;
15
16static inline struct dma_map_ops *get_dma_ops(struct device *dev)
17{
18 return &s390_dma_ops;
19}
20
21extern int dma_set_mask(struct device *dev, u64 mask);
22extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
23extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
24 enum dma_data_direction direction);
25
26#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
27#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
28
29#include <asm-generic/dma-mapping-common.h>
30
31static inline int dma_supported(struct device *dev, u64 mask)
32{
33 struct dma_map_ops *dma_ops = get_dma_ops(dev);
34
35 if (dma_ops->dma_supported == NULL)
36 return 1;
37 return dma_ops->dma_supported(dev, mask);
38}
39
40static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
41{
42 if (!dev->dma_mask)
43 return 0;
44 return addr + size - 1 <= *dev->dma_mask;
45}
46
47static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
48{
49 struct dma_map_ops *dma_ops = get_dma_ops(dev);
50
51 if (dma_ops->mapping_error)
52 return dma_ops->mapping_error(dev, dma_addr);
53 return (dma_addr == 0UL);
54}
55
56static inline void *dma_alloc_coherent(struct device *dev, size_t size,
57 dma_addr_t *dma_handle, gfp_t flag)
58{
59 struct dma_map_ops *ops = get_dma_ops(dev);
60 void *ret;
61
62 ret = ops->alloc(dev, size, dma_handle, flag, NULL);
63 debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
64 return ret;
65}
66
67static inline void dma_free_coherent(struct device *dev, size_t size,
68 void *cpu_addr, dma_addr_t dma_handle)
69{
70 struct dma_map_ops *dma_ops = get_dma_ops(dev);
71
72 dma_ops->free(dev, size, cpu_addr, dma_handle, NULL);
73 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
74}
75
76#endif /* _ASM_S390_DMA_MAPPING_H */
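A quick sketch of how a driver would use the coherent-allocation inlines defined above; the call is dispatched through s390_dma_ops. This is illustrative only (buffer size and error handling are placeholders), not code from the patch:

static void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/* dma_alloc_coherent() above ends up in s390_dma_ops.alloc() */
	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, dma, GFP_KERNEL);

	if (!ring)
		return NULL;
	/* ... program *dma into the device, access ring from the CPU ... */
	return ring;
}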
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index 6fb6de4f15b0..de015d85e3e5 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -1,14 +1,13 @@
-/*
- * S390 version
- */
-
-#ifndef _ASM_DMA_H
-#define _ASM_DMA_H
+#ifndef _ASM_S390_DMA_H
+#define _ASM_S390_DMA_H

-#include <asm/io.h> /* need byte IO */
+#include <asm/io.h>

+/*
+ * MAX_DMA_ADDRESS is ambiguous because on s390 its completely unrelated
+ * to DMA. It _is_ used for the s390 memory zone split at 2GB caused
+ * by the 31 bit heritage.
+ */
 #define MAX_DMA_ADDRESS 0x80000000

-#define free_dma(x) do { } while (0)
-
-#endif /* _ASM_DMA_H */
+#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h
new file mode 100644
index 000000000000..7e3d2586c1ff
--- /dev/null
+++ b/arch/s390/include/asm/hw_irq.h
@@ -0,0 +1,22 @@
1#ifndef _HW_IRQ_H
2#define _HW_IRQ_H
3
4#include <linux/msi.h>
5#include <linux/pci.h>
6
7static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
8{
9 return __irq_get_msi_desc(irq);
10}
11
12/* Must be called with msi map lock held */
13static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
14{
15 if (!msi)
16 return -EINVAL;
17
18 msi->irq = irq;
19 return 0;
20}
21
22#endif
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 559e921a6bba..16c3eb164f4f 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -9,9 +9,9 @@
 #ifndef _S390_IO_H
 #define _S390_IO_H

+#include <linux/kernel.h>
 #include <asm/page.h>
-
-#define IO_SPACE_LIMIT 0xffffffff
+#include <asm/pci_io.h>

 /*
  * Change virtual addresses to physical addresses and vv.
@@ -24,10 +24,11 @@ static inline unsigned long virt_to_phys(volatile void * address)
 		" lra %0,0(%1)\n"
 		" jz 0f\n"
 		" la %0,0\n"
 		"0:"
 		: "=a" (real_address) : "a" (address) : "cc");
 	return real_address;
 }
+#define virt_to_phys virt_to_phys

 static inline void * phys_to_virt(unsigned long address)
 {
@@ -42,4 +43,50 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
  */
 #define xlate_dev_kmem_ptr(p) p

+#define IO_SPACE_LIMIT 0
+
+#ifdef CONFIG_PCI
+
+#define ioremap_nocache(addr, size)	ioremap(addr, size)
+#define ioremap_wc			ioremap_nocache
+
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+{
+	return (void __iomem *) offset;
+}
+
+static inline void iounmap(volatile void __iomem *addr)
+{
+}
+
+/*
+ * s390 needs a private implementation of pci_iomap since ioremap with its
+ * offset parameter isn't sufficient. That's because BAR spaces are not
+ * disjunctive on s390 so we need the bar parameter of pci_iomap to find
+ * the corresponding device and create the mapping cookie.
+ */
+#define pci_iomap pci_iomap
+#define pci_iounmap pci_iounmap
+
+#define memcpy_fromio(dst, src, count)	zpci_memcpy_fromio(dst, src, count)
+#define memcpy_toio(dst, src, count)	zpci_memcpy_toio(dst, src, count)
+#define memset_io(dst, val, count)	zpci_memset_io(dst, val, count)
+
+#define __raw_readb	zpci_read_u8
+#define __raw_readw	zpci_read_u16
+#define __raw_readl	zpci_read_u32
+#define __raw_readq	zpci_read_u64
+#define __raw_writeb	zpci_write_u8
+#define __raw_writew	zpci_write_u16
+#define __raw_writel	zpci_write_u32
+#define __raw_writeq	zpci_write_u64
+
+#endif /* CONFIG_PCI */
+
+#include <asm-generic/io.h>
+
 #endif
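With the macros above, ordinary MMIO accessors end up in the zpci_* helpers from asm/pci_io.h rather than real memory loads and stores. A hedged sketch of the resulting driver-side usage (BAR number and register offset are made up for illustration):

static u32 example_read_reg(struct pci_dev *pdev)
{
	void __iomem *base = pci_iomap(pdev, 0, 0);	/* BAR 0, full length */
	u32 val;

	if (!base)
		return ~0U;
	/* readl() -> __raw_readl() -> zpci_read_u32() -> PCI Load instruction */
	val = readl(base + 0x10);
	pci_iounmap(pdev, base);
	return val;
}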
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 6703dd986fd4..e6972f85d2b0 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -33,6 +33,8 @@ enum interruption_class {
 	IOINT_APB,
 	IOINT_ADM,
 	IOINT_CSC,
+	IOINT_PCI,
+	IOINT_MSI,
 	NMI_NMI,
 	NR_IRQS,
 };
@@ -51,4 +53,14 @@ void service_subclass_irq_unregister(void);
 void measurement_alert_subclass_register(void);
 void measurement_alert_subclass_unregister(void);

+#ifdef CONFIG_LOCKDEP
+# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
+						disable_irq_nosync(irq)
+# define disable_irq_lockdep(irq)		disable_irq(irq)
+# define enable_irq_lockdep(irq)		enable_irq(irq)
+# define enable_irq_lockdep_irqrestore(irq, flags) \
+						enable_irq(irq)
+#endif
+
 #endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/isc.h b/arch/s390/include/asm/isc.h
index 5ae606456b0a..68d7d68300f2 100644
--- a/arch/s390/include/asm/isc.h
+++ b/arch/s390/include/asm/isc.h
@@ -18,6 +18,7 @@
 #define CHSC_SCH_ISC 7			/* CHSC subchannels */
 /* Adapter interrupts. */
 #define QDIO_AIRQ_ISC IO_SCH_ISC	/* I/O subchannel in qdio mode */
+#define PCI_ISC 2			/* PCI I/O subchannels */
 #define AP_ISC 6			/* adjunct processor (crypto) devices */

 /* Functions for registration of I/O interruption subclasses */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 39faa4ac9660..a86ad4084073 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -30,6 +30,8 @@
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__

+void storage_key_init_range(unsigned long start, unsigned long end);
+
 static unsigned long pfmf(unsigned long function, unsigned long address)
 {
 	asm volatile(
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 42a145c9ddd6..a6175ad0c42f 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -1,10 +1,158 @@
1#ifndef __ASM_S390_PCI_H 1#ifndef __ASM_S390_PCI_H
2#define __ASM_S390_PCI_H 2#define __ASM_S390_PCI_H
3 3
4/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code 4/* must be set before including asm-generic/pci.h */
5 * includes it even if CONFIG_PCI is not set.
6 */
7#define PCI_DMA_BUS_IS_PHYS (0) 5#define PCI_DMA_BUS_IS_PHYS (0)
6/* must be set before including pci_clp.h */
7#define PCI_BAR_COUNT 6
8 8
9#endif /* __ASM_S390_PCI_H */ 9#include <asm-generic/pci.h>
10#include <asm-generic/pci-dma-compat.h>
11#include <asm/pci_clp.h>
10 12
13#define PCIBIOS_MIN_IO 0x1000
14#define PCIBIOS_MIN_MEM 0x10000000
15
16#define pcibios_assign_all_busses() (0)
17
18void __iomem *pci_iomap(struct pci_dev *, int, unsigned long);
19void pci_iounmap(struct pci_dev *, void __iomem *);
20int pci_domain_nr(struct pci_bus *);
21int pci_proc_domain(struct pci_bus *);
22
23/* MSI arch hooks */
24#define arch_setup_msi_irqs arch_setup_msi_irqs
25#define arch_teardown_msi_irqs arch_teardown_msi_irqs
26
27#define ZPCI_BUS_NR 0 /* default bus number */
28#define ZPCI_DEVFN 0 /* default device number */
29
30/* PCI Function Controls */
31#define ZPCI_FC_FN_ENABLED 0x80
32#define ZPCI_FC_ERROR 0x40
33#define ZPCI_FC_BLOCKED 0x20
34#define ZPCI_FC_DMA_ENABLED 0x10
35
36struct msi_map {
37 unsigned long irq;
38 struct msi_desc *msi;
39 struct hlist_node msi_chain;
40};
41
42#define ZPCI_NR_MSI_VECS 64
43#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1)
44
45enum zpci_state {
46 ZPCI_FN_STATE_RESERVED,
47 ZPCI_FN_STATE_STANDBY,
48 ZPCI_FN_STATE_CONFIGURED,
49 ZPCI_FN_STATE_ONLINE,
50 NR_ZPCI_FN_STATES,
51};
52
53struct zpci_bar_struct {
54 u32 val; /* bar start & 3 flag bits */
55 u8 size; /* order 2 exponent */
56 u16 map_idx; /* index into bar mapping array */
57};
58
59/* Private data per function */
60struct zpci_dev {
61 struct pci_dev *pdev;
62 struct pci_bus *bus;
63 struct list_head entry; /* list of all zpci_devices, needed for hotplug, etc. */
64
65 enum zpci_state state;
66 u32 fid; /* function ID, used by sclp */
67 u32 fh; /* function handle, used by insn's */
68 u16 pchid; /* physical channel ID */
69 u8 pfgid; /* function group ID */
70 u16 domain;
71
72 /* IRQ stuff */
73 u64 msi_addr; /* MSI address */
74 struct zdev_irq_map *irq_map;
75 struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
76 unsigned int aisb; /* number of the summary bit */
77
78 /* DMA stuff */
79 unsigned long *dma_table;
80 spinlock_t dma_table_lock;
81 int tlb_refresh;
82
83 spinlock_t iommu_bitmap_lock;
84 unsigned long *iommu_bitmap;
85 unsigned long iommu_size;
86 unsigned long iommu_pages;
87 unsigned int next_bit;
88
89 struct zpci_bar_struct bars[PCI_BAR_COUNT];
90
91 u64 start_dma; /* Start of available DMA addresses */
92 u64 end_dma; /* End of available DMA addresses */
93 u64 dma_mask; /* DMA address space mask */
94
95 enum pci_bus_speed max_bus_speed;
96};
97
98struct pci_hp_callback_ops {
99 int (*create_slot) (struct zpci_dev *zdev);
100 void (*remove_slot) (struct zpci_dev *zdev);
101};
102
103static inline bool zdev_enabled(struct zpci_dev *zdev)
104{
105 return (zdev->fh & (1UL << 31)) ? true : false;
106}
107
108/* -----------------------------------------------------------------------------
109 Prototypes
110----------------------------------------------------------------------------- */
111/* Base stuff */
112struct zpci_dev *zpci_alloc_device(void);
113int zpci_create_device(struct zpci_dev *);
114int zpci_enable_device(struct zpci_dev *);
115void zpci_stop_device(struct zpci_dev *);
116void zpci_free_device(struct zpci_dev *);
117int zpci_scan_device(struct zpci_dev *);
118int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
119int zpci_unregister_ioat(struct zpci_dev *, u8);
120
121/* CLP */
122int clp_find_pci_devices(void);
123int clp_add_pci_device(u32, u32, int);
124int clp_enable_fh(struct zpci_dev *, u8);
125int clp_disable_fh(struct zpci_dev *);
126
127/* MSI */
128struct msi_desc *__irq_get_msi_desc(unsigned int);
129int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
130int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
131void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
132int zpci_msihash_init(void);
133void zpci_msihash_exit(void);
134
135/* Error handling and recovery */
136void zpci_event_error(void *);
137void zpci_event_availability(void *);
138
139/* Helpers */
140struct zpci_dev *get_zdev(struct pci_dev *);
141struct zpci_dev *get_zdev_by_fid(u32);
142bool zpci_fid_present(u32);
143
144/* sysfs */
145int zpci_sysfs_add_device(struct device *);
146void zpci_sysfs_remove_device(struct device *);
147
148/* DMA */
149int zpci_dma_init(void);
150void zpci_dma_exit(void);
151
152/* Hotplug */
153extern struct mutex zpci_list_lock;
154extern struct list_head zpci_list;
155extern struct pci_hp_callback_ops hotplug_ops;
156extern unsigned int pci_probe;
157
158#endif
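The zpci_list and zpci_list_lock exported above let hotplug and management code iterate over all known PCI functions; each zpci_dev is linked in through its 'entry' member. A hedged sketch of such a lookup, roughly what the declared get_zdev_by_fid() helper is expected to do (not taken from the patch):

static struct zpci_dev *example_find_by_fid(u32 fid)
{
	struct zpci_dev *zdev, *found = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(zdev, &zpci_list, entry) {
		if (zdev->fid == fid) {
			found = zdev;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return found;
}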
diff --git a/arch/s390/include/asm/pci_clp.h b/arch/s390/include/asm/pci_clp.h
new file mode 100644
index 000000000000..d31d739f8689
--- /dev/null
+++ b/arch/s390/include/asm/pci_clp.h
@@ -0,0 +1,182 @@
1#ifndef _ASM_S390_PCI_CLP_H
2#define _ASM_S390_PCI_CLP_H
3
4#include <asm/clp.h>
5
6/*
7 * Call Logical Processor - Command Codes
8 */
9#define CLP_LIST_PCI 0x0002
10#define CLP_QUERY_PCI_FN 0x0003
11#define CLP_QUERY_PCI_FNGRP 0x0004
12#define CLP_SET_PCI_FN 0x0005
13
14/* PCI function handle list entry */
15struct clp_fh_list_entry {
16 u16 device_id;
17 u16 vendor_id;
18 u32 config_state : 1;
19 u32 : 31;
20 u32 fid; /* PCI function id */
21 u32 fh; /* PCI function handle */
22} __packed;
23
24#define CLP_RC_SETPCIFN_FH 0x0101 /* Invalid PCI fn handle */
25#define CLP_RC_SETPCIFN_FHOP 0x0102 /* Fn handle not valid for op */
26#define CLP_RC_SETPCIFN_DMAAS 0x0103 /* Invalid DMA addr space */
27#define CLP_RC_SETPCIFN_RES 0x0104 /* Insufficient resources */
28#define CLP_RC_SETPCIFN_ALRDY 0x0105 /* Fn already in requested state */
29#define CLP_RC_SETPCIFN_ERR 0x0106 /* Fn in permanent error state */
30#define CLP_RC_SETPCIFN_RECPND 0x0107 /* Error recovery pending */
31#define CLP_RC_SETPCIFN_BUSY 0x0108 /* Fn busy */
32#define CLP_RC_LISTPCI_BADRT 0x010a /* Resume token not recognized */
33#define CLP_RC_QUERYPCIFG_PFGID 0x010b /* Unrecognized PFGID */
34
35/* request or response block header length */
36#define LIST_PCI_HDR_LEN 32
37
38/* Number of function handles fitting in response block */
39#define CLP_FH_LIST_NR_ENTRIES \
40 ((CLP_BLK_SIZE - 2 * LIST_PCI_HDR_LEN) \
41 / sizeof(struct clp_fh_list_entry))
42
43#define CLP_SET_ENABLE_PCI_FN 0 /* Yes, 0 enables it */
44#define CLP_SET_DISABLE_PCI_FN 1 /* Yes, 1 disables it */
45
46#define CLP_UTIL_STR_LEN 64
47
48/* List PCI functions request */
49struct clp_req_list_pci {
50 struct clp_req_hdr hdr;
51 u32 fmt : 4; /* cmd request block format */
52 u32 : 28;
53 u64 reserved1;
54 u64 resume_token;
55 u64 reserved2;
56} __packed;
57
58/* List PCI functions response */
59struct clp_rsp_list_pci {
60 struct clp_rsp_hdr hdr;
61 u32 fmt : 4; /* cmd request block format */
62 u32 : 28;
63 u64 reserved1;
64 u64 resume_token;
65 u32 reserved2;
66 u16 max_fn;
67 u8 reserved3;
68 u8 entry_size;
69 struct clp_fh_list_entry fh_list[CLP_FH_LIST_NR_ENTRIES];
70} __packed;
71
72/* Query PCI function request */
73struct clp_req_query_pci {
74 struct clp_req_hdr hdr;
75 u32 fmt : 4; /* cmd request block format */
76 u32 : 28;
77 u64 reserved1;
78 u32 fh; /* function handle */
79 u32 reserved2;
80 u64 reserved3;
81} __packed;
82
83/* Query PCI function response */
84struct clp_rsp_query_pci {
85 struct clp_rsp_hdr hdr;
86 u32 fmt : 4; /* cmd request block format */
87 u32 : 28;
88 u64 reserved1;
89 u16 vfn; /* virtual fn number */
90 u16 : 7;
91 u16 util_str_avail : 1; /* utility string available? */
92 u16 pfgid : 8; /* pci function group id */
93 u32 fid; /* pci function id */
94 u8 bar_size[PCI_BAR_COUNT];
95 u16 pchid;
96 u32 bar[PCI_BAR_COUNT];
97 u64 reserved2;
98 u64 sdma; /* start dma as */
99 u64 edma; /* end dma as */
100 u64 reserved3[6];
101 u8 util_str[CLP_UTIL_STR_LEN]; /* utility string */
102} __packed;
103
104/* Query PCI function group request */
105struct clp_req_query_pci_grp {
106 struct clp_req_hdr hdr;
107 u32 fmt : 4; /* cmd request block format */
108 u32 : 28;
109 u64 reserved1;
110 u32 : 24;
111 u32 pfgid : 8; /* function group id */
112 u32 reserved2;
113 u64 reserved3;
114} __packed;
115
116/* Query PCI function group response */
117struct clp_rsp_query_pci_grp {
118 struct clp_rsp_hdr hdr;
119 u32 fmt : 4; /* cmd request block format */
120 u32 : 28;
121 u64 reserved1;
122 u16 : 4;
123 u16 noi : 12; /* number of interrupts */
124 u8 version;
125 u8 : 6;
126 u8 frame : 1;
127 u8 refresh : 1; /* TLB refresh mode */
128 u16 reserved2;
129 u16 mui;
130 u64 reserved3;
131 u64 dasm; /* dma address space mask */
132 u64 msia; /* MSI address */
133 u64 reserved4;
134 u64 reserved5;
135} __packed;
136
137/* Set PCI function request */
138struct clp_req_set_pci {
139 struct clp_req_hdr hdr;
140 u32 fmt : 4; /* cmd request block format */
141 u32 : 28;
142 u64 reserved1;
143 u32 fh; /* function handle */
144 u16 reserved2;
145 u8 oc; /* operation controls */
146 u8 ndas; /* number of dma spaces */
147 u64 reserved3;
148} __packed;
149
150/* Set PCI function response */
151struct clp_rsp_set_pci {
152 struct clp_rsp_hdr hdr;
153 u32 fmt : 4; /* cmd request block format */
154 u32 : 28;
155 u64 reserved1;
156 u32 fh; /* function handle */
157 u32 reserved3;
158 u64 reserved4;
159} __packed;
160
161/* Combined request/response block structures used by clp insn */
162struct clp_req_rsp_list_pci {
163 struct clp_req_list_pci request;
164 struct clp_rsp_list_pci response;
165} __packed;
166
167struct clp_req_rsp_set_pci {
168 struct clp_req_set_pci request;
169 struct clp_rsp_set_pci response;
170} __packed;
171
172struct clp_req_rsp_query_pci {
173 struct clp_req_query_pci request;
174 struct clp_rsp_query_pci response;
175} __packed;
176
177struct clp_req_rsp_query_pci_grp {
178 struct clp_req_query_pci_grp request;
179 struct clp_rsp_query_pci_grp response;
180} __packed;
181
182#endif
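A hedged sketch of how a CLP_LIST_PCI request block could be set up with the structures above. The routine that actually issues the CLP call lives in arch/s390/pci/pci_clp.c (not shown in this hunk); clp_issue() below is only a placeholder name for it:

static int example_list_pci(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, CLP_BLK_SIZE);
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_LIST_PCI;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.resume_token = 0;

	rc = clp_issue(rrb);		/* placeholder for the real CLP call */
	if (!rc && rrb->response.hdr.rsp != CLP_RC_OK)
		rc = -EIO;

	free_pages((unsigned long) rrb, get_order(CLP_BLK_SIZE));
	return rc;
}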
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
new file mode 100644
index 000000000000..30b4c179c38c
--- /dev/null
+++ b/arch/s390/include/asm/pci_dma.h
@@ -0,0 +1,196 @@
1#ifndef _ASM_S390_PCI_DMA_H
2#define _ASM_S390_PCI_DMA_H
3
4/* I/O Translation Anchor (IOTA) */
5enum zpci_ioat_dtype {
6 ZPCI_IOTA_STO = 0,
7 ZPCI_IOTA_RTTO = 1,
8 ZPCI_IOTA_RSTO = 2,
9 ZPCI_IOTA_RFTO = 3,
10 ZPCI_IOTA_PFAA = 4,
11 ZPCI_IOTA_IOPFAA = 5,
12 ZPCI_IOTA_IOPTO = 7
13};
14
15#define ZPCI_IOTA_IOT_ENABLED 0x800UL
16#define ZPCI_IOTA_DT_ST (ZPCI_IOTA_STO << 2)
17#define ZPCI_IOTA_DT_RT (ZPCI_IOTA_RTTO << 2)
18#define ZPCI_IOTA_DT_RS (ZPCI_IOTA_RSTO << 2)
19#define ZPCI_IOTA_DT_RF (ZPCI_IOTA_RFTO << 2)
20#define ZPCI_IOTA_DT_PF (ZPCI_IOTA_PFAA << 2)
21#define ZPCI_IOTA_FS_4K 0
22#define ZPCI_IOTA_FS_1M 1
23#define ZPCI_IOTA_FS_2G 2
24#define ZPCI_KEY (PAGE_DEFAULT_KEY << 5)
25
26#define ZPCI_IOTA_STO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_ST)
27#define ZPCI_IOTA_RTTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RT)
28#define ZPCI_IOTA_RSTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RS)
29#define ZPCI_IOTA_RFTO_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_RF)
30#define ZPCI_IOTA_RFAA_FLAG (ZPCI_IOTA_IOT_ENABLED | ZPCI_KEY | ZPCI_IOTA_DT_PF | ZPCI_IOTA_FS_2G)
31
32/* I/O Region and segment tables */
33#define ZPCI_INDEX_MASK 0x7ffUL
34
35#define ZPCI_TABLE_TYPE_MASK 0xc
36#define ZPCI_TABLE_TYPE_RFX 0xc
37#define ZPCI_TABLE_TYPE_RSX 0x8
38#define ZPCI_TABLE_TYPE_RTX 0x4
39#define ZPCI_TABLE_TYPE_SX 0x0
40
41#define ZPCI_TABLE_LEN_RFX 0x3
42#define ZPCI_TABLE_LEN_RSX 0x3
43#define ZPCI_TABLE_LEN_RTX 0x3
44
45#define ZPCI_TABLE_OFFSET_MASK 0xc0
46#define ZPCI_TABLE_SIZE 0x4000
47#define ZPCI_TABLE_ALIGN ZPCI_TABLE_SIZE
48#define ZPCI_TABLE_ENTRY_SIZE (sizeof(unsigned long))
49#define ZPCI_TABLE_ENTRIES (ZPCI_TABLE_SIZE / ZPCI_TABLE_ENTRY_SIZE)
50
51#define ZPCI_TABLE_BITS 11
52#define ZPCI_PT_BITS 8
53#define ZPCI_ST_SHIFT (ZPCI_PT_BITS + PAGE_SHIFT)
54#define ZPCI_RT_SHIFT (ZPCI_ST_SHIFT + ZPCI_TABLE_BITS)
55
56#define ZPCI_RTE_FLAG_MASK 0x3fffUL
57#define ZPCI_RTE_ADDR_MASK (~ZPCI_RTE_FLAG_MASK)
58#define ZPCI_STE_FLAG_MASK 0x7ffUL
59#define ZPCI_STE_ADDR_MASK (~ZPCI_STE_FLAG_MASK)
60
61/* I/O Page tables */
62#define ZPCI_PTE_VALID_MASK 0x400
63#define ZPCI_PTE_INVALID 0x400
64#define ZPCI_PTE_VALID 0x000
65#define ZPCI_PT_SIZE 0x800
66#define ZPCI_PT_ALIGN ZPCI_PT_SIZE
67#define ZPCI_PT_ENTRIES (ZPCI_PT_SIZE / ZPCI_TABLE_ENTRY_SIZE)
68#define ZPCI_PT_MASK (ZPCI_PT_ENTRIES - 1)
69
70#define ZPCI_PTE_FLAG_MASK 0xfffUL
71#define ZPCI_PTE_ADDR_MASK (~ZPCI_PTE_FLAG_MASK)
72
73/* Shared bits */
74#define ZPCI_TABLE_VALID 0x00
75#define ZPCI_TABLE_INVALID 0x20
76#define ZPCI_TABLE_PROTECTED 0x200
77#define ZPCI_TABLE_UNPROTECTED 0x000
78
79#define ZPCI_TABLE_VALID_MASK 0x20
80#define ZPCI_TABLE_PROT_MASK 0x200
81
82static inline unsigned int calc_rtx(dma_addr_t ptr)
83{
84 return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
85}
86
87static inline unsigned int calc_sx(dma_addr_t ptr)
88{
89 return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
90}
91
92static inline unsigned int calc_px(dma_addr_t ptr)
93{
94 return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
95}
96
97static inline void set_pt_pfaa(unsigned long *entry, void *pfaa)
98{
99 *entry &= ZPCI_PTE_FLAG_MASK;
100 *entry |= ((unsigned long) pfaa & ZPCI_PTE_ADDR_MASK);
101}
102
103static inline void set_rt_sto(unsigned long *entry, void *sto)
104{
105 *entry &= ZPCI_RTE_FLAG_MASK;
106 *entry |= ((unsigned long) sto & ZPCI_RTE_ADDR_MASK);
107 *entry |= ZPCI_TABLE_TYPE_RTX;
108}
109
110static inline void set_st_pto(unsigned long *entry, void *pto)
111{
112 *entry &= ZPCI_STE_FLAG_MASK;
113 *entry |= ((unsigned long) pto & ZPCI_STE_ADDR_MASK);
114 *entry |= ZPCI_TABLE_TYPE_SX;
115}
116
117static inline void validate_rt_entry(unsigned long *entry)
118{
119 *entry &= ~ZPCI_TABLE_VALID_MASK;
120 *entry &= ~ZPCI_TABLE_OFFSET_MASK;
121 *entry |= ZPCI_TABLE_VALID;
122 *entry |= ZPCI_TABLE_LEN_RTX;
123}
124
125static inline void validate_st_entry(unsigned long *entry)
126{
127 *entry &= ~ZPCI_TABLE_VALID_MASK;
128 *entry |= ZPCI_TABLE_VALID;
129}
130
131static inline void invalidate_table_entry(unsigned long *entry)
132{
133 *entry &= ~ZPCI_TABLE_VALID_MASK;
134 *entry |= ZPCI_TABLE_INVALID;
135}
136
137static inline void invalidate_pt_entry(unsigned long *entry)
138{
139 WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
140 *entry &= ~ZPCI_PTE_VALID_MASK;
141 *entry |= ZPCI_PTE_INVALID;
142}
143
144static inline void validate_pt_entry(unsigned long *entry)
145{
146 WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
147 *entry &= ~ZPCI_PTE_VALID_MASK;
148 *entry |= ZPCI_PTE_VALID;
149}
150
151static inline void entry_set_protected(unsigned long *entry)
152{
153 *entry &= ~ZPCI_TABLE_PROT_MASK;
154 *entry |= ZPCI_TABLE_PROTECTED;
155}
156
157static inline void entry_clr_protected(unsigned long *entry)
158{
159 *entry &= ~ZPCI_TABLE_PROT_MASK;
160 *entry |= ZPCI_TABLE_UNPROTECTED;
161}
162
163static inline int reg_entry_isvalid(unsigned long entry)
164{
165 return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
166}
167
168static inline int pt_entry_isvalid(unsigned long entry)
169{
170 return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
171}
172
173static inline int entry_isprotected(unsigned long entry)
174{
175 return (entry & ZPCI_TABLE_PROT_MASK) == ZPCI_TABLE_PROTECTED;
176}
177
178static inline unsigned long *get_rt_sto(unsigned long entry)
179{
180 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
181 ? (unsigned long *) (entry & ZPCI_RTE_ADDR_MASK)
182 : NULL;
183}
184
185static inline unsigned long *get_st_pto(unsigned long entry)
186{
187 return ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
188 ? (unsigned long *) (entry & ZPCI_STE_ADDR_MASK)
189 : NULL;
190}
191
192/* Prototypes */
193int zpci_dma_init_device(struct zpci_dev *);
194void zpci_dma_exit_device(struct zpci_dev *);
195
196#endif
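A worked example of the index helpers above, assuming the usual s390 PAGE_SHIFT of 12: ZPCI_ST_SHIFT = 8 + 12 = 20 and ZPCI_RT_SHIFT = 20 + 11 = 31, so a DMA address decomposes as follows (illustrative value, not from the patch):

/* dma_addr = 0x12345678 */
/* calc_rtx(dma_addr) = (0x12345678 >> 31) & 0x7ff = 0x000   (region-table index)  */
/* calc_sx(dma_addr)  = (0x12345678 >> 20) & 0x7ff = 0x123   (segment-table index) */
/* calc_px(dma_addr)  = (0x12345678 >> 12) & 0x0ff = 0x045   (page-table index)    */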
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
new file mode 100644
index 000000000000..1486a98d5dad
--- /dev/null
+++ b/arch/s390/include/asm/pci_insn.h
@@ -0,0 +1,280 @@
1#ifndef _ASM_S390_PCI_INSN_H
2#define _ASM_S390_PCI_INSN_H
3
4#include <linux/delay.h>
5
6#define ZPCI_INSN_BUSY_DELAY 1 /* 1 microsecond */
7
8/* Load/Store status codes */
9#define ZPCI_PCI_ST_FUNC_NOT_ENABLED 4
10#define ZPCI_PCI_ST_FUNC_IN_ERR 8
11#define ZPCI_PCI_ST_BLOCKED 12
12#define ZPCI_PCI_ST_INSUF_RES 16
13#define ZPCI_PCI_ST_INVAL_AS 20
14#define ZPCI_PCI_ST_FUNC_ALREADY_ENABLED 24
15#define ZPCI_PCI_ST_DMA_AS_NOT_ENABLED 28
16#define ZPCI_PCI_ST_2ND_OP_IN_INV_AS 36
17#define ZPCI_PCI_ST_FUNC_NOT_AVAIL 40
18#define ZPCI_PCI_ST_ALREADY_IN_RQ_STATE 44
19
20/* Load/Store return codes */
21#define ZPCI_PCI_LS_OK 0
22#define ZPCI_PCI_LS_ERR 1
23#define ZPCI_PCI_LS_BUSY 2
24#define ZPCI_PCI_LS_INVAL_HANDLE 3
25
26/* Load/Store address space identifiers */
27#define ZPCI_PCIAS_MEMIO_0 0
28#define ZPCI_PCIAS_MEMIO_1 1
29#define ZPCI_PCIAS_MEMIO_2 2
30#define ZPCI_PCIAS_MEMIO_3 3
31#define ZPCI_PCIAS_MEMIO_4 4
32#define ZPCI_PCIAS_MEMIO_5 5
33#define ZPCI_PCIAS_CFGSPC 15
34
35/* Modify PCI Function Controls */
36#define ZPCI_MOD_FC_REG_INT 2
37#define ZPCI_MOD_FC_DEREG_INT 3
38#define ZPCI_MOD_FC_REG_IOAT 4
39#define ZPCI_MOD_FC_DEREG_IOAT 5
40#define ZPCI_MOD_FC_REREG_IOAT 6
41#define ZPCI_MOD_FC_RESET_ERROR 7
42#define ZPCI_MOD_FC_RESET_BLOCK 9
43#define ZPCI_MOD_FC_SET_MEASURE 10
44
45/* FIB function controls */
46#define ZPCI_FIB_FC_ENABLED 0x80
47#define ZPCI_FIB_FC_ERROR 0x40
48#define ZPCI_FIB_FC_LS_BLOCKED 0x20
49#define ZPCI_FIB_FC_DMAAS_REG 0x10
50
51/* FIB function controls */
52#define ZPCI_FIB_FC_ENABLED 0x80
53#define ZPCI_FIB_FC_ERROR 0x40
54#define ZPCI_FIB_FC_LS_BLOCKED 0x20
55#define ZPCI_FIB_FC_DMAAS_REG 0x10
56
57/* Function Information Block */
58struct zpci_fib {
59 u32 fmt : 8; /* format */
60 u32 : 24;
61 u32 reserved1;
62 u8 fc; /* function controls */
63 u8 reserved2;
64 u16 reserved3;
65 u32 reserved4;
66 u64 pba; /* PCI base address */
67 u64 pal; /* PCI address limit */
68 u64 iota; /* I/O Translation Anchor */
69 u32 : 1;
70 u32 isc : 3; /* Interrupt subclass */
71 u32 noi : 12; /* Number of interrupts */
72 u32 : 2;
73 u32 aibvo : 6; /* Adapter interrupt bit vector offset */
74 u32 sum : 1; /* Adapter int summary bit enabled */
75 u32 : 1;
76 u32 aisbo : 6; /* Adapter int summary bit offset */
77 u32 reserved5;
78 u64 aibv; /* Adapter int bit vector address */
79 u64 aisb; /* Adapter int summary bit address */
80 u64 fmb_addr; /* Function measurement block address and key */
81 u64 reserved6;
82 u64 reserved7;
83} __packed;
84
85/* Modify PCI Function Controls */
86static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
87{
88 u8 cc;
89
90 asm volatile (
91 " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
92 " ipm %[cc]\n"
93 " srl %[cc],28\n"
94 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
95 : : "cc");
96 *status = req >> 24 & 0xff;
97 return cc;
98}
99
100static inline int mpcifc_instr(u64 req, struct zpci_fib *fib)
101{
102 u8 cc, status;
103
104 do {
105 cc = __mpcifc(req, fib, &status);
106 if (cc == 2)
107 msleep(ZPCI_INSN_BUSY_DELAY);
108 } while (cc == 2);
109
110 if (cc)
111 printk_once(KERN_ERR "%s: error cc: %d status: %d\n",
112 __func__, cc, status);
113 return (cc) ? -EIO : 0;
114}
115
116/* Refresh PCI Translations */
117static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
118{
119 register u64 __addr asm("2") = addr;
120 register u64 __range asm("3") = range;
121 u8 cc;
122
123 asm volatile (
124 " .insn rre,0xb9d30000,%[fn],%[addr]\n"
125 " ipm %[cc]\n"
126 " srl %[cc],28\n"
127 : [cc] "=d" (cc), [fn] "+d" (fn)
128 : [addr] "d" (__addr), "d" (__range)
129 : "cc");
130 *status = fn >> 24 & 0xff;
131 return cc;
132}
133
134static inline int rpcit_instr(u64 fn, u64 addr, u64 range)
135{
136 u8 cc, status;
137
138 do {
139 cc = __rpcit(fn, addr, range, &status);
140 if (cc == 2)
141 udelay(ZPCI_INSN_BUSY_DELAY);
142 } while (cc == 2);
143
144 if (cc)
145 printk_once(KERN_ERR "%s: error cc: %d status: %d dma_addr: %Lx size: %Lx\n",
146 __func__, cc, status, addr, range);
147 return (cc) ? -EIO : 0;
148}
149
150/* Store PCI function controls */
151static inline u8 __stpcifc(u32 handle, u8 space, struct zpci_fib *fib, u8 *status)
152{
153 u64 fn = (u64) handle << 32 | space << 16;
154 u8 cc;
155
156 asm volatile (
157 " .insn rxy,0xe300000000d4,%[fn],%[fib]\n"
158 " ipm %[cc]\n"
159 " srl %[cc],28\n"
160 : [cc] "=d" (cc), [fn] "+d" (fn), [fib] "=m" (*fib)
161 : : "cc");
162 *status = fn >> 24 & 0xff;
163 return cc;
164}
165
166/* Set Interruption Controls */
167static inline void sic_instr(u16 ctl, char *unused, u8 isc)
168{
169 asm volatile (
170 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
171 : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
172}
173
174/* PCI Load */
175static inline u8 __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
176{
177 register u64 __req asm("2") = req;
178 register u64 __offset asm("3") = offset;
179 u64 __data;
180 u8 cc;
181
182 asm volatile (
183 " .insn rre,0xb9d20000,%[data],%[req]\n"
184 " ipm %[cc]\n"
185 " srl %[cc],28\n"
186 : [cc] "=d" (cc), [data] "=d" (__data), [req] "+d" (__req)
187 : "d" (__offset)
188 : "cc");
189 *status = __req >> 24 & 0xff;
190 *data = __data;
191 return cc;
192}
193
194static inline int pcilg_instr(u64 *data, u64 req, u64 offset)
195{
196 u8 cc, status;
197
198 do {
199 cc = __pcilg(data, req, offset, &status);
200 if (cc == 2)
201 udelay(ZPCI_INSN_BUSY_DELAY);
202 } while (cc == 2);
203
204 if (cc) {
205 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
206 __func__, cc, status, req, offset);
207 /* TODO: on IO errors set data to 0xff...
208 * here or in users of pcilg (le conversion)?
209 */
210 }
211 return (cc) ? -EIO : 0;
212}
213
214/* PCI Store */
215static inline u8 __pcistg(u64 data, u64 req, u64 offset, u8 *status)
216{
217 register u64 __req asm("2") = req;
218 register u64 __offset asm("3") = offset;
219 u8 cc;
220
221 asm volatile (
222 " .insn rre,0xb9d00000,%[data],%[req]\n"
223 " ipm %[cc]\n"
224 " srl %[cc],28\n"
225 : [cc] "=d" (cc), [req] "+d" (__req)
226 : "d" (__offset), [data] "d" (data)
227 : "cc");
228 *status = __req >> 24 & 0xff;
229 return cc;
230}
231
232static inline int pcistg_instr(u64 data, u64 req, u64 offset)
233{
234 u8 cc, status;
235
236 do {
237 cc = __pcistg(data, req, offset, &status);
238 if (cc == 2)
239 udelay(ZPCI_INSN_BUSY_DELAY);
240 } while (cc == 2);
241
242 if (cc)
243 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
244 __func__, cc, status, req, offset);
245 return (cc) ? -EIO : 0;
246}
247
248/* PCI Store Block */
249static inline u8 __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
250{
251 u8 cc;
252
253 asm volatile (
254 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
255 " ipm %[cc]\n"
256 " srl %[cc],28\n"
257 : [cc] "=d" (cc), [req] "+d" (req)
258 : [offset] "d" (offset), [data] "Q" (*data)
259 : "cc");
260 *status = req >> 24 & 0xff;
261 return cc;
262}
263
264static inline int pcistb_instr(const u64 *data, u64 req, u64 offset)
265{
266 u8 cc, status;
267
268 do {
269 cc = __pcistb(data, req, offset, &status);
270 if (cc == 2)
271 udelay(ZPCI_INSN_BUSY_DELAY);
272 } while (cc == 2);
273
274 if (cc)
275 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
276 __func__, cc, status, req, offset);
277 return (cc) ? -EIO : 0;
278}
279
280#endif
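
(Aside, not part of the patch.) The four wrappers above, rpcit_instr(), pcilg_instr(), pcistg_instr() and pcistb_instr(), share one pattern: retry while the instruction reports condition code 2 (busy), delay between attempts, and map any other non-zero condition code to -EIO. A minimal user-space sketch of that pattern, with a made-up issue_insn() standing in for the inline-assembly primitives and a placeholder delay constant:

#include <errno.h>
#include <stdio.h>

#define ZPCI_INSN_BUSY_DELAY 1	/* placeholder; the real constant lives in pci_insn.h */

static void udelay(unsigned long usecs) { (void)usecs; /* no-op stand-in */ }

/* Made-up stand-in for __rpcit()/__pcilg()/__pcistg()/__pcistb():
 * reports "busy" (cc 2) twice, then succeeds (cc 0). */
static unsigned char issue_insn(unsigned char *status)
{
	static int busy = 2;

	*status = 0;
	return busy-- > 0 ? 2 : 0;
}

/* Same shape as the *_instr() wrappers above. */
static int zpci_insn_wrapper(void)
{
	unsigned char cc, status;

	do {
		cc = issue_insn(&status);
		if (cc == 2)		/* busy: back off and retry */
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		fprintf(stderr, "error cc: %d status: %d\n", cc, status);
	return cc ? -EIO : 0;
}

int main(void)
{
	printf("wrapper returned %d\n", zpci_insn_wrapper());
	return 0;
}
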
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
new file mode 100644
index 000000000000..5fd81f31d6c7
--- /dev/null
+++ b/arch/s390/include/asm/pci_io.h
@@ -0,0 +1,194 @@
1#ifndef _ASM_S390_PCI_IO_H
2#define _ASM_S390_PCI_IO_H
3
4#ifdef CONFIG_PCI
5
6#include <linux/kernel.h>
7#include <linux/slab.h>
8#include <asm/pci_insn.h>
9
10/* I/O Map */
11#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
12#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
13#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
14#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
15
16struct zpci_iomap_entry {
17 u32 fh;
18 u8 bar;
19};
20
21extern struct zpci_iomap_entry *zpci_iomap_start;
22
23#define ZPCI_IDX(addr) \
24 (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
25#define ZPCI_OFFSET(addr) \
26 ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
27
28#define ZPCI_CREATE_REQ(handle, space, len) \
29 ((u64) handle << 32 | space << 16 | len)
30
31#define zpci_read(LENGTH, RETTYPE) \
32static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
33{ \
34 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
35 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
36 u64 data; \
37 int rc; \
38 \
39 rc = pcilg_instr(&data, req, ZPCI_OFFSET(addr)); \
40 if (rc) \
41 data = -1ULL; \
42 return (RETTYPE) data; \
43}
44
45#define zpci_write(LENGTH, VALTYPE) \
46static inline void zpci_write_##VALTYPE(VALTYPE val, \
47 const volatile void __iomem *addr) \
48{ \
49 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
50 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
51 u64 data = (VALTYPE) val; \
52 \
53 pcistg_instr(data, req, ZPCI_OFFSET(addr)); \
54}
55
56zpci_read(8, u64)
57zpci_read(4, u32)
58zpci_read(2, u16)
59zpci_read(1, u8)
60zpci_write(8, u64)
61zpci_write(4, u32)
62zpci_write(2, u16)
63zpci_write(1, u8)
64
65static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
66{
67 u64 val;
68
69 switch (len) {
70 case 1:
71 val = (u64) *((u8 *) data);
72 break;
73 case 2:
74 val = (u64) *((u16 *) data);
75 break;
76 case 4:
77 val = (u64) *((u32 *) data);
78 break;
79 case 8:
80 val = (u64) *((u64 *) data);
81 break;
82 default:
83 val = 0; /* let FW report error */
84 break;
85 }
86 return pcistg_instr(val, req, offset);
87}
88
89static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
90{
91 u64 data;
92 u8 cc;
93
94 cc = pcilg_instr(&data, req, offset);
95 switch (len) {
96 case 1:
97 *((u8 *) dst) = (u8) data;
98 break;
99 case 2:
100 *((u16 *) dst) = (u16) data;
101 break;
102 case 4:
103 *((u32 *) dst) = (u32) data;
104 break;
105 case 8:
106 *((u64 *) dst) = (u64) data;
107 break;
108 }
109 return cc;
110}
111
112static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
113{
114 return pcistb_instr(data, req, offset);
115}
116
117static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
118{
119 int count = len > max ? max : len, size = 1;
120
121 while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
122 dst = dst >> 1;
123 src = src >> 1;
124 size = size << 1;
125 }
126 return size;
127}
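
(Aside, not part of the patch.) zpci_get_max_write_size() above picks the largest power-of-two access width, capped by max and by the remaining length, to which both addresses are aligned; the memcpy helpers that follow use it to split a transfer into the widest legal single accesses. A standalone user-space sketch of the same computation, with arbitrary example addresses:

#include <stdint.h>
#include <stdio.h>

/* Same logic as zpci_get_max_write_size() above, in plain user-space C. */
static unsigned int max_access_size(uint64_t src, uint64_t dst, int len, int max)
{
	int count = len > max ? max : len, size = 1;

	while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
		dst >>= 1;
		src >>= 1;
		size <<= 1;
	}
	return size;
}

int main(void)
{
	/* 0x1004 and 0x2008 are both 4-byte but not 8-byte aligned,
	 * so at most 4 bytes can be moved in a single access. */
	printf("size = %u\n", max_access_size(0x1004, 0x2008, 100, 8));
	/* Fully aligned addresses allow the full 8-byte width. */
	printf("size = %u\n", max_access_size(0x1000, 0x2000, 100, 8));
	return 0;
}
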
128
129static inline int zpci_memcpy_fromio(void *dst,
130 const volatile void __iomem *src,
131 unsigned long n)
132{
133 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
134 u64 req, offset = ZPCI_OFFSET(src);
135 int size, rc = 0;
136
137 while (n > 0) {
138 size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
139 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
140 rc = zpci_read_single(req, dst, offset, size);
141 if (rc)
142 break;
143 offset += size;
144 dst += size;
145 n -= size;
146 }
147 return rc;
148}
149
150static inline int zpci_memcpy_toio(volatile void __iomem *dst,
151 const void *src, unsigned long n)
152{
153 struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
154 u64 req, offset = ZPCI_OFFSET(dst);
155 int size, rc = 0;
156
157 if (!src)
158 return -EINVAL;
159
160 while (n > 0) {
161 size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
162 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
163
164 if (size > 8) /* main path */
165 rc = zpci_write_block(req, src, offset);
166 else
167 rc = zpci_write_single(req, src, offset, size);
168 if (rc)
169 break;
170 offset += size;
171 src += size;
172 n -= size;
173 }
174 return rc;
175}
176
177static inline int zpci_memset_io(volatile void __iomem *dst,
178 unsigned char val, size_t count)
179{
180 u8 *src = kmalloc(count, GFP_KERNEL);
181 int rc;
182
183 if (src == NULL)
184 return -ENOMEM;
185 memset(src, val, count);
186
187 rc = zpci_memcpy_toio(dst, src, count);
188 kfree(src);
189 return rc;
190}
191
192#endif /* CONFIG_PCI */
193
194#endif /* _ASM_S390_PCI_IO_H */
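
(Aside, not part of the patch.) The generated zpci_read_*/zpci_write_* accessors above rely on the iomap cookie layout defined at the top of this header: bits 48-62 of the address select an entry in zpci_iomap_start[], the low 48 bits are the offset into the mapped BAR, and ZPCI_CREATE_REQ() packs the function handle, address space (BAR) and access length into the request word passed to the PCI load/store instructions. A standalone sketch with made-up handle, slot and offset values:

#include <stdint.h>
#include <stdio.h>

/* Constants mirrored from pci_io.h above. */
#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000ULL
#define ZPCI_IOMAP_ADDR_IDX_MASK	0x7fff000000000000ULL
#define ZPCI_IOMAP_ADDR_OFF_MASK	0x0000ffffffffffffULL

static uint64_t zpci_idx(uint64_t addr)		/* ZPCI_IDX()    */
{
	return (addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48;
}

static uint64_t zpci_offset(uint64_t addr)	/* ZPCI_OFFSET() */
{
	return addr & ZPCI_IOMAP_ADDR_OFF_MASK;
}

/* ZPCI_CREATE_REQ(): function handle | address space (BAR) | access length */
static uint64_t zpci_create_req(uint32_t handle, uint8_t space, uint8_t len)
{
	return (uint64_t)handle << 32 | (uint64_t)space << 16 | len;
}

int main(void)
{
	/* Made-up cookie: iomap slot 3, offset 0x100 into the mapped BAR. */
	uint64_t addr = ZPCI_IOMAP_ADDR_BASE | (3ULL << 48) | 0x100;

	printf("idx = %llu, offset = 0x%llx\n",
	       (unsigned long long)zpci_idx(addr),
	       (unsigned long long)zpci_offset(addr));
	/* A 4-byte access through BAR 2 of a function with handle 0x1234. */
	printf("req = 0x%llx\n",
	       (unsigned long long)zpci_create_req(0x1234, 2, 4));
	return 0;
}
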
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c814e6f5b57d..c928dc1938f2 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -35,7 +35,6 @@
35extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096))); 35extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
36extern void paging_init(void); 36extern void paging_init(void);
37extern void vmem_map_init(void); 37extern void vmem_map_init(void);
38extern void fault_init(void);
39 38
40/* 39/*
41 * The S390 doesn't have any external MMU info: the kernel page 40 * The S390 doesn't have any external MMU info: the kernel page
@@ -336,6 +335,8 @@ extern unsigned long MODULES_END;
336#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) 335#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
337#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV) 336#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
338 337
338#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
339
339/* Bits in the segment table entry */ 340/* Bits in the segment table entry */
340#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ 341#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
341#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ 342#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
@@ -435,6 +436,7 @@ static inline int pgd_bad(pgd_t pgd) { return 0; }
435 436
436static inline int pud_present(pud_t pud) { return 1; } 437static inline int pud_present(pud_t pud) { return 1; }
437static inline int pud_none(pud_t pud) { return 0; } 438static inline int pud_none(pud_t pud) { return 0; }
439static inline int pud_large(pud_t pud) { return 0; }
438static inline int pud_bad(pud_t pud) { return 0; } 440static inline int pud_bad(pud_t pud) { return 0; }
439 441
440#else /* CONFIG_64BIT */ 442#else /* CONFIG_64BIT */
@@ -480,6 +482,13 @@ static inline int pud_none(pud_t pud)
480 return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; 482 return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
481} 483}
482 484
485static inline int pud_large(pud_t pud)
486{
487 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
488 return 0;
489 return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
490}
491
483static inline int pud_bad(pud_t pud) 492static inline int pud_bad(pud_t pud)
484{ 493{
485 /* 494 /*
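
(Aside, not part of the patch.) The new pud_large() helper only reports a large mapping when the entry's type field identifies a region-third-table entry and the new _REGION3_ENTRY_LARGE format-control bit is set. A standalone sketch of that test; the type mask and R3 type value below are assumptions mirroring the existing s390 definitions, not values taken from this hunk:

#include <stdint.h>
#include <stdio.h>

/* _REGION3_ENTRY_LARGE is taken from the hunk above; the type mask and the
 * R3 type value are assumptions mirroring the existing s390 definitions. */
#define _REGION_ENTRY_TYPE_MASK	0x0cULL		/* assumed region-entry type bits  */
#define _REGION_ENTRY_TYPE_R3	0x04ULL		/* assumed region-third-table type */
#define _REGION3_ENTRY_LARGE	0x400ULL	/* RTTE-format control, large page */

static int pud_large(uint64_t pud)
{
	if ((pud & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud & _REGION3_ENTRY_LARGE);
}

int main(void)
{
	uint64_t normal = _REGION_ENTRY_TYPE_R3;
	uint64_t large  = _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;

	printf("normal entry: %d, large entry: %d\n",
	       pud_large(normal), pud_large(large));
	return 0;
}
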
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index e62a555557ee..833788693f09 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -55,5 +55,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info);
55void sclp_get_ipl_info(struct sclp_ipl_info *info); 55void sclp_get_ipl_info(struct sclp_ipl_info *info);
56bool sclp_has_linemode(void); 56bool sclp_has_linemode(void);
57bool sclp_has_vt220(void); 57bool sclp_has_vt220(void);
58int sclp_pci_configure(u32 fid);
59int sclp_pci_deconfigure(u32 fid);
58 60
59#endif /* _ASM_S390_SCLP_H */ 61#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 9935cbd6a46f..05425b18c0aa 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -8,32 +8,34 @@ struct cpu;
8 8
9#ifdef CONFIG_SCHED_BOOK 9#ifdef CONFIG_SCHED_BOOK
10 10
11extern unsigned char cpu_socket_id[NR_CPUS]; 11struct cpu_topology_s390 {
12#define topology_physical_package_id(cpu) (cpu_socket_id[cpu]) 12 unsigned short core_id;
13 unsigned short socket_id;
14 unsigned short book_id;
15 cpumask_t core_mask;
16 cpumask_t book_mask;
17};
18
19extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
20
21#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
22#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
23#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
24#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
25#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
13 26
14extern unsigned char cpu_core_id[NR_CPUS]; 27#define mc_capable() 1
15extern cpumask_t cpu_core_map[NR_CPUS];
16 28
17static inline const struct cpumask *cpu_coregroup_mask(int cpu) 29static inline const struct cpumask *cpu_coregroup_mask(int cpu)
18{ 30{
19 return &cpu_core_map[cpu]; 31 return &cpu_topology[cpu].core_mask;
20} 32}
21 33
22#define topology_core_id(cpu) (cpu_core_id[cpu])
23#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
24#define mc_capable() (1)
25
26extern unsigned char cpu_book_id[NR_CPUS];
27extern cpumask_t cpu_book_map[NR_CPUS];
28
29static inline const struct cpumask *cpu_book_mask(int cpu) 34static inline const struct cpumask *cpu_book_mask(int cpu)
30{ 35{
31 return &cpu_book_map[cpu]; 36 return &cpu_topology[cpu].book_mask;
32} 37}
33 38
34#define topology_book_id(cpu) (cpu_book_id[cpu])
35#define topology_book_cpumask(cpu) (&cpu_book_map[cpu])
36
37int topology_cpu_init(struct cpu *); 39int topology_cpu_init(struct cpu *);
38int topology_set_cpu_management(int fc); 40int topology_set_cpu_management(int fc);
39void topology_schedule_update(void); 41void topology_schedule_update(void);
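
(Aside, not part of the patch.) The topology.h rework above folds the separate per-cpu id and mask arrays into one cpu_topology[] array of struct cpu_topology_s390, so every topology_* macro becomes a field access on the same element. A standalone sketch with simplified stand-ins for NR_CPUS and cpumask_t:

#include <stdio.h>

#define NR_CPUS 4
typedef unsigned long cpumask_t;	/* stand-in for the kernel's cpumask_t */

struct cpu_topology_s390 {
	unsigned short core_id;
	unsigned short socket_id;
	unsigned short book_id;
	cpumask_t core_mask;
	cpumask_t book_mask;
};

static struct cpu_topology_s390 cpu_topology[NR_CPUS];

#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
#define topology_core_id(cpu)			(cpu_topology[cpu].core_id)
#define topology_book_id(cpu)			(cpu_topology[cpu].book_id)

int main(void)
{
	/* Arbitrary example values for cpu 1. */
	cpu_topology[1].socket_id = 2;
	cpu_topology[1].core_id = 5;
	cpu_topology[1].book_id = 1;

	printf("cpu 1: socket %d, core %d, book %d\n",
	       topology_physical_package_id(1),
	       topology_core_id(1),
	       topology_book_id(1));
	return 0;
}
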
diff --git a/arch/s390/include/asm/vga.h b/arch/s390/include/asm/vga.h
new file mode 100644
index 000000000000..d375526c261f
--- /dev/null
+++ b/arch/s390/include/asm/vga.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_S390_VGA_H
2#define _ASM_S390_VGA_H
3
4/* Avoid compile errors due to missing asm/vga.h */
5
6#endif /* _ASM_S390_VGA_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4da52fe31743..2ac311ef5c9b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -23,7 +23,7 @@ CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ 23obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ 24 processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ 25 debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
26 sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o 26 sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
27 27
28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) 28obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) 29obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f00286bd2ef9..a7f9abd98cf2 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -83,22 +83,29 @@ enum {
83 U4_12, /* 4 bit unsigned value starting at 12 */ 83 U4_12, /* 4 bit unsigned value starting at 12 */
84 U4_16, /* 4 bit unsigned value starting at 16 */ 84 U4_16, /* 4 bit unsigned value starting at 16 */
85 U4_20, /* 4 bit unsigned value starting at 20 */ 85 U4_20, /* 4 bit unsigned value starting at 20 */
86 U4_24, /* 4 bit unsigned value starting at 24 */
87 U4_28, /* 4 bit unsigned value starting at 28 */
86 U4_32, /* 4 bit unsigned value starting at 32 */ 88 U4_32, /* 4 bit unsigned value starting at 32 */
89 U4_36, /* 4 bit unsigned value starting at 36 */
87 U8_8, /* 8 bit unsigned value starting at 8 */ 90 U8_8, /* 8 bit unsigned value starting at 8 */
88 U8_16, /* 8 bit unsigned value starting at 16 */ 91 U8_16, /* 8 bit unsigned value starting at 16 */
89 U8_24, /* 8 bit unsigned value starting at 24 */ 92 U8_24, /* 8 bit unsigned value starting at 24 */
90 U8_32, /* 8 bit unsigned value starting at 32 */ 93 U8_32, /* 8 bit unsigned value starting at 32 */
91 I8_8, /* 8 bit signed value starting at 8 */ 94 I8_8, /* 8 bit signed value starting at 8 */
92 I8_32, /* 8 bit signed value starting at 32 */ 95 I8_32, /* 8 bit signed value starting at 32 */
96 J12_12, /* PC relative offset at 12 */
93 I16_16, /* 16 bit signed value starting at 16 */ 97 I16_16, /* 16 bit signed value starting at 16 */
94 I16_32, /* 32 bit signed value starting at 16 */ 98 I16_32, /* 32 bit signed value starting at 16 */
95 U16_16, /* 16 bit unsigned value starting at 16 */ 99 U16_16, /* 16 bit unsigned value starting at 16 */
96 U16_32, /* 32 bit unsigned value starting at 16 */ 100 U16_32, /* 32 bit unsigned value starting at 16 */
97 J16_16, /* PC relative jump offset at 16 */ 101 J16_16, /* PC relative jump offset at 16 */
102 J16_32, /* PC relative offset at 16 */
103 I24_24, /* 24 bit signed value starting at 24 */
98 J32_16, /* PC relative long offset at 16 */ 104 J32_16, /* PC relative long offset at 16 */
99 I32_16, /* 32 bit signed value starting at 16 */ 105 I32_16, /* 32 bit signed value starting at 16 */
100 U32_16, /* 32 bit unsigned value starting at 16 */ 106 U32_16, /* 32 bit unsigned value starting at 16 */
101 M_16, /* 4 bit optional mask starting at 16 */ 107 M_16, /* 4 bit optional mask starting at 16 */
108 M_20, /* 4 bit optional mask starting at 20 */
102 RO_28, /* optional GPR starting at position 28 */ 109 RO_28, /* optional GPR starting at position 28 */
103}; 110};
104 111
@@ -109,6 +116,8 @@ enum {
109enum { 116enum {
110 INSTR_INVALID, 117 INSTR_INVALID,
111 INSTR_E, 118 INSTR_E,
119 INSTR_IE_UU,
120 INSTR_MII_UPI,
112 INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU, 121 INSTR_RIE_R0IU, INSTR_RIE_R0UU, INSTR_RIE_RRP, INSTR_RIE_RRPU,
113 INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0, 122 INSTR_RIE_RRUUU, INSTR_RIE_RUPI, INSTR_RIE_RUPU, INSTR_RIE_RRI0,
114 INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP, 123 INSTR_RIL_RI, INSTR_RIL_RP, INSTR_RIL_RU, INSTR_RIL_UP,
@@ -118,13 +127,15 @@ enum {
118 INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF, 127 INSTR_RRE_FF, INSTR_RRE_FR, INSTR_RRE_R0, INSTR_RRE_RA, INSTR_RRE_RF,
119 INSTR_RRE_RR, INSTR_RRE_RR_OPT, 128 INSTR_RRE_RR, INSTR_RRE_RR_OPT,
120 INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR, 129 INSTR_RRF_0UFF, INSTR_RRF_F0FF, INSTR_RRF_F0FF2, INSTR_RRF_F0FR,
121 INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_M0RR, INSTR_RRF_R0RR, 130 INSTR_RRF_FFRU, INSTR_RRF_FUFF, INSTR_RRF_FUFF2, INSTR_RRF_M0RR,
122 INSTR_RRF_R0RR2, INSTR_RRF_RURR, INSTR_RRF_U0FF, INSTR_RRF_U0RF, 131 INSTR_RRF_R0RR, INSTR_RRF_R0RR2, INSTR_RRF_RMRR, INSTR_RRF_RURR,
123 INSTR_RRF_U0RR, INSTR_RRF_UUFF, INSTR_RRR_F0FF, INSTR_RRS_RRRDU, 132 INSTR_RRF_U0FF, INSTR_RRF_U0RF, INSTR_RRF_U0RR, INSTR_RRF_UUFF,
133 INSTR_RRF_UUFR, INSTR_RRF_UURF,
134 INSTR_RRR_F0FF, INSTR_RRS_RRRDU,
124 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR, 135 INSTR_RR_FF, INSTR_RR_R0, INSTR_RR_RR, INSTR_RR_U0, INSTR_RR_UR,
125 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD, 136 INSTR_RSE_CCRD, INSTR_RSE_RRRD, INSTR_RSE_RURD,
126 INSTR_RSI_RRP, 137 INSTR_RSI_RRP,
127 INSTR_RSL_R0RD, 138 INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
128 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD, 139 INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
129 INSTR_RSY_RDRM, 140 INSTR_RSY_RDRM,
130 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD, 141 INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
@@ -136,6 +147,7 @@ enum {
136 INSTR_SIL_RDI, INSTR_SIL_RDU, 147 INSTR_SIL_RDI, INSTR_SIL_RDU,
137 INSTR_SIY_IRD, INSTR_SIY_URD, 148 INSTR_SIY_IRD, INSTR_SIY_URD,
138 INSTR_SI_URD, 149 INSTR_SI_URD,
150 INSTR_SMI_U0RDP,
139 INSTR_SSE_RDRD, 151 INSTR_SSE_RDRD,
140 INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2, 152 INSTR_SSF_RRDRD, INSTR_SSF_RRDRD2,
141 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD, 153 INSTR_SS_L0RDRD, INSTR_SS_LIRDRD, INSTR_SS_LLRDRD, INSTR_SS_RRRDRD,
@@ -191,31 +203,42 @@ static const struct operand operands[] =
191 [U4_12] = { 4, 12, 0 }, 203 [U4_12] = { 4, 12, 0 },
192 [U4_16] = { 4, 16, 0 }, 204 [U4_16] = { 4, 16, 0 },
193 [U4_20] = { 4, 20, 0 }, 205 [U4_20] = { 4, 20, 0 },
206 [U4_24] = { 4, 24, 0 },
207 [U4_28] = { 4, 28, 0 },
194 [U4_32] = { 4, 32, 0 }, 208 [U4_32] = { 4, 32, 0 },
209 [U4_36] = { 4, 36, 0 },
195 [U8_8] = { 8, 8, 0 }, 210 [U8_8] = { 8, 8, 0 },
196 [U8_16] = { 8, 16, 0 }, 211 [U8_16] = { 8, 16, 0 },
197 [U8_24] = { 8, 24, 0 }, 212 [U8_24] = { 8, 24, 0 },
198 [U8_32] = { 8, 32, 0 }, 213 [U8_32] = { 8, 32, 0 },
214 [J12_12] = { 12, 12, OPERAND_PCREL },
199 [I16_16] = { 16, 16, OPERAND_SIGNED }, 215 [I16_16] = { 16, 16, OPERAND_SIGNED },
200 [U16_16] = { 16, 16, 0 }, 216 [U16_16] = { 16, 16, 0 },
201 [U16_32] = { 16, 32, 0 }, 217 [U16_32] = { 16, 32, 0 },
202 [J16_16] = { 16, 16, OPERAND_PCREL }, 218 [J16_16] = { 16, 16, OPERAND_PCREL },
219 [J16_32] = { 16, 32, OPERAND_PCREL },
203 [I16_32] = { 16, 32, OPERAND_SIGNED }, 220 [I16_32] = { 16, 32, OPERAND_SIGNED },
221 [I24_24] = { 24, 24, OPERAND_SIGNED },
204 [J32_16] = { 32, 16, OPERAND_PCREL }, 222 [J32_16] = { 32, 16, OPERAND_PCREL },
205 [I32_16] = { 32, 16, OPERAND_SIGNED }, 223 [I32_16] = { 32, 16, OPERAND_SIGNED },
206 [U32_16] = { 32, 16, 0 }, 224 [U32_16] = { 32, 16, 0 },
207 [M_16] = { 4, 16, 0 }, 225 [M_16] = { 4, 16, 0 },
226 [M_20] = { 4, 20, 0 },
208 [RO_28] = { 4, 28, OPERAND_GPR } 227 [RO_28] = { 4, 28, OPERAND_GPR }
209}; 228};
210 229
211static const unsigned char formats[][7] = { 230static const unsigned char formats[][7] = {
212 [INSTR_E] = { 0xff, 0,0,0,0,0,0 }, 231 [INSTR_E] = { 0xff, 0,0,0,0,0,0 },
232 [INSTR_IE_UU] = { 0xff, U4_24,U4_28,0,0,0,0 },
233 [INSTR_MII_UPI] = { 0xff, U4_8,J12_12,I24_24 },
234 [INSTR_RIE_R0IU] = { 0xff, R_8,I16_16,U4_32,0,0,0 },
213 [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 }, 235 [INSTR_RIE_R0UU] = { 0xff, R_8,U16_16,U4_32,0,0,0 },
236 [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 },
214 [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 }, 237 [INSTR_RIE_RRPU] = { 0xff, R_8,R_12,U4_32,J16_16,0,0 },
215 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, 238 [INSTR_RIE_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
216 [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 }, 239 [INSTR_RIE_RRUUU] = { 0xff, R_8,R_12,U8_16,U8_24,U8_32,0 },
217 [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 }, 240 [INSTR_RIE_RUPI] = { 0xff, R_8,I8_32,U4_12,J16_16,0,0 },
218 [INSTR_RIE_RRI0] = { 0xff, R_8,R_12,I16_16,0,0,0 }, 241 [INSTR_RIE_RUPU] = { 0xff, R_8,U8_32,U4_12,J16_16,0,0 },
219 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 }, 242 [INSTR_RIL_RI] = { 0x0f, R_8,I32_16,0,0,0,0 },
220 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 }, 243 [INSTR_RIL_RP] = { 0x0f, R_8,J32_16,0,0,0,0 },
221 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 }, 244 [INSTR_RIL_RU] = { 0x0f, R_8,U32_16,0,0,0,0 },
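
(Aside, not part of the patch.) Each operands[] entry above is a { bits, shift, flags } descriptor: the width of the field, its starting bit position counted from the leftmost bit of the instruction, and flags such as signed, PC-relative or GPR. The standalone sketch below (not the kernel's extract_operand()) shows how such a descriptor is enough to pull a field out of the raw instruction bytes; the sample bytes encode opcode 0xc4 with extension 0x08 ("lgrl") and R1 = 1:

#include <stdio.h>

/* Pull a `bits`-wide field starting at bit position `shift` (counted from
 * the leftmost bit) out of a raw instruction, the way an operands[] entry
 * describes it.  Simplified; sign extension and flags are ignored. */
static unsigned int extract_bits(const unsigned char *code, int bits, int shift)
{
	unsigned int val = 0;
	int i;

	for (i = 0; i < bits; i++) {
		int bitpos = shift + i;

		val = (val << 1) | ((code[bitpos / 8] >> (7 - bitpos % 8)) & 1);
	}
	return val;
}

int main(void)
{
	/* lgrl %r1,<target>: opcode 0xc4, R1 in bits 8-11, extension 0x08. */
	unsigned char insn[6] = { 0xc4, 0x18, 0x00, 0x00, 0x00, 0x10 };

	printf("R_8    (4 bits at 8)   = %u\n", extract_bits(insn, 4, 8));
	printf("ext    (4 bits at 12)  = %u\n", extract_bits(insn, 4, 12));
	printf("J32_16 (32 bits at 16) = 0x%x\n", extract_bits(insn, 32, 16));
	return 0;
}
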
@@ -245,14 +268,18 @@ static const unsigned char formats[][7] = {
245 [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 }, 268 [INSTR_RRF_F0FR] = { 0xff, F_24,F_16,R_28,0,0,0 },
246 [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 }, 269 [INSTR_RRF_FFRU] = { 0xff, F_24,F_16,R_28,U4_20,0,0 },
247 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 }, 270 [INSTR_RRF_FUFF] = { 0xff, F_24,F_16,F_28,U4_20,0,0 },
271 [INSTR_RRF_FUFF2] = { 0xff, F_24,F_28,F_16,U4_20,0,0 },
248 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 }, 272 [INSTR_RRF_M0RR] = { 0xff, R_24,R_28,M_16,0,0,0 },
249 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 }, 273 [INSTR_RRF_R0RR] = { 0xff, R_24,R_16,R_28,0,0,0 },
250 [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 }, 274 [INSTR_RRF_R0RR2] = { 0xff, R_24,R_28,R_16,0,0,0 },
275 [INSTR_RRF_RMRR] = { 0xff, R_24,R_16,R_28,M_20,0,0 },
251 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 }, 276 [INSTR_RRF_RURR] = { 0xff, R_24,R_28,R_16,U4_20,0,0 },
252 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 }, 277 [INSTR_RRF_U0FF] = { 0xff, F_24,U4_16,F_28,0,0,0 },
253 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 }, 278 [INSTR_RRF_U0RF] = { 0xff, R_24,U4_16,F_28,0,0,0 },
254 [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 }, 279 [INSTR_RRF_U0RR] = { 0xff, R_24,R_28,U4_16,0,0,0 },
255 [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 }, 280 [INSTR_RRF_UUFF] = { 0xff, F_24,U4_16,F_28,U4_20,0,0 },
281 [INSTR_RRF_UUFR] = { 0xff, F_24,U4_16,R_28,U4_20,0,0 },
282 [INSTR_RRF_UURF] = { 0xff, R_24,U4_16,F_28,U4_20,0,0 },
256 [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 }, 283 [INSTR_RRR_F0FF] = { 0xff, F_24,F_28,F_16,0,0,0 },
257 [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 }, 284 [INSTR_RRS_RRRDU] = { 0xff, R_8,R_12,U4_32,D_20,B_16,0 },
258 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 }, 285 [INSTR_RR_FF] = { 0xff, F_8,F_12,0,0,0,0 },
@@ -264,12 +291,13 @@ static const unsigned char formats[][7] = {
264 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 }, 291 [INSTR_RSE_RRRD] = { 0xff, R_8,R_12,D_20,B_16,0,0 },
265 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 }, 292 [INSTR_RSE_RURD] = { 0xff, R_8,U4_12,D_20,B_16,0,0 },
266 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 }, 293 [INSTR_RSI_RRP] = { 0xff, R_8,R_12,J16_16,0,0,0 },
294 [INSTR_RSL_LRDFU] = { 0xff, F_32,D_20,L4_8,B_16,U4_36,0 },
267 [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 }, 295 [INSTR_RSL_R0RD] = { 0xff, D_20,L4_8,B_16,0,0,0 },
268 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 }, 296 [INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
269 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 }, 297 [INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
298 [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
270 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 }, 299 [INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
271 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 }, 300 [INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
272 [INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
273 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 }, 301 [INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
274 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 }, 302 [INSTR_RS_CCRD] = { 0xff, C_8,C_12,D_20,B_16,0,0 },
275 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 }, 303 [INSTR_RS_R0RD] = { 0xff, R_8,D_20,B_16,0,0,0 },
@@ -289,9 +317,10 @@ static const unsigned char formats[][7] = {
289 [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 }, 317 [INSTR_SIY_IRD] = { 0xff, D20_20,B_16,I8_8,0,0,0 },
290 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 }, 318 [INSTR_SIY_URD] = { 0xff, D20_20,B_16,U8_8,0,0,0 },
291 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 }, 319 [INSTR_SI_URD] = { 0xff, D_20,B_16,U8_8,0,0,0 },
320 [INSTR_SMI_U0RDP] = { 0xff, U4_8,J16_32,D_20,B_16,0,0 },
292 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 }, 321 [INSTR_SSE_RDRD] = { 0xff, D_20,B_16,D_36,B_32,0,0 },
293 [INSTR_SSF_RRDRD] = { 0x00, D_20,B_16,D_36,B_32,R_8,0 }, 322 [INSTR_SSF_RRDRD] = { 0x0f, D_20,B_16,D_36,B_32,R_8,0 },
294 [INSTR_SSF_RRDRD2]= { 0x00, R_8,D_20,B_16,D_36,B_32,0 }, 323 [INSTR_SSF_RRDRD2]= { 0x0f, R_8,D_20,B_16,D_36,B_32,0 },
295 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 }, 324 [INSTR_SS_L0RDRD] = { 0xff, D_20,L8_8,B_16,D_36,B_32,0 },
296 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 }, 325 [INSTR_SS_LIRDRD] = { 0xff, D_20,L4_8,B_16,D_36,B_32,U4_12 },
297 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 }, 326 [INSTR_SS_LLRDRD] = { 0xff, D_20,L4_8,B_16,D_36,L4_12,B_32 },
@@ -304,46 +333,157 @@ static const unsigned char formats[][7] = {
304 333
305enum { 334enum {
306 LONG_INSN_ALGHSIK, 335 LONG_INSN_ALGHSIK,
336 LONG_INSN_ALHHHR,
337 LONG_INSN_ALHHLR,
307 LONG_INSN_ALHSIK, 338 LONG_INSN_ALHSIK,
339 LONG_INSN_ALSIHN,
340 LONG_INSN_CDFBRA,
341 LONG_INSN_CDGBRA,
342 LONG_INSN_CDGTRA,
343 LONG_INSN_CDLFBR,
344 LONG_INSN_CDLFTR,
345 LONG_INSN_CDLGBR,
346 LONG_INSN_CDLGTR,
347 LONG_INSN_CEFBRA,
348 LONG_INSN_CEGBRA,
349 LONG_INSN_CELFBR,
350 LONG_INSN_CELGBR,
351 LONG_INSN_CFDBRA,
352 LONG_INSN_CFEBRA,
353 LONG_INSN_CFXBRA,
354 LONG_INSN_CGDBRA,
355 LONG_INSN_CGDTRA,
356 LONG_INSN_CGEBRA,
357 LONG_INSN_CGXBRA,
358 LONG_INSN_CGXTRA,
359 LONG_INSN_CLFDBR,
360 LONG_INSN_CLFDTR,
361 LONG_INSN_CLFEBR,
308 LONG_INSN_CLFHSI, 362 LONG_INSN_CLFHSI,
363 LONG_INSN_CLFXBR,
364 LONG_INSN_CLFXTR,
365 LONG_INSN_CLGDBR,
366 LONG_INSN_CLGDTR,
367 LONG_INSN_CLGEBR,
309 LONG_INSN_CLGFRL, 368 LONG_INSN_CLGFRL,
310 LONG_INSN_CLGHRL, 369 LONG_INSN_CLGHRL,
311 LONG_INSN_CLGHSI, 370 LONG_INSN_CLGHSI,
371 LONG_INSN_CLGXBR,
372 LONG_INSN_CLGXTR,
312 LONG_INSN_CLHHSI, 373 LONG_INSN_CLHHSI,
374 LONG_INSN_CXFBRA,
375 LONG_INSN_CXGBRA,
376 LONG_INSN_CXGTRA,
377 LONG_INSN_CXLFBR,
378 LONG_INSN_CXLFTR,
379 LONG_INSN_CXLGBR,
380 LONG_INSN_CXLGTR,
381 LONG_INSN_FIDBRA,
382 LONG_INSN_FIEBRA,
383 LONG_INSN_FIXBRA,
384 LONG_INSN_LDXBRA,
385 LONG_INSN_LEDBRA,
386 LONG_INSN_LEXBRA,
387 LONG_INSN_LLGFAT,
313 LONG_INSN_LLGFRL, 388 LONG_INSN_LLGFRL,
314 LONG_INSN_LLGHRL, 389 LONG_INSN_LLGHRL,
390 LONG_INSN_LLGTAT,
315 LONG_INSN_POPCNT, 391 LONG_INSN_POPCNT,
392 LONG_INSN_RIEMIT,
393 LONG_INSN_RINEXT,
394 LONG_INSN_RISBGN,
316 LONG_INSN_RISBHG, 395 LONG_INSN_RISBHG,
317 LONG_INSN_RISBLG, 396 LONG_INSN_RISBLG,
318 LONG_INSN_RINEXT, 397 LONG_INSN_SLHHHR,
319 LONG_INSN_RIEMIT, 398 LONG_INSN_SLHHLR,
320 LONG_INSN_TABORT, 399 LONG_INSN_TABORT,
321 LONG_INSN_TBEGIN, 400 LONG_INSN_TBEGIN,
322 LONG_INSN_TBEGINC, 401 LONG_INSN_TBEGINC,
402 LONG_INSN_PCISTG,
403 LONG_INSN_MPCIFC,
404 LONG_INSN_STPCIFC,
405 LONG_INSN_PCISTB,
323}; 406};
324 407
325static char *long_insn_name[] = { 408static char *long_insn_name[] = {
326 [LONG_INSN_ALGHSIK] = "alghsik", 409 [LONG_INSN_ALGHSIK] = "alghsik",
410 [LONG_INSN_ALHHHR] = "alhhhr",
411 [LONG_INSN_ALHHLR] = "alhhlr",
327 [LONG_INSN_ALHSIK] = "alhsik", 412 [LONG_INSN_ALHSIK] = "alhsik",
413 [LONG_INSN_ALSIHN] = "alsihn",
414 [LONG_INSN_CDFBRA] = "cdfbra",
415 [LONG_INSN_CDGBRA] = "cdgbra",
416 [LONG_INSN_CDGTRA] = "cdgtra",
417 [LONG_INSN_CDLFBR] = "cdlfbr",
418 [LONG_INSN_CDLFTR] = "cdlftr",
419 [LONG_INSN_CDLGBR] = "cdlgbr",
420 [LONG_INSN_CDLGTR] = "cdlgtr",
421 [LONG_INSN_CEFBRA] = "cefbra",
422 [LONG_INSN_CEGBRA] = "cegbra",
423 [LONG_INSN_CELFBR] = "celfbr",
424 [LONG_INSN_CELGBR] = "celgbr",
425 [LONG_INSN_CFDBRA] = "cfdbra",
426 [LONG_INSN_CFEBRA] = "cfebra",
427 [LONG_INSN_CFXBRA] = "cfxbra",
428 [LONG_INSN_CGDBRA] = "cgdbra",
429 [LONG_INSN_CGDTRA] = "cgdtra",
430 [LONG_INSN_CGEBRA] = "cgebra",
431 [LONG_INSN_CGXBRA] = "cgxbra",
432 [LONG_INSN_CGXTRA] = "cgxtra",
433 [LONG_INSN_CLFDBR] = "clfdbr",
434 [LONG_INSN_CLFDTR] = "clfdtr",
435 [LONG_INSN_CLFEBR] = "clfebr",
328 [LONG_INSN_CLFHSI] = "clfhsi", 436 [LONG_INSN_CLFHSI] = "clfhsi",
437 [LONG_INSN_CLFXBR] = "clfxbr",
438 [LONG_INSN_CLFXTR] = "clfxtr",
439 [LONG_INSN_CLGDBR] = "clgdbr",
440 [LONG_INSN_CLGDTR] = "clgdtr",
441 [LONG_INSN_CLGEBR] = "clgebr",
329 [LONG_INSN_CLGFRL] = "clgfrl", 442 [LONG_INSN_CLGFRL] = "clgfrl",
330 [LONG_INSN_CLGHRL] = "clghrl", 443 [LONG_INSN_CLGHRL] = "clghrl",
331 [LONG_INSN_CLGHSI] = "clghsi", 444 [LONG_INSN_CLGHSI] = "clghsi",
445 [LONG_INSN_CLGXBR] = "clgxbr",
446 [LONG_INSN_CLGXTR] = "clgxtr",
332 [LONG_INSN_CLHHSI] = "clhhsi", 447 [LONG_INSN_CLHHSI] = "clhhsi",
448 [LONG_INSN_CXFBRA] = "cxfbra",
449 [LONG_INSN_CXGBRA] = "cxgbra",
450 [LONG_INSN_CXGTRA] = "cxgtra",
451 [LONG_INSN_CXLFBR] = "cxlfbr",
452 [LONG_INSN_CXLFTR] = "cxlftr",
453 [LONG_INSN_CXLGBR] = "cxlgbr",
454 [LONG_INSN_CXLGTR] = "cxlgtr",
455 [LONG_INSN_FIDBRA] = "fidbra",
456 [LONG_INSN_FIEBRA] = "fiebra",
457 [LONG_INSN_FIXBRA] = "fixbra",
458 [LONG_INSN_LDXBRA] = "ldxbra",
459 [LONG_INSN_LEDBRA] = "ledbra",
460 [LONG_INSN_LEXBRA] = "lexbra",
461 [LONG_INSN_LLGFAT] = "llgfat",
333 [LONG_INSN_LLGFRL] = "llgfrl", 462 [LONG_INSN_LLGFRL] = "llgfrl",
334 [LONG_INSN_LLGHRL] = "llghrl", 463 [LONG_INSN_LLGHRL] = "llghrl",
464 [LONG_INSN_LLGTAT] = "llgtat",
335 [LONG_INSN_POPCNT] = "popcnt", 465 [LONG_INSN_POPCNT] = "popcnt",
466 [LONG_INSN_RIEMIT] = "riemit",
467 [LONG_INSN_RINEXT] = "rinext",
468 [LONG_INSN_RISBGN] = "risbgn",
336 [LONG_INSN_RISBHG] = "risbhg", 469 [LONG_INSN_RISBHG] = "risbhg",
337 [LONG_INSN_RISBLG] = "risblg", 470 [LONG_INSN_RISBLG] = "risblg",
338 [LONG_INSN_RINEXT] = "rinext", 471 [LONG_INSN_SLHHHR] = "slhhhr",
339 [LONG_INSN_RIEMIT] = "riemit", 472 [LONG_INSN_SLHHLR] = "slhhlr",
340 [LONG_INSN_TABORT] = "tabort", 473 [LONG_INSN_TABORT] = "tabort",
341 [LONG_INSN_TBEGIN] = "tbegin", 474 [LONG_INSN_TBEGIN] = "tbegin",
342 [LONG_INSN_TBEGINC] = "tbeginc", 475 [LONG_INSN_TBEGINC] = "tbeginc",
476 [LONG_INSN_PCISTG] = "pcistg",
477 [LONG_INSN_MPCIFC] = "mpcifc",
478 [LONG_INSN_STPCIFC] = "stpcifc",
479 [LONG_INSN_PCISTB] = "pcistb",
343}; 480};
344 481
345static struct insn opcode[] = { 482static struct insn opcode[] = {
346#ifdef CONFIG_64BIT 483#ifdef CONFIG_64BIT
484 { "bprp", 0xc5, INSTR_MII_UPI },
485 { "bpp", 0xc7, INSTR_SMI_U0RDP },
486 { "trtr", 0xd0, INSTR_SS_L0RDRD },
347 { "lmd", 0xef, INSTR_SS_RRRDRD3 }, 487 { "lmd", 0xef, INSTR_SS_RRRDRD3 },
348#endif 488#endif
349 { "spm", 0x04, INSTR_RR_R0 }, 489 { "spm", 0x04, INSTR_RR_R0 },
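
(Aside, not part of the patch.) The LONG_INSN_* list extended above (now including the zPCI mnemonics pcistg, mpcifc, stpcifc and pcistb) exists because mnemonics longer than five characters do not fit in the small inline name field of struct insn: such entries are stored as { 0, LONG_INSN_xxx } and the second byte indexes long_insn_name[]. A standalone two-entry sketch of that lookup, not the kernel's code:

#include <stdio.h>

/* Room for a five-character mnemonic plus NUL; the kernel's field is
 * similarly small, which is why "pcistg" cannot be stored inline. */
struct insn {
	char name[6];
	unsigned char opfrag;
};

enum { LONG_INSN_STPCIFC, LONG_INSN_PCISTG };

static const char * const long_insn_name[] = {
	[LONG_INSN_STPCIFC] = "stpcifc",
	[LONG_INSN_PCISTG]  = "pcistg",
};

static const struct insn table[] = {
	{ "pcilg",                 0xd2 },	/* short name, stored inline   */
	{ { 0, LONG_INSN_PCISTG }, 0xd0 },	/* long name, stored as index  */
};

static const char *insn_name(const struct insn *insn)
{
	/* A leading NUL byte marks an overlong mnemonic; name[1] is the index. */
	if (insn->name[0] == '\0')
		return long_insn_name[(unsigned char)insn->name[1]];
	return insn->name;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("0x%02x -> %s\n", table[i].opfrag, insn_name(&table[i]));
	return 0;
}
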
@@ -378,7 +518,6 @@ static struct insn opcode[] = {
378 { "lcdr", 0x23, INSTR_RR_FF }, 518 { "lcdr", 0x23, INSTR_RR_FF },
379 { "hdr", 0x24, INSTR_RR_FF }, 519 { "hdr", 0x24, INSTR_RR_FF },
380 { "ldxr", 0x25, INSTR_RR_FF }, 520 { "ldxr", 0x25, INSTR_RR_FF },
381 { "lrdr", 0x25, INSTR_RR_FF },
382 { "mxr", 0x26, INSTR_RR_FF }, 521 { "mxr", 0x26, INSTR_RR_FF },
383 { "mxdr", 0x27, INSTR_RR_FF }, 522 { "mxdr", 0x27, INSTR_RR_FF },
384 { "ldr", 0x28, INSTR_RR_FF }, 523 { "ldr", 0x28, INSTR_RR_FF },
@@ -395,7 +534,6 @@ static struct insn opcode[] = {
395 { "lcer", 0x33, INSTR_RR_FF }, 534 { "lcer", 0x33, INSTR_RR_FF },
396 { "her", 0x34, INSTR_RR_FF }, 535 { "her", 0x34, INSTR_RR_FF },
397 { "ledr", 0x35, INSTR_RR_FF }, 536 { "ledr", 0x35, INSTR_RR_FF },
398 { "lrer", 0x35, INSTR_RR_FF },
399 { "axr", 0x36, INSTR_RR_FF }, 537 { "axr", 0x36, INSTR_RR_FF },
400 { "sxr", 0x37, INSTR_RR_FF }, 538 { "sxr", 0x37, INSTR_RR_FF },
401 { "ler", 0x38, INSTR_RR_FF }, 539 { "ler", 0x38, INSTR_RR_FF },
@@ -403,7 +541,6 @@ static struct insn opcode[] = {
403 { "aer", 0x3a, INSTR_RR_FF }, 541 { "aer", 0x3a, INSTR_RR_FF },
404 { "ser", 0x3b, INSTR_RR_FF }, 542 { "ser", 0x3b, INSTR_RR_FF },
405 { "mder", 0x3c, INSTR_RR_FF }, 543 { "mder", 0x3c, INSTR_RR_FF },
406 { "mer", 0x3c, INSTR_RR_FF },
407 { "der", 0x3d, INSTR_RR_FF }, 544 { "der", 0x3d, INSTR_RR_FF },
408 { "aur", 0x3e, INSTR_RR_FF }, 545 { "aur", 0x3e, INSTR_RR_FF },
409 { "sur", 0x3f, INSTR_RR_FF }, 546 { "sur", 0x3f, INSTR_RR_FF },
@@ -454,7 +591,6 @@ static struct insn opcode[] = {
454 { "ae", 0x7a, INSTR_RX_FRRD }, 591 { "ae", 0x7a, INSTR_RX_FRRD },
455 { "se", 0x7b, INSTR_RX_FRRD }, 592 { "se", 0x7b, INSTR_RX_FRRD },
456 { "mde", 0x7c, INSTR_RX_FRRD }, 593 { "mde", 0x7c, INSTR_RX_FRRD },
457 { "me", 0x7c, INSTR_RX_FRRD },
458 { "de", 0x7d, INSTR_RX_FRRD }, 594 { "de", 0x7d, INSTR_RX_FRRD },
459 { "au", 0x7e, INSTR_RX_FRRD }, 595 { "au", 0x7e, INSTR_RX_FRRD },
460 { "su", 0x7f, INSTR_RX_FRRD }, 596 { "su", 0x7f, INSTR_RX_FRRD },
@@ -534,9 +670,9 @@ static struct insn opcode[] = {
534 670
535static struct insn opcode_01[] = { 671static struct insn opcode_01[] = {
536#ifdef CONFIG_64BIT 672#ifdef CONFIG_64BIT
537 { "sam64", 0x0e, INSTR_E },
538 { "pfpo", 0x0a, INSTR_E },
539 { "ptff", 0x04, INSTR_E }, 673 { "ptff", 0x04, INSTR_E },
674 { "pfpo", 0x0a, INSTR_E },
675 { "sam64", 0x0e, INSTR_E },
540#endif 676#endif
541 { "pr", 0x01, INSTR_E }, 677 { "pr", 0x01, INSTR_E },
542 { "upt", 0x02, INSTR_E }, 678 { "upt", 0x02, INSTR_E },
@@ -605,19 +741,28 @@ static struct insn opcode_aa[] = {
605 741
606static struct insn opcode_b2[] = { 742static struct insn opcode_b2[] = {
607#ifdef CONFIG_64BIT 743#ifdef CONFIG_64BIT
608 { "sske", 0x2b, INSTR_RRF_M0RR },
609 { "stckf", 0x7c, INSTR_S_RD }, 744 { "stckf", 0x7c, INSTR_S_RD },
610 { "cu21", 0xa6, INSTR_RRF_M0RR }, 745 { "lpp", 0x80, INSTR_S_RD },
611 { "cuutf", 0xa6, INSTR_RRF_M0RR }, 746 { "lcctl", 0x84, INSTR_S_RD },
612 { "cu12", 0xa7, INSTR_RRF_M0RR }, 747 { "lpctl", 0x85, INSTR_S_RD },
613 { "cutfu", 0xa7, INSTR_RRF_M0RR }, 748 { "qsi", 0x86, INSTR_S_RD },
749 { "lsctl", 0x87, INSTR_S_RD },
750 { "qctri", 0x8e, INSTR_S_RD },
614 { "stfle", 0xb0, INSTR_S_RD }, 751 { "stfle", 0xb0, INSTR_S_RD },
615 { "lpswe", 0xb2, INSTR_S_RD }, 752 { "lpswe", 0xb2, INSTR_S_RD },
753 { "srnmb", 0xb8, INSTR_S_RD },
616 { "srnmt", 0xb9, INSTR_S_RD }, 754 { "srnmt", 0xb9, INSTR_S_RD },
617 { "lfas", 0xbd, INSTR_S_RD }, 755 { "lfas", 0xbd, INSTR_S_RD },
618 { "etndg", 0xec, INSTR_RRE_R0 }, 756 { "scctr", 0xe0, INSTR_RRE_RR },
757 { "spctr", 0xe1, INSTR_RRE_RR },
758 { "ecctr", 0xe4, INSTR_RRE_RR },
759 { "epctr", 0xe5, INSTR_RRE_RR },
760 { "ppa", 0xe8, INSTR_RRF_U0RR },
761 { "etnd", 0xec, INSTR_RRE_R0 },
762 { "ecpga", 0xed, INSTR_RRE_RR },
763 { "tend", 0xf8, INSTR_S_00 },
764 { "niai", 0xfa, INSTR_IE_UU },
619 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD }, 765 { { 0, LONG_INSN_TABORT }, 0xfc, INSTR_S_RD },
620 { "tend", 0xf8, INSTR_S_RD },
621#endif 766#endif
622 { "stidp", 0x02, INSTR_S_RD }, 767 { "stidp", 0x02, INSTR_S_RD },
623 { "sck", 0x04, INSTR_S_RD }, 768 { "sck", 0x04, INSTR_S_RD },
@@ -635,8 +780,8 @@ static struct insn opcode_b2[] = {
635 { "sie", 0x14, INSTR_S_RD }, 780 { "sie", 0x14, INSTR_S_RD },
636 { "pc", 0x18, INSTR_S_RD }, 781 { "pc", 0x18, INSTR_S_RD },
637 { "sac", 0x19, INSTR_S_RD }, 782 { "sac", 0x19, INSTR_S_RD },
638 { "servc", 0x20, INSTR_RRE_RR },
639 { "cfc", 0x1a, INSTR_S_RD }, 783 { "cfc", 0x1a, INSTR_S_RD },
784 { "servc", 0x20, INSTR_RRE_RR },
640 { "ipte", 0x21, INSTR_RRE_RR }, 785 { "ipte", 0x21, INSTR_RRE_RR },
641 { "ipm", 0x22, INSTR_RRE_R0 }, 786 { "ipm", 0x22, INSTR_RRE_R0 },
642 { "ivsk", 0x23, INSTR_RRE_RR }, 787 { "ivsk", 0x23, INSTR_RRE_RR },
@@ -647,9 +792,9 @@ static struct insn opcode_b2[] = {
647 { "pt", 0x28, INSTR_RRE_RR }, 792 { "pt", 0x28, INSTR_RRE_RR },
648 { "iske", 0x29, INSTR_RRE_RR }, 793 { "iske", 0x29, INSTR_RRE_RR },
649 { "rrbe", 0x2a, INSTR_RRE_RR }, 794 { "rrbe", 0x2a, INSTR_RRE_RR },
650 { "sske", 0x2b, INSTR_RRE_RR }, 795 { "sske", 0x2b, INSTR_RRF_M0RR },
651 { "tb", 0x2c, INSTR_RRE_0R }, 796 { "tb", 0x2c, INSTR_RRE_0R },
652 { "dxr", 0x2d, INSTR_RRE_F0 }, 797 { "dxr", 0x2d, INSTR_RRE_FF },
653 { "pgin", 0x2e, INSTR_RRE_RR }, 798 { "pgin", 0x2e, INSTR_RRE_RR },
654 { "pgout", 0x2f, INSTR_RRE_RR }, 799 { "pgout", 0x2f, INSTR_RRE_RR },
655 { "csch", 0x30, INSTR_S_00 }, 800 { "csch", 0x30, INSTR_S_00 },
@@ -667,8 +812,8 @@ static struct insn opcode_b2[] = {
667 { "schm", 0x3c, INSTR_S_00 }, 812 { "schm", 0x3c, INSTR_S_00 },
668 { "bakr", 0x40, INSTR_RRE_RR }, 813 { "bakr", 0x40, INSTR_RRE_RR },
669 { "cksm", 0x41, INSTR_RRE_RR }, 814 { "cksm", 0x41, INSTR_RRE_RR },
670 { "sqdr", 0x44, INSTR_RRE_F0 }, 815 { "sqdr", 0x44, INSTR_RRE_FF },
671 { "sqer", 0x45, INSTR_RRE_F0 }, 816 { "sqer", 0x45, INSTR_RRE_FF },
672 { "stura", 0x46, INSTR_RRE_RR }, 817 { "stura", 0x46, INSTR_RRE_RR },
673 { "msta", 0x47, INSTR_RRE_R0 }, 818 { "msta", 0x47, INSTR_RRE_R0 },
674 { "palb", 0x48, INSTR_RRE_00 }, 819 { "palb", 0x48, INSTR_RRE_00 },
@@ -694,14 +839,14 @@ static struct insn opcode_b2[] = {
694 { "rp", 0x77, INSTR_S_RD }, 839 { "rp", 0x77, INSTR_S_RD },
695 { "stcke", 0x78, INSTR_S_RD }, 840 { "stcke", 0x78, INSTR_S_RD },
696 { "sacf", 0x79, INSTR_S_RD }, 841 { "sacf", 0x79, INSTR_S_RD },
697 { "spp", 0x80, INSTR_S_RD },
698 { "stsi", 0x7d, INSTR_S_RD }, 842 { "stsi", 0x7d, INSTR_S_RD },
843 { "spp", 0x80, INSTR_S_RD },
699 { "srnm", 0x99, INSTR_S_RD }, 844 { "srnm", 0x99, INSTR_S_RD },
700 { "stfpc", 0x9c, INSTR_S_RD }, 845 { "stfpc", 0x9c, INSTR_S_RD },
701 { "lfpc", 0x9d, INSTR_S_RD }, 846 { "lfpc", 0x9d, INSTR_S_RD },
702 { "tre", 0xa5, INSTR_RRE_RR }, 847 { "tre", 0xa5, INSTR_RRE_RR },
703 { "cuutf", 0xa6, INSTR_RRE_RR }, 848 { "cuutf", 0xa6, INSTR_RRF_M0RR },
704 { "cutfu", 0xa7, INSTR_RRE_RR }, 849 { "cutfu", 0xa7, INSTR_RRF_M0RR },
705 { "stfl", 0xb1, INSTR_S_RD }, 850 { "stfl", 0xb1, INSTR_S_RD },
706 { "trap4", 0xff, INSTR_S_RD }, 851 { "trap4", 0xff, INSTR_S_RD },
707 { "", 0, INSTR_INVALID } 852 { "", 0, INSTR_INVALID }
@@ -715,72 +860,87 @@ static struct insn opcode_b3[] = {
715 { "myr", 0x3b, INSTR_RRF_F0FF }, 860 { "myr", 0x3b, INSTR_RRF_F0FF },
716 { "mayhr", 0x3c, INSTR_RRF_F0FF }, 861 { "mayhr", 0x3c, INSTR_RRF_F0FF },
717 { "myhr", 0x3d, INSTR_RRF_F0FF }, 862 { "myhr", 0x3d, INSTR_RRF_F0FF },
718 { "cegbr", 0xa4, INSTR_RRE_RR },
719 { "cdgbr", 0xa5, INSTR_RRE_RR },
720 { "cxgbr", 0xa6, INSTR_RRE_RR },
721 { "cgebr", 0xa8, INSTR_RRF_U0RF },
722 { "cgdbr", 0xa9, INSTR_RRF_U0RF },
723 { "cgxbr", 0xaa, INSTR_RRF_U0RF },
724 { "cfer", 0xb8, INSTR_RRF_U0RF },
725 { "cfdr", 0xb9, INSTR_RRF_U0RF },
726 { "cfxr", 0xba, INSTR_RRF_U0RF },
727 { "cegr", 0xc4, INSTR_RRE_RR },
728 { "cdgr", 0xc5, INSTR_RRE_RR },
729 { "cxgr", 0xc6, INSTR_RRE_RR },
730 { "cger", 0xc8, INSTR_RRF_U0RF },
731 { "cgdr", 0xc9, INSTR_RRF_U0RF },
732 { "cgxr", 0xca, INSTR_RRF_U0RF },
733 { "lpdfr", 0x70, INSTR_RRE_FF }, 863 { "lpdfr", 0x70, INSTR_RRE_FF },
734 { "lndfr", 0x71, INSTR_RRE_FF }, 864 { "lndfr", 0x71, INSTR_RRE_FF },
735 { "cpsdr", 0x72, INSTR_RRF_F0FF2 }, 865 { "cpsdr", 0x72, INSTR_RRF_F0FF2 },
736 { "lcdfr", 0x73, INSTR_RRE_FF }, 866 { "lcdfr", 0x73, INSTR_RRE_FF },
867 { "sfasr", 0x85, INSTR_RRE_R0 },
868 { { 0, LONG_INSN_CELFBR }, 0x90, INSTR_RRF_UUFR },
869 { { 0, LONG_INSN_CDLFBR }, 0x91, INSTR_RRF_UUFR },
870 { { 0, LONG_INSN_CXLFBR }, 0x92, INSTR_RRF_UURF },
871 { { 0, LONG_INSN_CEFBRA }, 0x94, INSTR_RRF_UUFR },
872 { { 0, LONG_INSN_CDFBRA }, 0x95, INSTR_RRF_UUFR },
873 { { 0, LONG_INSN_CXFBRA }, 0x96, INSTR_RRF_UURF },
874 { { 0, LONG_INSN_CFEBRA }, 0x98, INSTR_RRF_UURF },
875 { { 0, LONG_INSN_CFDBRA }, 0x99, INSTR_RRF_UURF },
876 { { 0, LONG_INSN_CFXBRA }, 0x9a, INSTR_RRF_UUFR },
877 { { 0, LONG_INSN_CLFEBR }, 0x9c, INSTR_RRF_UURF },
878 { { 0, LONG_INSN_CLFDBR }, 0x9d, INSTR_RRF_UURF },
879 { { 0, LONG_INSN_CLFXBR }, 0x9e, INSTR_RRF_UUFR },
880 { { 0, LONG_INSN_CELGBR }, 0xa0, INSTR_RRF_UUFR },
881 { { 0, LONG_INSN_CDLGBR }, 0xa1, INSTR_RRF_UUFR },
882 { { 0, LONG_INSN_CXLGBR }, 0xa2, INSTR_RRF_UURF },
883 { { 0, LONG_INSN_CEGBRA }, 0xa4, INSTR_RRF_UUFR },
884 { { 0, LONG_INSN_CDGBRA }, 0xa5, INSTR_RRF_UUFR },
885 { { 0, LONG_INSN_CXGBRA }, 0xa6, INSTR_RRF_UURF },
886 { { 0, LONG_INSN_CGEBRA }, 0xa8, INSTR_RRF_UURF },
887 { { 0, LONG_INSN_CGDBRA }, 0xa9, INSTR_RRF_UURF },
888 { { 0, LONG_INSN_CGXBRA }, 0xaa, INSTR_RRF_UUFR },
889 { { 0, LONG_INSN_CLGEBR }, 0xac, INSTR_RRF_UURF },
890 { { 0, LONG_INSN_CLGDBR }, 0xad, INSTR_RRF_UURF },
891 { { 0, LONG_INSN_CLGXBR }, 0xae, INSTR_RRF_UUFR },
737 { "ldgr", 0xc1, INSTR_RRE_FR }, 892 { "ldgr", 0xc1, INSTR_RRE_FR },
893 { "cegr", 0xc4, INSTR_RRE_FR },
894 { "cdgr", 0xc5, INSTR_RRE_FR },
895 { "cxgr", 0xc6, INSTR_RRE_FR },
896 { "cger", 0xc8, INSTR_RRF_U0RF },
897 { "cgdr", 0xc9, INSTR_RRF_U0RF },
898 { "cgxr", 0xca, INSTR_RRF_U0RF },
738 { "lgdr", 0xcd, INSTR_RRE_RF }, 899 { "lgdr", 0xcd, INSTR_RRE_RF },
739 { "adtr", 0xd2, INSTR_RRR_F0FF }, 900 { "mdtra", 0xd0, INSTR_RRF_FUFF2 },
740 { "axtr", 0xda, INSTR_RRR_F0FF }, 901 { "ddtra", 0xd1, INSTR_RRF_FUFF2 },
741 { "cdtr", 0xe4, INSTR_RRE_FF }, 902 { "adtra", 0xd2, INSTR_RRF_FUFF2 },
742 { "cxtr", 0xec, INSTR_RRE_FF }, 903 { "sdtra", 0xd3, INSTR_RRF_FUFF2 },
904 { "ldetr", 0xd4, INSTR_RRF_0UFF },
905 { "ledtr", 0xd5, INSTR_RRF_UUFF },
906 { "ltdtr", 0xd6, INSTR_RRE_FF },
907 { "fidtr", 0xd7, INSTR_RRF_UUFF },
908 { "mxtra", 0xd8, INSTR_RRF_FUFF2 },
909 { "dxtra", 0xd9, INSTR_RRF_FUFF2 },
910 { "axtra", 0xda, INSTR_RRF_FUFF2 },
911 { "sxtra", 0xdb, INSTR_RRF_FUFF2 },
912 { "lxdtr", 0xdc, INSTR_RRF_0UFF },
913 { "ldxtr", 0xdd, INSTR_RRF_UUFF },
914 { "ltxtr", 0xde, INSTR_RRE_FF },
915 { "fixtr", 0xdf, INSTR_RRF_UUFF },
743 { "kdtr", 0xe0, INSTR_RRE_FF }, 916 { "kdtr", 0xe0, INSTR_RRE_FF },
744 { "kxtr", 0xe8, INSTR_RRE_FF }, 917 { { 0, LONG_INSN_CGDTRA }, 0xe1, INSTR_RRF_UURF },
745 { "cedtr", 0xf4, INSTR_RRE_FF },
746 { "cextr", 0xfc, INSTR_RRE_FF },
747 { "cdgtr", 0xf1, INSTR_RRE_FR },
748 { "cxgtr", 0xf9, INSTR_RRE_FR },
749 { "cdstr", 0xf3, INSTR_RRE_FR },
750 { "cxstr", 0xfb, INSTR_RRE_FR },
751 { "cdutr", 0xf2, INSTR_RRE_FR },
752 { "cxutr", 0xfa, INSTR_RRE_FR },
753 { "cgdtr", 0xe1, INSTR_RRF_U0RF },
754 { "cgxtr", 0xe9, INSTR_RRF_U0RF },
755 { "csdtr", 0xe3, INSTR_RRE_RF },
756 { "csxtr", 0xeb, INSTR_RRE_RF },
757 { "cudtr", 0xe2, INSTR_RRE_RF }, 918 { "cudtr", 0xe2, INSTR_RRE_RF },
758 { "cuxtr", 0xea, INSTR_RRE_RF }, 919 { "csdtr", 0xe3, INSTR_RRE_RF },
759 { "ddtr", 0xd1, INSTR_RRR_F0FF }, 920 { "cdtr", 0xe4, INSTR_RRE_FF },
760 { "dxtr", 0xd9, INSTR_RRR_F0FF },
761 { "eedtr", 0xe5, INSTR_RRE_RF }, 921 { "eedtr", 0xe5, INSTR_RRE_RF },
762 { "eextr", 0xed, INSTR_RRE_RF },
763 { "esdtr", 0xe7, INSTR_RRE_RF }, 922 { "esdtr", 0xe7, INSTR_RRE_RF },
923 { "kxtr", 0xe8, INSTR_RRE_FF },
924 { { 0, LONG_INSN_CGXTRA }, 0xe9, INSTR_RRF_UUFR },
925 { "cuxtr", 0xea, INSTR_RRE_RF },
926 { "csxtr", 0xeb, INSTR_RRE_RF },
927 { "cxtr", 0xec, INSTR_RRE_FF },
928 { "eextr", 0xed, INSTR_RRE_RF },
764 { "esxtr", 0xef, INSTR_RRE_RF }, 929 { "esxtr", 0xef, INSTR_RRE_RF },
765 { "iedtr", 0xf6, INSTR_RRF_F0FR }, 930 { { 0, LONG_INSN_CDGTRA }, 0xf1, INSTR_RRF_UUFR },
766 { "iextr", 0xfe, INSTR_RRF_F0FR }, 931 { "cdutr", 0xf2, INSTR_RRE_FR },
767 { "ltdtr", 0xd6, INSTR_RRE_FF }, 932 { "cdstr", 0xf3, INSTR_RRE_FR },
768 { "ltxtr", 0xde, INSTR_RRE_FF }, 933 { "cedtr", 0xf4, INSTR_RRE_FF },
769 { "fidtr", 0xd7, INSTR_RRF_UUFF },
770 { "fixtr", 0xdf, INSTR_RRF_UUFF },
771 { "ldetr", 0xd4, INSTR_RRF_0UFF },
772 { "lxdtr", 0xdc, INSTR_RRF_0UFF },
773 { "ledtr", 0xd5, INSTR_RRF_UUFF },
774 { "ldxtr", 0xdd, INSTR_RRF_UUFF },
775 { "mdtr", 0xd0, INSTR_RRR_F0FF },
776 { "mxtr", 0xd8, INSTR_RRR_F0FF },
777 { "qadtr", 0xf5, INSTR_RRF_FUFF }, 934 { "qadtr", 0xf5, INSTR_RRF_FUFF },
778 { "qaxtr", 0xfd, INSTR_RRF_FUFF }, 935 { "iedtr", 0xf6, INSTR_RRF_F0FR },
779 { "rrdtr", 0xf7, INSTR_RRF_FFRU }, 936 { "rrdtr", 0xf7, INSTR_RRF_FFRU },
937 { { 0, LONG_INSN_CXGTRA }, 0xf9, INSTR_RRF_UURF },
938 { "cxutr", 0xfa, INSTR_RRE_FR },
939 { "cxstr", 0xfb, INSTR_RRE_FR },
940 { "cextr", 0xfc, INSTR_RRE_FF },
941 { "qaxtr", 0xfd, INSTR_RRF_FUFF },
942 { "iextr", 0xfe, INSTR_RRF_F0FR },
780 { "rrxtr", 0xff, INSTR_RRF_FFRU }, 943 { "rrxtr", 0xff, INSTR_RRF_FFRU },
781 { "sfasr", 0x85, INSTR_RRE_R0 },
782 { "sdtr", 0xd3, INSTR_RRR_F0FF },
783 { "sxtr", 0xdb, INSTR_RRR_F0FF },
784#endif 944#endif
785 { "lpebr", 0x00, INSTR_RRE_FF }, 945 { "lpebr", 0x00, INSTR_RRE_FF },
786 { "lnebr", 0x01, INSTR_RRE_FF }, 946 { "lnebr", 0x01, INSTR_RRE_FF },
@@ -827,10 +987,10 @@ static struct insn opcode_b3[] = {
827 { "lnxbr", 0x41, INSTR_RRE_FF }, 987 { "lnxbr", 0x41, INSTR_RRE_FF },
828 { "ltxbr", 0x42, INSTR_RRE_FF }, 988 { "ltxbr", 0x42, INSTR_RRE_FF },
829 { "lcxbr", 0x43, INSTR_RRE_FF }, 989 { "lcxbr", 0x43, INSTR_RRE_FF },
830 { "ledbr", 0x44, INSTR_RRE_FF }, 990 { { 0, LONG_INSN_LEDBRA }, 0x44, INSTR_RRF_UUFF },
831 { "ldxbr", 0x45, INSTR_RRE_FF }, 991 { { 0, LONG_INSN_LDXBRA }, 0x45, INSTR_RRF_UUFF },
832 { "lexbr", 0x46, INSTR_RRE_FF }, 992 { { 0, LONG_INSN_LEXBRA }, 0x46, INSTR_RRF_UUFF },
833 { "fixbr", 0x47, INSTR_RRF_U0FF }, 993 { { 0, LONG_INSN_FIXBRA }, 0x47, INSTR_RRF_UUFF },
834 { "kxbr", 0x48, INSTR_RRE_FF }, 994 { "kxbr", 0x48, INSTR_RRE_FF },
835 { "cxbr", 0x49, INSTR_RRE_FF }, 995 { "cxbr", 0x49, INSTR_RRE_FF },
836 { "axbr", 0x4a, INSTR_RRE_FF }, 996 { "axbr", 0x4a, INSTR_RRE_FF },
@@ -840,24 +1000,24 @@ static struct insn opcode_b3[] = {
840 { "tbedr", 0x50, INSTR_RRF_U0FF }, 1000 { "tbedr", 0x50, INSTR_RRF_U0FF },
841 { "tbdr", 0x51, INSTR_RRF_U0FF }, 1001 { "tbdr", 0x51, INSTR_RRF_U0FF },
842 { "diebr", 0x53, INSTR_RRF_FUFF }, 1002 { "diebr", 0x53, INSTR_RRF_FUFF },
843 { "fiebr", 0x57, INSTR_RRF_U0FF }, 1003 { { 0, LONG_INSN_FIEBRA }, 0x57, INSTR_RRF_UUFF },
844 { "thder", 0x58, INSTR_RRE_RR }, 1004 { "thder", 0x58, INSTR_RRE_FF },
845 { "thdr", 0x59, INSTR_RRE_RR }, 1005 { "thdr", 0x59, INSTR_RRE_FF },
846 { "didbr", 0x5b, INSTR_RRF_FUFF }, 1006 { "didbr", 0x5b, INSTR_RRF_FUFF },
847 { "fidbr", 0x5f, INSTR_RRF_U0FF }, 1007 { { 0, LONG_INSN_FIDBRA }, 0x5f, INSTR_RRF_UUFF },
848 { "lpxr", 0x60, INSTR_RRE_FF }, 1008 { "lpxr", 0x60, INSTR_RRE_FF },
849 { "lnxr", 0x61, INSTR_RRE_FF }, 1009 { "lnxr", 0x61, INSTR_RRE_FF },
850 { "ltxr", 0x62, INSTR_RRE_FF }, 1010 { "ltxr", 0x62, INSTR_RRE_FF },
851 { "lcxr", 0x63, INSTR_RRE_FF }, 1011 { "lcxr", 0x63, INSTR_RRE_FF },
852 { "lxr", 0x65, INSTR_RRE_RR }, 1012 { "lxr", 0x65, INSTR_RRE_FF },
853 { "lexr", 0x66, INSTR_RRE_FF }, 1013 { "lexr", 0x66, INSTR_RRE_FF },
854 { "fixr", 0x67, INSTR_RRF_U0FF }, 1014 { "fixr", 0x67, INSTR_RRE_FF },
855 { "cxr", 0x69, INSTR_RRE_FF }, 1015 { "cxr", 0x69, INSTR_RRE_FF },
856 { "lzer", 0x74, INSTR_RRE_R0 }, 1016 { "lzer", 0x74, INSTR_RRE_F0 },
857 { "lzdr", 0x75, INSTR_RRE_R0 }, 1017 { "lzdr", 0x75, INSTR_RRE_F0 },
858 { "lzxr", 0x76, INSTR_RRE_R0 }, 1018 { "lzxr", 0x76, INSTR_RRE_F0 },
859 { "fier", 0x77, INSTR_RRF_U0FF }, 1019 { "fier", 0x77, INSTR_RRE_FF },
860 { "fidr", 0x7f, INSTR_RRF_U0FF }, 1020 { "fidr", 0x7f, INSTR_RRE_FF },
861 { "sfpc", 0x84, INSTR_RRE_RR_OPT }, 1021 { "sfpc", 0x84, INSTR_RRE_RR_OPT },
862 { "efpc", 0x8c, INSTR_RRE_RR_OPT }, 1022 { "efpc", 0x8c, INSTR_RRE_RR_OPT },
863 { "cefbr", 0x94, INSTR_RRE_RF }, 1023 { "cefbr", 0x94, INSTR_RRE_RF },
@@ -866,9 +1026,12 @@ static struct insn opcode_b3[] = {
866 { "cfebr", 0x98, INSTR_RRF_U0RF }, 1026 { "cfebr", 0x98, INSTR_RRF_U0RF },
867 { "cfdbr", 0x99, INSTR_RRF_U0RF }, 1027 { "cfdbr", 0x99, INSTR_RRF_U0RF },
868 { "cfxbr", 0x9a, INSTR_RRF_U0RF }, 1028 { "cfxbr", 0x9a, INSTR_RRF_U0RF },
869 { "cefr", 0xb4, INSTR_RRE_RF }, 1029 { "cefr", 0xb4, INSTR_RRE_FR },
870 { "cdfr", 0xb5, INSTR_RRE_RF }, 1030 { "cdfr", 0xb5, INSTR_RRE_FR },
871 { "cxfr", 0xb6, INSTR_RRE_RF }, 1031 { "cxfr", 0xb6, INSTR_RRE_FR },
1032 { "cfer", 0xb8, INSTR_RRF_U0RF },
1033 { "cfdr", 0xb9, INSTR_RRF_U0RF },
1034 { "cfxr", 0xba, INSTR_RRF_U0RF },
872 { "", 0, INSTR_INVALID } 1035 { "", 0, INSTR_INVALID }
873}; 1036};
874 1037
@@ -910,7 +1073,23 @@ static struct insn opcode_b9[] = {
910 { "lhr", 0x27, INSTR_RRE_RR }, 1073 { "lhr", 0x27, INSTR_RRE_RR },
911 { "cgfr", 0x30, INSTR_RRE_RR }, 1074 { "cgfr", 0x30, INSTR_RRE_RR },
912 { "clgfr", 0x31, INSTR_RRE_RR }, 1075 { "clgfr", 0x31, INSTR_RRE_RR },
1076 { "cfdtr", 0x41, INSTR_RRF_UURF },
1077 { { 0, LONG_INSN_CLGDTR }, 0x42, INSTR_RRF_UURF },
1078 { { 0, LONG_INSN_CLFDTR }, 0x43, INSTR_RRF_UURF },
913 { "bctgr", 0x46, INSTR_RRE_RR }, 1079 { "bctgr", 0x46, INSTR_RRE_RR },
1080 { "cfxtr", 0x49, INSTR_RRF_UURF },
1081 { { 0, LONG_INSN_CLGXTR }, 0x4a, INSTR_RRF_UUFR },
1082 { { 0, LONG_INSN_CLFXTR }, 0x4b, INSTR_RRF_UUFR },
1083 { "cdftr", 0x51, INSTR_RRF_UUFR },
1084 { { 0, LONG_INSN_CDLGTR }, 0x52, INSTR_RRF_UUFR },
1085 { { 0, LONG_INSN_CDLFTR }, 0x53, INSTR_RRF_UUFR },
1086 { "cxftr", 0x59, INSTR_RRF_UURF },
1087 { { 0, LONG_INSN_CXLGTR }, 0x5a, INSTR_RRF_UURF },
1088 { { 0, LONG_INSN_CXLFTR }, 0x5b, INSTR_RRF_UUFR },
1089 { "cgrt", 0x60, INSTR_RRF_U0RR },
1090 { "clgrt", 0x61, INSTR_RRF_U0RR },
1091 { "crt", 0x72, INSTR_RRF_U0RR },
1092 { "clrt", 0x73, INSTR_RRF_U0RR },
914 { "ngr", 0x80, INSTR_RRE_RR }, 1093 { "ngr", 0x80, INSTR_RRE_RR },
915 { "ogr", 0x81, INSTR_RRE_RR }, 1094 { "ogr", 0x81, INSTR_RRE_RR },
916 { "xgr", 0x82, INSTR_RRE_RR }, 1095 { "xgr", 0x82, INSTR_RRE_RR },
@@ -923,32 +1102,34 @@ static struct insn opcode_b9[] = {
923 { "slbgr", 0x89, INSTR_RRE_RR }, 1102 { "slbgr", 0x89, INSTR_RRE_RR },
924 { "cspg", 0x8a, INSTR_RRE_RR }, 1103 { "cspg", 0x8a, INSTR_RRE_RR },
925 { "idte", 0x8e, INSTR_RRF_R0RR }, 1104 { "idte", 0x8e, INSTR_RRF_R0RR },
1105 { "crdte", 0x8f, INSTR_RRF_RMRR },
926 { "llcr", 0x94, INSTR_RRE_RR }, 1106 { "llcr", 0x94, INSTR_RRE_RR },
927 { "llhr", 0x95, INSTR_RRE_RR }, 1107 { "llhr", 0x95, INSTR_RRE_RR },
928 { "esea", 0x9d, INSTR_RRE_R0 }, 1108 { "esea", 0x9d, INSTR_RRE_R0 },
1109 { "ptf", 0xa2, INSTR_RRE_R0 },
929 { "lptea", 0xaa, INSTR_RRF_RURR }, 1110 { "lptea", 0xaa, INSTR_RRF_RURR },
1111 { "rrbm", 0xae, INSTR_RRE_RR },
1112 { "pfmf", 0xaf, INSTR_RRE_RR },
930 { "cu14", 0xb0, INSTR_RRF_M0RR }, 1113 { "cu14", 0xb0, INSTR_RRF_M0RR },
931 { "cu24", 0xb1, INSTR_RRF_M0RR }, 1114 { "cu24", 0xb1, INSTR_RRF_M0RR },
932 { "cu41", 0xb2, INSTR_RRF_M0RR }, 1115 { "cu41", 0xb2, INSTR_RRE_RR },
933 { "cu42", 0xb3, INSTR_RRF_M0RR }, 1116 { "cu42", 0xb3, INSTR_RRE_RR },
934 { "crt", 0x72, INSTR_RRF_U0RR },
935 { "cgrt", 0x60, INSTR_RRF_U0RR },
936 { "clrt", 0x73, INSTR_RRF_U0RR },
937 { "clgrt", 0x61, INSTR_RRF_U0RR },
938 { "ptf", 0xa2, INSTR_RRE_R0 },
939 { "pfmf", 0xaf, INSTR_RRE_RR },
940 { "trte", 0xbf, INSTR_RRF_M0RR },
941 { "trtre", 0xbd, INSTR_RRF_M0RR }, 1117 { "trtre", 0xbd, INSTR_RRF_M0RR },
1118 { "srstu", 0xbe, INSTR_RRE_RR },
1119 { "trte", 0xbf, INSTR_RRF_M0RR },
942 { "ahhhr", 0xc8, INSTR_RRF_R0RR2 }, 1120 { "ahhhr", 0xc8, INSTR_RRF_R0RR2 },
943 { "shhhr", 0xc9, INSTR_RRF_R0RR2 }, 1121 { "shhhr", 0xc9, INSTR_RRF_R0RR2 },
944 { "alhhh", 0xca, INSTR_RRF_R0RR2 }, 1122 { { 0, LONG_INSN_ALHHHR }, 0xca, INSTR_RRF_R0RR2 },
945 { "alhhl", 0xca, INSTR_RRF_R0RR2 }, 1123 { { 0, LONG_INSN_SLHHHR }, 0xcb, INSTR_RRF_R0RR2 },
946 { "slhhh", 0xcb, INSTR_RRF_R0RR2 }, 1124 { "chhr", 0xcd, INSTR_RRE_RR },
947 { "chhr ", 0xcd, INSTR_RRE_RR },
948 { "clhhr", 0xcf, INSTR_RRE_RR }, 1125 { "clhhr", 0xcf, INSTR_RRE_RR },
1126 { { 0, LONG_INSN_PCISTG }, 0xd0, INSTR_RRE_RR },
1127 { "pcilg", 0xd2, INSTR_RRE_RR },
1128 { "rpcit", 0xd3, INSTR_RRE_RR },
949 { "ahhlr", 0xd8, INSTR_RRF_R0RR2 }, 1129 { "ahhlr", 0xd8, INSTR_RRF_R0RR2 },
950 { "shhlr", 0xd9, INSTR_RRF_R0RR2 }, 1130 { "shhlr", 0xd9, INSTR_RRF_R0RR2 },
951 { "slhhl", 0xdb, INSTR_RRF_R0RR2 }, 1131 { { 0, LONG_INSN_ALHHLR }, 0xda, INSTR_RRF_R0RR2 },
1132 { { 0, LONG_INSN_SLHHLR }, 0xdb, INSTR_RRF_R0RR2 },
952 { "chlr", 0xdd, INSTR_RRE_RR }, 1133 { "chlr", 0xdd, INSTR_RRE_RR },
953 { "clhlr", 0xdf, INSTR_RRE_RR }, 1134 { "clhlr", 0xdf, INSTR_RRE_RR },
954 { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR }, 1135 { { 0, LONG_INSN_POPCNT }, 0xe1, INSTR_RRE_RR },
@@ -976,13 +1157,9 @@ static struct insn opcode_b9[] = {
976 { "kimd", 0x3e, INSTR_RRE_RR }, 1157 { "kimd", 0x3e, INSTR_RRE_RR },
977 { "klmd", 0x3f, INSTR_RRE_RR }, 1158 { "klmd", 0x3f, INSTR_RRE_RR },
978 { "epsw", 0x8d, INSTR_RRE_RR }, 1159 { "epsw", 0x8d, INSTR_RRE_RR },
979 { "trtt", 0x90, INSTR_RRE_RR },
980 { "trtt", 0x90, INSTR_RRF_M0RR }, 1160 { "trtt", 0x90, INSTR_RRF_M0RR },
981 { "trto", 0x91, INSTR_RRE_RR },
982 { "trto", 0x91, INSTR_RRF_M0RR }, 1161 { "trto", 0x91, INSTR_RRF_M0RR },
983 { "trot", 0x92, INSTR_RRE_RR },
984 { "trot", 0x92, INSTR_RRF_M0RR }, 1162 { "trot", 0x92, INSTR_RRF_M0RR },
985 { "troo", 0x93, INSTR_RRE_RR },
986 { "troo", 0x93, INSTR_RRF_M0RR }, 1163 { "troo", 0x93, INSTR_RRF_M0RR },
987 { "mlr", 0x96, INSTR_RRE_RR }, 1164 { "mlr", 0x96, INSTR_RRE_RR },
988 { "dlr", 0x97, INSTR_RRE_RR }, 1165 { "dlr", 0x97, INSTR_RRE_RR },
@@ -1013,6 +1190,8 @@ static struct insn opcode_c0[] = {
1013 1190
1014static struct insn opcode_c2[] = { 1191static struct insn opcode_c2[] = {
1015#ifdef CONFIG_64BIT 1192#ifdef CONFIG_64BIT
1193 { "msgfi", 0x00, INSTR_RIL_RI },
1194 { "msfi", 0x01, INSTR_RIL_RI },
1016 { "slgfi", 0x04, INSTR_RIL_RU }, 1195 { "slgfi", 0x04, INSTR_RIL_RU },
1017 { "slfi", 0x05, INSTR_RIL_RU }, 1196 { "slfi", 0x05, INSTR_RIL_RU },
1018 { "agfi", 0x08, INSTR_RIL_RI }, 1197 { "agfi", 0x08, INSTR_RIL_RI },
@@ -1023,43 +1202,41 @@ static struct insn opcode_c2[] = {
1023 { "cfi", 0x0d, INSTR_RIL_RI }, 1202 { "cfi", 0x0d, INSTR_RIL_RI },
1024 { "clgfi", 0x0e, INSTR_RIL_RU }, 1203 { "clgfi", 0x0e, INSTR_RIL_RU },
1025 { "clfi", 0x0f, INSTR_RIL_RU }, 1204 { "clfi", 0x0f, INSTR_RIL_RU },
1026 { "msfi", 0x01, INSTR_RIL_RI },
1027 { "msgfi", 0x00, INSTR_RIL_RI },
1028#endif 1205#endif
1029 { "", 0, INSTR_INVALID } 1206 { "", 0, INSTR_INVALID }
1030}; 1207};
1031 1208
1032static struct insn opcode_c4[] = { 1209static struct insn opcode_c4[] = {
1033#ifdef CONFIG_64BIT 1210#ifdef CONFIG_64BIT
1034 { "lrl", 0x0d, INSTR_RIL_RP }, 1211 { "llhrl", 0x02, INSTR_RIL_RP },
1212 { "lghrl", 0x04, INSTR_RIL_RP },
1213 { "lhrl", 0x05, INSTR_RIL_RP },
1214 { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
1215 { "sthrl", 0x07, INSTR_RIL_RP },
1035 { "lgrl", 0x08, INSTR_RIL_RP }, 1216 { "lgrl", 0x08, INSTR_RIL_RP },
1217 { "stgrl", 0x0b, INSTR_RIL_RP },
1036 { "lgfrl", 0x0c, INSTR_RIL_RP }, 1218 { "lgfrl", 0x0c, INSTR_RIL_RP },
1037 { "lhrl", 0x05, INSTR_RIL_RP }, 1219 { "lrl", 0x0d, INSTR_RIL_RP },
1038 { "lghrl", 0x04, INSTR_RIL_RP },
1039 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP }, 1220 { { 0, LONG_INSN_LLGFRL }, 0x0e, INSTR_RIL_RP },
1040 { "llhrl", 0x02, INSTR_RIL_RP },
1041 { { 0, LONG_INSN_LLGHRL }, 0x06, INSTR_RIL_RP },
1042 { "strl", 0x0f, INSTR_RIL_RP }, 1221 { "strl", 0x0f, INSTR_RIL_RP },
1043 { "stgrl", 0x0b, INSTR_RIL_RP },
1044 { "sthrl", 0x07, INSTR_RIL_RP },
1045#endif 1222#endif
1046 { "", 0, INSTR_INVALID } 1223 { "", 0, INSTR_INVALID }
1047}; 1224};
1048 1225
1049static struct insn opcode_c6[] = { 1226static struct insn opcode_c6[] = {
1050#ifdef CONFIG_64BIT 1227#ifdef CONFIG_64BIT
1051 { "crl", 0x0d, INSTR_RIL_RP }, 1228 { "exrl", 0x00, INSTR_RIL_RP },
1052 { "cgrl", 0x08, INSTR_RIL_RP }, 1229 { "pfdrl", 0x02, INSTR_RIL_UP },
1053 { "cgfrl", 0x0c, INSTR_RIL_RP },
1054 { "chrl", 0x05, INSTR_RIL_RP },
1055 { "cghrl", 0x04, INSTR_RIL_RP }, 1230 { "cghrl", 0x04, INSTR_RIL_RP },
1056 { "clrl", 0x0f, INSTR_RIL_RP }, 1231 { "chrl", 0x05, INSTR_RIL_RP },
1232 { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
1233 { "clhrl", 0x07, INSTR_RIL_RP },
1234 { "cgrl", 0x08, INSTR_RIL_RP },
1057 { "clgrl", 0x0a, INSTR_RIL_RP }, 1235 { "clgrl", 0x0a, INSTR_RIL_RP },
1236 { "cgfrl", 0x0c, INSTR_RIL_RP },
1237 { "crl", 0x0d, INSTR_RIL_RP },
1058 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP }, 1238 { { 0, LONG_INSN_CLGFRL }, 0x0e, INSTR_RIL_RP },
1059 { "clhrl", 0x07, INSTR_RIL_RP }, 1239 { "clrl", 0x0f, INSTR_RIL_RP },
1060 { { 0, LONG_INSN_CLGHRL }, 0x06, INSTR_RIL_RP },
1061 { "pfdrl", 0x02, INSTR_RIL_UP },
1062 { "exrl", 0x00, INSTR_RIL_RP },
1063#endif 1240#endif
1064 { "", 0, INSTR_INVALID } 1241 { "", 0, INSTR_INVALID }
1065}; 1242};
@@ -1070,7 +1247,7 @@ static struct insn opcode_c8[] = {
1070 { "ectg", 0x01, INSTR_SSF_RRDRD }, 1247 { "ectg", 0x01, INSTR_SSF_RRDRD },
1071 { "csst", 0x02, INSTR_SSF_RRDRD }, 1248 { "csst", 0x02, INSTR_SSF_RRDRD },
1072 { "lpd", 0x04, INSTR_SSF_RRDRD2 }, 1249 { "lpd", 0x04, INSTR_SSF_RRDRD2 },
1073 { "lpdg ", 0x05, INSTR_SSF_RRDRD2 }, 1250 { "lpdg", 0x05, INSTR_SSF_RRDRD2 },
1074#endif 1251#endif
1075 { "", 0, INSTR_INVALID } 1252 { "", 0, INSTR_INVALID }
1076}; 1253};
@@ -1080,9 +1257,9 @@ static struct insn opcode_cc[] = {
1080 { "brcth", 0x06, INSTR_RIL_RP }, 1257 { "brcth", 0x06, INSTR_RIL_RP },
1081 { "aih", 0x08, INSTR_RIL_RI }, 1258 { "aih", 0x08, INSTR_RIL_RI },
1082 { "alsih", 0x0a, INSTR_RIL_RI }, 1259 { "alsih", 0x0a, INSTR_RIL_RI },
1083 { "alsih", 0x0b, INSTR_RIL_RI }, 1260 { { 0, LONG_INSN_ALSIHN }, 0x0b, INSTR_RIL_RI },
1084 { "cih", 0x0d, INSTR_RIL_RI }, 1261 { "cih", 0x0d, INSTR_RIL_RI },
1085 { "clih ", 0x0f, INSTR_RIL_RI }, 1262 { "clih", 0x0f, INSTR_RIL_RI },
1086#endif 1263#endif
1087 { "", 0, INSTR_INVALID } 1264 { "", 0, INSTR_INVALID }
1088}; 1265};
@@ -1116,11 +1293,15 @@ static struct insn opcode_e3[] = {
1116 { "cg", 0x20, INSTR_RXY_RRRD }, 1293 { "cg", 0x20, INSTR_RXY_RRRD },
1117 { "clg", 0x21, INSTR_RXY_RRRD }, 1294 { "clg", 0x21, INSTR_RXY_RRRD },
1118 { "stg", 0x24, INSTR_RXY_RRRD }, 1295 { "stg", 0x24, INSTR_RXY_RRRD },
1296 { "ntstg", 0x25, INSTR_RXY_RRRD },
1119 { "cvdy", 0x26, INSTR_RXY_RRRD }, 1297 { "cvdy", 0x26, INSTR_RXY_RRRD },
1120 { "cvdg", 0x2e, INSTR_RXY_RRRD }, 1298 { "cvdg", 0x2e, INSTR_RXY_RRRD },
1121 { "strvg", 0x2f, INSTR_RXY_RRRD }, 1299 { "strvg", 0x2f, INSTR_RXY_RRRD },
1122 { "cgf", 0x30, INSTR_RXY_RRRD }, 1300 { "cgf", 0x30, INSTR_RXY_RRRD },
1123 { "clgf", 0x31, INSTR_RXY_RRRD }, 1301 { "clgf", 0x31, INSTR_RXY_RRRD },
1302 { "ltgf", 0x32, INSTR_RXY_RRRD },
1303 { "cgh", 0x34, INSTR_RXY_RRRD },
1304 { "pfd", 0x36, INSTR_RXY_URRD },
1124 { "strvh", 0x3f, INSTR_RXY_RRRD }, 1305 { "strvh", 0x3f, INSTR_RXY_RRRD },
1125 { "bctg", 0x46, INSTR_RXY_RRRD }, 1306 { "bctg", 0x46, INSTR_RXY_RRRD },
1126 { "sty", 0x50, INSTR_RXY_RRRD }, 1307 { "sty", 0x50, INSTR_RXY_RRRD },
@@ -1133,21 +1314,25 @@ static struct insn opcode_e3[] = {
1133 { "cy", 0x59, INSTR_RXY_RRRD }, 1314 { "cy", 0x59, INSTR_RXY_RRRD },
1134 { "ay", 0x5a, INSTR_RXY_RRRD }, 1315 { "ay", 0x5a, INSTR_RXY_RRRD },
1135 { "sy", 0x5b, INSTR_RXY_RRRD }, 1316 { "sy", 0x5b, INSTR_RXY_RRRD },
1317 { "mfy", 0x5c, INSTR_RXY_RRRD },
1136 { "aly", 0x5e, INSTR_RXY_RRRD }, 1318 { "aly", 0x5e, INSTR_RXY_RRRD },
1137 { "sly", 0x5f, INSTR_RXY_RRRD }, 1319 { "sly", 0x5f, INSTR_RXY_RRRD },
1138 { "sthy", 0x70, INSTR_RXY_RRRD }, 1320 { "sthy", 0x70, INSTR_RXY_RRRD },
1139 { "lay", 0x71, INSTR_RXY_RRRD }, 1321 { "lay", 0x71, INSTR_RXY_RRRD },
1140 { "stcy", 0x72, INSTR_RXY_RRRD }, 1322 { "stcy", 0x72, INSTR_RXY_RRRD },
1141 { "icy", 0x73, INSTR_RXY_RRRD }, 1323 { "icy", 0x73, INSTR_RXY_RRRD },
1324 { "laey", 0x75, INSTR_RXY_RRRD },
1142 { "lb", 0x76, INSTR_RXY_RRRD }, 1325 { "lb", 0x76, INSTR_RXY_RRRD },
1143 { "lgb", 0x77, INSTR_RXY_RRRD }, 1326 { "lgb", 0x77, INSTR_RXY_RRRD },
1144 { "lhy", 0x78, INSTR_RXY_RRRD }, 1327 { "lhy", 0x78, INSTR_RXY_RRRD },
1145 { "chy", 0x79, INSTR_RXY_RRRD }, 1328 { "chy", 0x79, INSTR_RXY_RRRD },
1146 { "ahy", 0x7a, INSTR_RXY_RRRD }, 1329 { "ahy", 0x7a, INSTR_RXY_RRRD },
1147 { "shy", 0x7b, INSTR_RXY_RRRD }, 1330 { "shy", 0x7b, INSTR_RXY_RRRD },
1331 { "mhy", 0x7c, INSTR_RXY_RRRD },
1148 { "ng", 0x80, INSTR_RXY_RRRD }, 1332 { "ng", 0x80, INSTR_RXY_RRRD },
1149 { "og", 0x81, INSTR_RXY_RRRD }, 1333 { "og", 0x81, INSTR_RXY_RRRD },
1150 { "xg", 0x82, INSTR_RXY_RRRD }, 1334 { "xg", 0x82, INSTR_RXY_RRRD },
1335 { "lgat", 0x85, INSTR_RXY_RRRD },
1151 { "mlg", 0x86, INSTR_RXY_RRRD }, 1336 { "mlg", 0x86, INSTR_RXY_RRRD },
1152 { "dlg", 0x87, INSTR_RXY_RRRD }, 1337 { "dlg", 0x87, INSTR_RXY_RRRD },
1153 { "alcg", 0x88, INSTR_RXY_RRRD }, 1338 { "alcg", 0x88, INSTR_RXY_RRRD },
@@ -1158,23 +1343,22 @@ static struct insn opcode_e3[] = {
1158 { "llgh", 0x91, INSTR_RXY_RRRD }, 1343 { "llgh", 0x91, INSTR_RXY_RRRD },
1159 { "llc", 0x94, INSTR_RXY_RRRD }, 1344 { "llc", 0x94, INSTR_RXY_RRRD },
1160 { "llh", 0x95, INSTR_RXY_RRRD }, 1345 { "llh", 0x95, INSTR_RXY_RRRD },
1161 { "cgh", 0x34, INSTR_RXY_RRRD }, 1346 { { 0, LONG_INSN_LLGTAT }, 0x9c, INSTR_RXY_RRRD },
1162 { "laey", 0x75, INSTR_RXY_RRRD }, 1347 { { 0, LONG_INSN_LLGFAT }, 0x9d, INSTR_RXY_RRRD },
1163 { "ltgf", 0x32, INSTR_RXY_RRRD }, 1348 { "lat", 0x9f, INSTR_RXY_RRRD },
1164 { "mfy", 0x5c, INSTR_RXY_RRRD },
1165 { "mhy", 0x7c, INSTR_RXY_RRRD },
1166 { "pfd", 0x36, INSTR_RXY_URRD },
1167 { "lbh", 0xc0, INSTR_RXY_RRRD }, 1349 { "lbh", 0xc0, INSTR_RXY_RRRD },
1168 { "llch", 0xc2, INSTR_RXY_RRRD }, 1350 { "llch", 0xc2, INSTR_RXY_RRRD },
1169 { "stch", 0xc3, INSTR_RXY_RRRD }, 1351 { "stch", 0xc3, INSTR_RXY_RRRD },
1170 { "lhh", 0xc4, INSTR_RXY_RRRD }, 1352 { "lhh", 0xc4, INSTR_RXY_RRRD },
1171 { "llhh", 0xc6, INSTR_RXY_RRRD }, 1353 { "llhh", 0xc6, INSTR_RXY_RRRD },
1172 { "sthh", 0xc7, INSTR_RXY_RRRD }, 1354 { "sthh", 0xc7, INSTR_RXY_RRRD },
1355 { "lfhat", 0xc8, INSTR_RXY_RRRD },
1173 { "lfh", 0xca, INSTR_RXY_RRRD }, 1356 { "lfh", 0xca, INSTR_RXY_RRRD },
1174 { "stfh", 0xcb, INSTR_RXY_RRRD }, 1357 { "stfh", 0xcb, INSTR_RXY_RRRD },
1175 { "chf", 0xcd, INSTR_RXY_RRRD }, 1358 { "chf", 0xcd, INSTR_RXY_RRRD },
1176 { "clhf", 0xcf, INSTR_RXY_RRRD }, 1359 { "clhf", 0xcf, INSTR_RXY_RRRD },
1177 { "ntstg", 0x25, INSTR_RXY_RRRD }, 1360 { { 0, LONG_INSN_MPCIFC }, 0xd0, INSTR_RXY_RRRD },
1361 { { 0, LONG_INSN_STPCIFC }, 0xd4, INSTR_RXY_RRRD },
1178#endif 1362#endif
1179 { "lrv", 0x1e, INSTR_RXY_RRRD }, 1363 { "lrv", 0x1e, INSTR_RXY_RRRD },
1180 { "lrvh", 0x1f, INSTR_RXY_RRRD }, 1364 { "lrvh", 0x1f, INSTR_RXY_RRRD },
@@ -1189,15 +1373,15 @@ static struct insn opcode_e3[] = {
1189static struct insn opcode_e5[] = { 1373static struct insn opcode_e5[] = {
1190#ifdef CONFIG_64BIT 1374#ifdef CONFIG_64BIT
1191 { "strag", 0x02, INSTR_SSE_RDRD }, 1375 { "strag", 0x02, INSTR_SSE_RDRD },
1376 { "mvhhi", 0x44, INSTR_SIL_RDI },
1377 { "mvghi", 0x48, INSTR_SIL_RDI },
1378 { "mvhi", 0x4c, INSTR_SIL_RDI },
1192 { "chhsi", 0x54, INSTR_SIL_RDI }, 1379 { "chhsi", 0x54, INSTR_SIL_RDI },
1193 { "chsi", 0x5c, INSTR_SIL_RDI },
1194 { "cghsi", 0x58, INSTR_SIL_RDI },
1195 { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU }, 1380 { { 0, LONG_INSN_CLHHSI }, 0x55, INSTR_SIL_RDU },
1196 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU }, 1381 { "cghsi", 0x58, INSTR_SIL_RDI },
1197 { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU }, 1382 { { 0, LONG_INSN_CLGHSI }, 0x59, INSTR_SIL_RDU },
1198 { "mvhhi", 0x44, INSTR_SIL_RDI }, 1383 { "chsi", 0x5c, INSTR_SIL_RDI },
1199 { "mvhi", 0x4c, INSTR_SIL_RDI }, 1384 { { 0, LONG_INSN_CLFHSI }, 0x5d, INSTR_SIL_RDU },
1200 { "mvghi", 0x48, INSTR_SIL_RDI },
1201 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU }, 1385 { { 0, LONG_INSN_TBEGIN }, 0x60, INSTR_SIL_RDU },
1202 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU }, 1386 { { 0, LONG_INSN_TBEGINC }, 0x61, INSTR_SIL_RDU },
1203#endif 1387#endif
@@ -1220,9 +1404,11 @@ static struct insn opcode_eb[] = {
1220 { "rllg", 0x1c, INSTR_RSY_RRRD }, 1404 { "rllg", 0x1c, INSTR_RSY_RRRD },
1221 { "clmh", 0x20, INSTR_RSY_RURD }, 1405 { "clmh", 0x20, INSTR_RSY_RURD },
1222 { "clmy", 0x21, INSTR_RSY_RURD }, 1406 { "clmy", 0x21, INSTR_RSY_RURD },
1407 { "clt", 0x23, INSTR_RSY_RURD },
1223 { "stmg", 0x24, INSTR_RSY_RRRD }, 1408 { "stmg", 0x24, INSTR_RSY_RRRD },
1224 { "stctg", 0x25, INSTR_RSY_CCRD }, 1409 { "stctg", 0x25, INSTR_RSY_CCRD },
1225 { "stmh", 0x26, INSTR_RSY_RRRD }, 1410 { "stmh", 0x26, INSTR_RSY_RRRD },
1411 { "clgt", 0x2b, INSTR_RSY_RURD },
1226 { "stcmh", 0x2c, INSTR_RSY_RURD }, 1412 { "stcmh", 0x2c, INSTR_RSY_RURD },
1227 { "stcmy", 0x2d, INSTR_RSY_RURD }, 1413 { "stcmy", 0x2d, INSTR_RSY_RURD },
1228 { "lctlg", 0x2f, INSTR_RSY_CCRD }, 1414 { "lctlg", 0x2f, INSTR_RSY_CCRD },
@@ -1231,16 +1417,17 @@ static struct insn opcode_eb[] = {
1231 { "cdsg", 0x3e, INSTR_RSY_RRRD }, 1417 { "cdsg", 0x3e, INSTR_RSY_RRRD },
1232 { "bxhg", 0x44, INSTR_RSY_RRRD }, 1418 { "bxhg", 0x44, INSTR_RSY_RRRD },
1233 { "bxleg", 0x45, INSTR_RSY_RRRD }, 1419 { "bxleg", 0x45, INSTR_RSY_RRRD },
1420 { "ecag", 0x4c, INSTR_RSY_RRRD },
1234 { "tmy", 0x51, INSTR_SIY_URD }, 1421 { "tmy", 0x51, INSTR_SIY_URD },
1235 { "mviy", 0x52, INSTR_SIY_URD }, 1422 { "mviy", 0x52, INSTR_SIY_URD },
1236 { "niy", 0x54, INSTR_SIY_URD }, 1423 { "niy", 0x54, INSTR_SIY_URD },
1237 { "cliy", 0x55, INSTR_SIY_URD }, 1424 { "cliy", 0x55, INSTR_SIY_URD },
1238 { "oiy", 0x56, INSTR_SIY_URD }, 1425 { "oiy", 0x56, INSTR_SIY_URD },
1239 { "xiy", 0x57, INSTR_SIY_URD }, 1426 { "xiy", 0x57, INSTR_SIY_URD },
1240 { "lric", 0x60, INSTR_RSY_RDRM }, 1427 { "asi", 0x6a, INSTR_SIY_IRD },
1241 { "stric", 0x61, INSTR_RSY_RDRM }, 1428 { "alsi", 0x6e, INSTR_SIY_IRD },
1242 { "mric", 0x62, INSTR_RSY_RDRM }, 1429 { "agsi", 0x7a, INSTR_SIY_IRD },
1243 { "icmh", 0x80, INSTR_RSE_RURD }, 1430 { "algsi", 0x7e, INSTR_SIY_IRD },
1244 { "icmh", 0x80, INSTR_RSY_RURD }, 1431 { "icmh", 0x80, INSTR_RSY_RURD },
1245 { "icmy", 0x81, INSTR_RSY_RURD }, 1432 { "icmy", 0x81, INSTR_RSY_RURD },
1246 { "clclu", 0x8f, INSTR_RSY_RRRD }, 1433 { "clclu", 0x8f, INSTR_RSY_RRRD },
@@ -1249,11 +1436,8 @@ static struct insn opcode_eb[] = {
1249 { "lmy", 0x98, INSTR_RSY_RRRD }, 1436 { "lmy", 0x98, INSTR_RSY_RRRD },
1250 { "lamy", 0x9a, INSTR_RSY_AARD }, 1437 { "lamy", 0x9a, INSTR_RSY_AARD },
1251 { "stamy", 0x9b, INSTR_RSY_AARD }, 1438 { "stamy", 0x9b, INSTR_RSY_AARD },
1252 { "asi", 0x6a, INSTR_SIY_IRD }, 1439 { { 0, LONG_INSN_PCISTB }, 0xd0, INSTR_RSY_RRRD },
1253 { "agsi", 0x7a, INSTR_SIY_IRD }, 1440 { "sic", 0xd1, INSTR_RSY_RRRD },
1254 { "alsi", 0x6e, INSTR_SIY_IRD },
1255 { "algsi", 0x7e, INSTR_SIY_IRD },
1256 { "ecag", 0x4c, INSTR_RSY_RRRD },
1257 { "srak", 0xdc, INSTR_RSY_RRRD }, 1441 { "srak", 0xdc, INSTR_RSY_RRRD },
1258 { "slak", 0xdd, INSTR_RSY_RRRD }, 1442 { "slak", 0xdd, INSTR_RSY_RRRD },
1259 { "srlk", 0xde, INSTR_RSY_RRRD }, 1443 { "srlk", 0xde, INSTR_RSY_RRRD },
@@ -1272,6 +1456,9 @@ static struct insn opcode_eb[] = {
1272 { "lax", 0xf7, INSTR_RSY_RRRD }, 1456 { "lax", 0xf7, INSTR_RSY_RRRD },
1273 { "laa", 0xf8, INSTR_RSY_RRRD }, 1457 { "laa", 0xf8, INSTR_RSY_RRRD },
1274 { "laal", 0xfa, INSTR_RSY_RRRD }, 1458 { "laal", 0xfa, INSTR_RSY_RRRD },
1459 { "lric", 0x60, INSTR_RSY_RDRM },
1460 { "stric", 0x61, INSTR_RSY_RDRM },
1461 { "mric", 0x62, INSTR_RSY_RDRM },
1275#endif 1462#endif
1276 { "rll", 0x1d, INSTR_RSY_RRRD }, 1463 { "rll", 0x1d, INSTR_RSY_RRRD },
1277 { "mvclu", 0x8e, INSTR_RSY_RRRD }, 1464 { "mvclu", 0x8e, INSTR_RSY_RRRD },
@@ -1283,36 +1470,37 @@ static struct insn opcode_ec[] = {
1283#ifdef CONFIG_64BIT 1470#ifdef CONFIG_64BIT
1284 { "brxhg", 0x44, INSTR_RIE_RRP }, 1471 { "brxhg", 0x44, INSTR_RIE_RRP },
1285 { "brxlg", 0x45, INSTR_RIE_RRP }, 1472 { "brxlg", 0x45, INSTR_RIE_RRP },
1286 { "crb", 0xf6, INSTR_RRS_RRRDU }, 1473 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
1287 { "cgrb", 0xe4, INSTR_RRS_RRRDU }, 1474 { "rnsbg", 0x54, INSTR_RIE_RRUUU },
1288 { "crj", 0x76, INSTR_RIE_RRPU }, 1475 { "risbg", 0x55, INSTR_RIE_RRUUU },
1476 { "rosbg", 0x56, INSTR_RIE_RRUUU },
1477 { "rxsbg", 0x57, INSTR_RIE_RRUUU },
1478 { { 0, LONG_INSN_RISBGN }, 0x59, INSTR_RIE_RRUUU },
1479 { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
1289 { "cgrj", 0x64, INSTR_RIE_RRPU }, 1480 { "cgrj", 0x64, INSTR_RIE_RRPU },
1290 { "cib", 0xfe, INSTR_RIS_RURDI }, 1481 { "clgrj", 0x65, INSTR_RIE_RRPU },
1291 { "cgib", 0xfc, INSTR_RIS_RURDI },
1292 { "cij", 0x7e, INSTR_RIE_RUPI },
1293 { "cgij", 0x7c, INSTR_RIE_RUPI },
1294 { "cit", 0x72, INSTR_RIE_R0IU },
1295 { "cgit", 0x70, INSTR_RIE_R0IU }, 1482 { "cgit", 0x70, INSTR_RIE_R0IU },
1296 { "clrb", 0xf7, INSTR_RRS_RRRDU }, 1483 { "clgit", 0x71, INSTR_RIE_R0UU },
1297 { "clgrb", 0xe5, INSTR_RRS_RRRDU }, 1484 { "cit", 0x72, INSTR_RIE_R0IU },
1485 { "clfit", 0x73, INSTR_RIE_R0UU },
1486 { "crj", 0x76, INSTR_RIE_RRPU },
1298 { "clrj", 0x77, INSTR_RIE_RRPU }, 1487 { "clrj", 0x77, INSTR_RIE_RRPU },
1299 { "clgrj", 0x65, INSTR_RIE_RRPU }, 1488 { "cgij", 0x7c, INSTR_RIE_RUPI },
1300 { "clib", 0xff, INSTR_RIS_RURDU },
1301 { "clgib", 0xfd, INSTR_RIS_RURDU },
1302 { "clij", 0x7f, INSTR_RIE_RUPU },
1303 { "clgij", 0x7d, INSTR_RIE_RUPU }, 1489 { "clgij", 0x7d, INSTR_RIE_RUPU },
1304 { "clfit", 0x73, INSTR_RIE_R0UU }, 1490 { "cij", 0x7e, INSTR_RIE_RUPI },
1305 { "clgit", 0x71, INSTR_RIE_R0UU }, 1491 { "clij", 0x7f, INSTR_RIE_RUPU },
1306 { "rnsbg", 0x54, INSTR_RIE_RRUUU },
1307 { "rxsbg", 0x57, INSTR_RIE_RRUUU },
1308 { "rosbg", 0x56, INSTR_RIE_RRUUU },
1309 { "risbg", 0x55, INSTR_RIE_RRUUU },
1310 { { 0, LONG_INSN_RISBLG }, 0x51, INSTR_RIE_RRUUU },
1311 { { 0, LONG_INSN_RISBHG }, 0x5D, INSTR_RIE_RRUUU },
1312 { "ahik", 0xd8, INSTR_RIE_RRI0 }, 1492 { "ahik", 0xd8, INSTR_RIE_RRI0 },
1313 { "aghik", 0xd9, INSTR_RIE_RRI0 }, 1493 { "aghik", 0xd9, INSTR_RIE_RRI0 },
1314 { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 }, 1494 { { 0, LONG_INSN_ALHSIK }, 0xda, INSTR_RIE_RRI0 },
1315 { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 }, 1495 { { 0, LONG_INSN_ALGHSIK }, 0xdb, INSTR_RIE_RRI0 },
1496 { "cgrb", 0xe4, INSTR_RRS_RRRDU },
1497 { "clgrb", 0xe5, INSTR_RRS_RRRDU },
1498 { "crb", 0xf6, INSTR_RRS_RRRDU },
1499 { "clrb", 0xf7, INSTR_RRS_RRRDU },
1500 { "cgib", 0xfc, INSTR_RIS_RURDI },
1501 { "clgib", 0xfd, INSTR_RIS_RURDU },
1502 { "cib", 0xfe, INSTR_RIS_RURDI },
1503 { "clib", 0xff, INSTR_RIS_RURDU },
1316#endif 1504#endif
1317 { "", 0, INSTR_INVALID } 1505 { "", 0, INSTR_INVALID }
1318}; 1506};
@@ -1325,20 +1513,24 @@ static struct insn opcode_ed[] = {
1325 { "my", 0x3b, INSTR_RXF_FRRDF }, 1513 { "my", 0x3b, INSTR_RXF_FRRDF },
1326 { "mayh", 0x3c, INSTR_RXF_FRRDF }, 1514 { "mayh", 0x3c, INSTR_RXF_FRRDF },
1327 { "myh", 0x3d, INSTR_RXF_FRRDF }, 1515 { "myh", 0x3d, INSTR_RXF_FRRDF },
1328 { "ley", 0x64, INSTR_RXY_FRRD },
1329 { "ldy", 0x65, INSTR_RXY_FRRD },
1330 { "stey", 0x66, INSTR_RXY_FRRD },
1331 { "stdy", 0x67, INSTR_RXY_FRRD },
1332 { "sldt", 0x40, INSTR_RXF_FRRDF }, 1516 { "sldt", 0x40, INSTR_RXF_FRRDF },
1333 { "slxt", 0x48, INSTR_RXF_FRRDF },
1334 { "srdt", 0x41, INSTR_RXF_FRRDF }, 1517 { "srdt", 0x41, INSTR_RXF_FRRDF },
1518 { "slxt", 0x48, INSTR_RXF_FRRDF },
1335 { "srxt", 0x49, INSTR_RXF_FRRDF }, 1519 { "srxt", 0x49, INSTR_RXF_FRRDF },
1336 { "tdcet", 0x50, INSTR_RXE_FRRD }, 1520 { "tdcet", 0x50, INSTR_RXE_FRRD },
1337 { "tdcdt", 0x54, INSTR_RXE_FRRD },
1338 { "tdcxt", 0x58, INSTR_RXE_FRRD },
1339 { "tdget", 0x51, INSTR_RXE_FRRD }, 1521 { "tdget", 0x51, INSTR_RXE_FRRD },
1522 { "tdcdt", 0x54, INSTR_RXE_FRRD },
1340 { "tdgdt", 0x55, INSTR_RXE_FRRD }, 1523 { "tdgdt", 0x55, INSTR_RXE_FRRD },
1524 { "tdcxt", 0x58, INSTR_RXE_FRRD },
1341 { "tdgxt", 0x59, INSTR_RXE_FRRD }, 1525 { "tdgxt", 0x59, INSTR_RXE_FRRD },
1526 { "ley", 0x64, INSTR_RXY_FRRD },
1527 { "ldy", 0x65, INSTR_RXY_FRRD },
1528 { "stey", 0x66, INSTR_RXY_FRRD },
1529 { "stdy", 0x67, INSTR_RXY_FRRD },
1530 { "czdt", 0xa8, INSTR_RSL_LRDFU },
1531 { "czxt", 0xa9, INSTR_RSL_LRDFU },
1532 { "cdzt", 0xaa, INSTR_RSL_LRDFU },
1533 { "cxzt", 0xab, INSTR_RSL_LRDFU },
1342#endif 1534#endif
1343 { "ldeb", 0x04, INSTR_RXE_FRRD }, 1535 { "ldeb", 0x04, INSTR_RXE_FRRD },
1344 { "lxdb", 0x05, INSTR_RXE_FRRD }, 1536 { "lxdb", 0x05, INSTR_RXE_FRRD },
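Note on the opcode tables above: plain entries store the mnemonic inline, while entries written as { { 0, LONG_INSN_xxx }, ... } refer to a separate long-name table, because the inline field only holds five characters. The struct insn definition and the lookup are not part of this excerpt, so the following C sketch is an assumption about how dis.c encodes this, not a quote of the file:

/*
 * Sketch only: struct insn and long_insn_name are defined elsewhere in
 * arch/s390/kernel/dis.c and are not shown in this diff; layout assumed.
 */
#include <linux/kernel.h>

struct insn {
	const char name[5];	/* or { 0, index } for mnemonics longer than 5 chars */
	unsigned char opfrag;	/* opcode fragment that selects this entry */
	unsigned char format;	/* INSTR_* operand format */
};

static int print_insn_name(char *buf, const struct insn *insn,
			   const char * const *long_insn_name)
{
	/* a leading NUL marks a long mnemonic, name[1] holds its table index */
	if (insn->name[0] == '\0')
		return sprintf(buf, "%s", long_insn_name[(int) insn->name[1]]);
	return sprintf(buf, "%.5s", insn->name);
}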
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index aa8f2ba6289b..550228523267 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -231,12 +231,12 @@ sysc_work:
231 jo sysc_mcck_pending 231 jo sysc_mcck_pending
232 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED 232 tm __TI_flags+3(%r12),_TIF_NEED_RESCHED
233 jo sysc_reschedule 233 jo sysc_reschedule
234 tm __TI_flags+3(%r12),_TIF_PER_TRAP
235 jo sysc_singlestep
234 tm __TI_flags+3(%r12),_TIF_SIGPENDING 236 tm __TI_flags+3(%r12),_TIF_SIGPENDING
235 jo sysc_sigpending 237 jo sysc_sigpending
236 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME 238 tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
237 jo sysc_notify_resume 239 jo sysc_notify_resume
238 tm __TI_flags+3(%r12),_TIF_PER_TRAP
239 jo sysc_singlestep
240 j sysc_return # beware of critical section cleanup 240 j sysc_return # beware of critical section cleanup
241 241
242# 242#
@@ -259,7 +259,6 @@ sysc_mcck_pending:
259# _TIF_SIGPENDING is set, call do_signal 259# _TIF_SIGPENDING is set, call do_signal
260# 260#
261sysc_sigpending: 261sysc_sigpending:
262 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
263 lr %r2,%r11 # pass pointer to pt_regs 262 lr %r2,%r11 # pass pointer to pt_regs
264 l %r1,BASED(.Ldo_signal) 263 l %r1,BASED(.Ldo_signal)
265 basr %r14,%r1 # call do_signal 264 basr %r14,%r1 # call do_signal
@@ -286,7 +285,7 @@ sysc_notify_resume:
286# _TIF_PER_TRAP is set, call do_per_trap 285# _TIF_PER_TRAP is set, call do_per_trap
287# 286#
288sysc_singlestep: 287sysc_singlestep:
289 ni __TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) 288 ni __TI_flags+3(%r12),255-_TIF_PER_TRAP
290 lr %r2,%r11 # pass pointer to pt_regs 289 lr %r2,%r11 # pass pointer to pt_regs
291 l %r1,BASED(.Ldo_per_trap) 290 l %r1,BASED(.Ldo_per_trap)
292 la %r14,BASED(sysc_return) 291 la %r14,BASED(sysc_return)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index d8251b98f17a..2711936fe706 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -6,7 +6,6 @@
6#include <asm/ptrace.h> 6#include <asm/ptrace.h>
7#include <asm/cputime.h> 7#include <asm/cputime.h>
8 8
9extern void (*pgm_check_table[128])(struct pt_regs *);
10extern void *restart_stack; 9extern void *restart_stack;
11 10
12void system_call(void); 11void system_call(void);
@@ -25,6 +24,26 @@ void do_protection_exception(struct pt_regs *regs);
25void do_dat_exception(struct pt_regs *regs); 24void do_dat_exception(struct pt_regs *regs);
26void do_asce_exception(struct pt_regs *regs); 25void do_asce_exception(struct pt_regs *regs);
27 26
27void addressing_exception(struct pt_regs *regs);
28void data_exception(struct pt_regs *regs);
29void default_trap_handler(struct pt_regs *regs);
30void divide_exception(struct pt_regs *regs);
31void execute_exception(struct pt_regs *regs);
32void hfp_divide_exception(struct pt_regs *regs);
33void hfp_overflow_exception(struct pt_regs *regs);
34void hfp_significance_exception(struct pt_regs *regs);
35void hfp_sqrt_exception(struct pt_regs *regs);
36void hfp_underflow_exception(struct pt_regs *regs);
37void illegal_op(struct pt_regs *regs);
38void operand_exception(struct pt_regs *regs);
39void overflow_exception(struct pt_regs *regs);
40void privileged_op(struct pt_regs *regs);
41void space_switch_exception(struct pt_regs *regs);
42void special_op_exception(struct pt_regs *regs);
43void specification_exception(struct pt_regs *regs);
44void transaction_exception(struct pt_regs *regs);
45void translation_exception(struct pt_regs *regs);
46
28void do_per_trap(struct pt_regs *regs); 47void do_per_trap(struct pt_regs *regs);
29void syscall_trace(struct pt_regs *regs, int entryexit); 48void syscall_trace(struct pt_regs *regs, int entryexit);
30void kernel_stack_overflow(struct pt_regs * regs); 49void kernel_stack_overflow(struct pt_regs * regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 499e95e90f38..6d34e0c97a39 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -80,14 +80,21 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING)
80#endif 80#endif
81 .endm 81 .endm
82 82
83 .macro HANDLE_SIE_INTERCEPT scratch 83 .macro HANDLE_SIE_INTERCEPT scratch,pgmcheck
84#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 84#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
85 tmhh %r8,0x0001 # interrupting from user ? 85 tmhh %r8,0x0001 # interrupting from user ?
86 jnz .+42 86 jnz .+42
87 lgr \scratch,%r9 87 lgr \scratch,%r9
88 slg \scratch,BASED(.Lsie_loop) 88 slg \scratch,BASED(.Lsie_loop)
89 clg \scratch,BASED(.Lsie_length) 89 clg \scratch,BASED(.Lsie_length)
90 .if \pgmcheck
91 # Some program interrupts are suppressing (e.g. protection).
92 # We must also check the instruction after SIE in that case.
93 # do_protection_exception will rewind to rewind_pad
94 jh .+22
95 .else
90 jhe .+22 96 jhe .+22
97 .endif
91 lg %r9,BASED(.Lsie_loop) 98 lg %r9,BASED(.Lsie_loop)
92 SPP BASED(.Lhost_id) # set host id 99 SPP BASED(.Lhost_id) # set host id
93#endif 100#endif
@@ -262,12 +269,12 @@ sysc_work:
262 jo sysc_mcck_pending 269 jo sysc_mcck_pending
263 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 270 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
264 jo sysc_reschedule 271 jo sysc_reschedule
272 tm __TI_flags+7(%r12),_TIF_PER_TRAP
273 jo sysc_singlestep
265 tm __TI_flags+7(%r12),_TIF_SIGPENDING 274 tm __TI_flags+7(%r12),_TIF_SIGPENDING
266 jo sysc_sigpending 275 jo sysc_sigpending
267 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 276 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
268 jo sysc_notify_resume 277 jo sysc_notify_resume
269 tm __TI_flags+7(%r12),_TIF_PER_TRAP
270 jo sysc_singlestep
271 j sysc_return # beware of critical section cleanup 278 j sysc_return # beware of critical section cleanup
272 279
273# 280#
@@ -288,7 +295,6 @@ sysc_mcck_pending:
288# _TIF_SIGPENDING is set, call do_signal 295# _TIF_SIGPENDING is set, call do_signal
289# 296#
290sysc_sigpending: 297sysc_sigpending:
291 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
292 lgr %r2,%r11 # pass pointer to pt_regs 298 lgr %r2,%r11 # pass pointer to pt_regs
293 brasl %r14,do_signal 299 brasl %r14,do_signal
294 tm __TI_flags+7(%r12),_TIF_SYSCALL 300 tm __TI_flags+7(%r12),_TIF_SYSCALL
@@ -313,7 +319,7 @@ sysc_notify_resume:
313# _TIF_PER_TRAP is set, call do_per_trap 319# _TIF_PER_TRAP is set, call do_per_trap
314# 320#
315sysc_singlestep: 321sysc_singlestep:
316 ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) 322 ni __TI_flags+7(%r12),255-_TIF_PER_TRAP
317 lgr %r2,%r11 # pass pointer to pt_regs 323 lgr %r2,%r11 # pass pointer to pt_regs
318 larl %r14,sysc_return 324 larl %r14,sysc_return
319 jg do_per_trap 325 jg do_per_trap
@@ -375,7 +381,7 @@ ENTRY(pgm_check_handler)
375 lg %r12,__LC_THREAD_INFO 381 lg %r12,__LC_THREAD_INFO
376 larl %r13,system_call 382 larl %r13,system_call
377 lmg %r8,%r9,__LC_PGM_OLD_PSW 383 lmg %r8,%r9,__LC_PGM_OLD_PSW
378 HANDLE_SIE_INTERCEPT %r14 384 HANDLE_SIE_INTERCEPT %r14,1
379 tmhh %r8,0x0001 # test problem state bit 385 tmhh %r8,0x0001 # test problem state bit
380 jnz 1f # -> fault in user space 386 jnz 1f # -> fault in user space
381 tmhh %r8,0x4000 # PER bit set in old PSW ? 387 tmhh %r8,0x4000 # PER bit set in old PSW ?
@@ -413,9 +419,9 @@ ENTRY(pgm_check_handler)
413 larl %r1,pgm_check_table 419 larl %r1,pgm_check_table
414 llgh %r10,__PT_INT_CODE+2(%r11) 420 llgh %r10,__PT_INT_CODE+2(%r11)
415 nill %r10,0x007f 421 nill %r10,0x007f
416 sll %r10,3 422 sll %r10,2
417 je sysc_return 423 je sysc_return
418 lg %r1,0(%r10,%r1) # load address of handler routine 424 lgf %r1,0(%r10,%r1) # load address of handler routine
419 lgr %r2,%r11 # pass pointer to pt_regs 425 lgr %r2,%r11 # pass pointer to pt_regs
420 basr %r14,%r1 # branch to interrupt-handler 426 basr %r14,%r1 # branch to interrupt-handler
421 j sysc_return 427 j sysc_return
@@ -451,7 +457,7 @@ ENTRY(io_int_handler)
451 lg %r12,__LC_THREAD_INFO 457 lg %r12,__LC_THREAD_INFO
452 larl %r13,system_call 458 larl %r13,system_call
453 lmg %r8,%r9,__LC_IO_OLD_PSW 459 lmg %r8,%r9,__LC_IO_OLD_PSW
454 HANDLE_SIE_INTERCEPT %r14 460 HANDLE_SIE_INTERCEPT %r14,0
455 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 461 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
456 tmhh %r8,0x0001 # interrupting from user? 462 tmhh %r8,0x0001 # interrupting from user?
457 jz io_skip 463 jz io_skip
@@ -597,7 +603,7 @@ ENTRY(ext_int_handler)
597 lg %r12,__LC_THREAD_INFO 603 lg %r12,__LC_THREAD_INFO
598 larl %r13,system_call 604 larl %r13,system_call
599 lmg %r8,%r9,__LC_EXT_OLD_PSW 605 lmg %r8,%r9,__LC_EXT_OLD_PSW
600 HANDLE_SIE_INTERCEPT %r14 606 HANDLE_SIE_INTERCEPT %r14,0
601 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 607 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
602 tmhh %r8,0x0001 # interrupting from user ? 608 tmhh %r8,0x0001 # interrupting from user ?
603 jz ext_skip 609 jz ext_skip
@@ -645,7 +651,7 @@ ENTRY(mcck_int_handler)
645 lg %r12,__LC_THREAD_INFO 651 lg %r12,__LC_THREAD_INFO
646 larl %r13,system_call 652 larl %r13,system_call
647 lmg %r8,%r9,__LC_MCK_OLD_PSW 653 lmg %r8,%r9,__LC_MCK_OLD_PSW
648 HANDLE_SIE_INTERCEPT %r14 654 HANDLE_SIE_INTERCEPT %r14,0
649 tm __LC_MCCK_CODE,0x80 # system damage? 655 tm __LC_MCCK_CODE,0x80 # system damage?
650 jo mcck_panic # yes -> rest of mcck code invalid 656 jo mcck_panic # yes -> rest of mcck code invalid
651 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 657 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
@@ -944,6 +950,13 @@ ENTRY(sie64a)
944 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area 950 stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
945 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0 951 xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
946 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 952 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
953# some program checks are suppressing. C code (e.g. do_protection_exception)
954# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
955# instructions in the sie_loop should not cause program interrupts. So
956# lets use a nop (47 00 00 00) as a landing pad.
957# See also HANDLE_SIE_INTERCEPT
958rewind_pad:
959 nop 0
947sie_loop: 960sie_loop:
948 lg %r14,__LC_THREAD_INFO # pointer thread_info struct 961 lg %r14,__LC_THREAD_INFO # pointer thread_info struct
949 tm __TI_flags+7(%r14),_TIF_EXIT_SIE 962 tm __TI_flags+7(%r14),_TIF_EXIT_SIE
@@ -983,6 +996,7 @@ sie_fault:
983.Lhost_id: 996.Lhost_id:
984 .quad 0 997 .quad 0
985 998
999 EX_TABLE(rewind_pad,sie_fault)
986 EX_TABLE(sie_loop,sie_fault) 1000 EX_TABLE(sie_loop,sie_fault)
987#endif 1001#endif
988 1002
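The pgm_check_handler hunk above changes the table index shift from 3 to 2 and the load from lg to lgf. This matches the new arch/s390/kernel/pgm_check.S added further down: the table now consists of 128 32-bit .long handler addresses generated at build time instead of an array of 64-bit function pointers filled in by trap_init(). A rough C equivalent of the dispatch, as a sketch only (the real path stays in assembly, and no C declaration of the table remains after this patch):

/*
 * Sketch of what the assembly dispatch does; the extern declaration below
 * exists only for illustration.
 */
#include <linux/types.h>
#include <asm/ptrace.h>

extern const s32 pgm_check_table[128];	/* built in pgm_check.S, lives in .rodata */

static void dispatch_pgm_check(struct pt_regs *regs, unsigned int int_code)
{
	void (*handler)(struct pt_regs *);

	/* nill %r10,0x7f; sll %r10,2; lgf %r1,0(%r10,%r1); basr %r14,%r1 */
	handler = (void (*)(struct pt_regs *))(long) pgm_check_table[int_code & 0x7f];
	handler(regs);
}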
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 984726cbce16..fd8db63dfc94 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -393,30 +393,35 @@ ENTRY(startup_kdump)
393 xc 0x300(256),0x300 393 xc 0x300(256),0x300
394 xc 0xe00(256),0xe00 394 xc 0xe00(256),0xe00
395 stck __LC_LAST_UPDATE_CLOCK 395 stck __LC_LAST_UPDATE_CLOCK
396 spt 5f-.LPG0(%r13) 396 spt 6f-.LPG0(%r13)
397 mvc __LC_LAST_UPDATE_TIMER(8),5f-.LPG0(%r13) 397 mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST 398 xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST
399#ifndef CONFIG_MARCH_G5 399#ifndef CONFIG_MARCH_G5
400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} 400 # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10}
401 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list 401 .insn s,0xb2b10000,__LC_STFL_FAC_LIST # store facility list
402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ? 402 tm __LC_STFL_FAC_LIST,0x01 # stfle available ?
403 jz 0f 403 jz 0f
404 la %r0,0 404 la %r0,1
405 .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended 405 .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended
4060: l %r0,__LC_STFL_FAC_LIST 406 # verify if all required facilities are supported by the machine
407 n %r0,2f+8-.LPG0(%r13) 4070: la %r1,__LC_STFL_FAC_LIST
408 cl %r0,2f+8-.LPG0(%r13) 408 la %r2,3f+8-.LPG0(%r13)
409 jne 1f 409 l %r3,0(%r2)
410 l %r0,__LC_STFL_FAC_LIST+4 4101: l %r0,0(%r1)
411 n %r0,2f+12-.LPG0(%r13) 411 n %r0,4(%r2)
412 cl %r0,2f+12-.LPG0(%r13) 412 cl %r0,4(%r2)
413 je 3f 413 jne 2f
4141: l %r15,.Lstack-.LPG0(%r13) 414 la %r1,4(%r1)
415 la %r2,4(%r2)
416 ahi %r3,-1
417 jnz 1b
418 j 4f
4192: l %r15,.Lstack-.LPG0(%r13)
415 ahi %r15,-96 420 ahi %r15,-96
416 la %r2,.Lals_string-.LPG0(%r13) 421 la %r2,.Lals_string-.LPG0(%r13)
417 l %r3,.Lsclp_print-.LPG0(%r13) 422 l %r3,.Lsclp_print-.LPG0(%r13)
418 basr %r14,%r3 423 basr %r14,%r3
419 lpsw 2f-.LPG0(%r13) # machine type not good enough, crash 424 lpsw 3f-.LPG0(%r13) # machine type not good enough, crash
420.Lals_string: 425.Lals_string:
421 .asciz "The Linux kernel requires more recent processor hardware" 426 .asciz "The Linux kernel requires more recent processor hardware"
422.Lsclp_print: 427.Lsclp_print:
@@ -424,33 +429,42 @@ ENTRY(startup_kdump)
424.Lstack: 429.Lstack:
425 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER)) 430 .long 0x8000 + (1<<(PAGE_SHIFT+THREAD_ORDER))
426 .align 16 431 .align 16
4272: .long 0x000a0000,0x8badcccc 4323: .long 0x000a0000,0x8badcccc
433
434# List of facilities that are required. If not all facilities are present
435# the kernel will crash. Format is number of facility words with bits set,
436# followed by the facility words.
437
428#if defined(CONFIG_64BIT) 438#if defined(CONFIG_64BIT)
429#if defined(CONFIG_MARCH_Z196) 439#if defined(CONFIG_MARCH_ZEC12)
430 .long 0xc100efe3, 0xf46c0000 440 .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
441#elif defined(CONFIG_MARCH_Z196)
442 .long 2, 0xc100efe3, 0xf46c0000
431#elif defined(CONFIG_MARCH_Z10) 443#elif defined(CONFIG_MARCH_Z10)
432 .long 0xc100efe3, 0xf0680000 444 .long 2, 0xc100efe3, 0xf0680000
433#elif defined(CONFIG_MARCH_Z9_109) 445#elif defined(CONFIG_MARCH_Z9_109)
434 .long 0xc100efc3, 0x00000000 446 .long 1, 0xc100efc3
435#elif defined(CONFIG_MARCH_Z990) 447#elif defined(CONFIG_MARCH_Z990)
436 .long 0xc0002000, 0x00000000 448 .long 1, 0xc0002000
437#elif defined(CONFIG_MARCH_Z900) 449#elif defined(CONFIG_MARCH_Z900)
438 .long 0xc0000000, 0x00000000 450 .long 1, 0xc0000000
439#endif 451#endif
440#else 452#else
441#if defined(CONFIG_MARCH_Z196) 453#if defined(CONFIG_MARCH_ZEC12)
442 .long 0x8100c880, 0x00000000 454 .long 1, 0x8100c880
455#elif defined(CONFIG_MARCH_Z196)
456 .long 1, 0x8100c880
443#elif defined(CONFIG_MARCH_Z10) 457#elif defined(CONFIG_MARCH_Z10)
444 .long 0x8100c880, 0x00000000 458 .long 1, 0x8100c880
445#elif defined(CONFIG_MARCH_Z9_109) 459#elif defined(CONFIG_MARCH_Z9_109)
446 .long 0x8100c880, 0x00000000 460 .long 1, 0x8100c880
447#elif defined(CONFIG_MARCH_Z990) 461#elif defined(CONFIG_MARCH_Z990)
448 .long 0x80002000, 0x00000000 462 .long 1, 0x80002000
449#elif defined(CONFIG_MARCH_Z900) 463#elif defined(CONFIG_MARCH_Z900)
450 .long 0x80000000, 0x00000000 464 .long 1, 0x80000000
451#endif 465#endif
452#endif 466#endif
4533: 4674:
454#endif 468#endif
455 469
456#ifdef CONFIG_64BIT 470#ifdef CONFIG_64BIT
@@ -459,14 +473,14 @@ ENTRY(startup_kdump)
459 jg startup_continue 473 jg startup_continue
460#else 474#else
461 /* Continue with 31bit startup code in head31.S */ 475 /* Continue with 31bit startup code in head31.S */
462 l %r13,4f-.LPG0(%r13) 476 l %r13,5f-.LPG0(%r13)
463 b 0(%r13) 477 b 0(%r13)
464 .align 8 478 .align 8
4654: .long startup_continue 4795: .long startup_continue
466#endif 480#endif
467 481
468 .align 8 482 .align 8
4695: .long 0x7fffffff,0xffffffff 4836: .long 0x7fffffff,0xffffffff
470 484
471#include "head_kdump.S" 485#include "head_kdump.S"
472 486
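The head.S hunk above replaces the fixed two-word facility compare with a loop over a variable-length list; the new comment documents the format as a word count followed by the required facility words. A minimal C sketch of the same check, under that assumption (the real check stays in head.S assembly, with an assumed helper name):

/*
 * Sketch of the facility verification loop: the first word of 'required'
 * is the number of facility words, followed by that many 32-bit masks.
 * The machine is acceptable only if every required bit is set in the
 * stored facility list.
 */
#include <linux/types.h>

static int required_facilities_present(const u32 *stfl_list, const u32 *required)
{
	u32 nr_words = required[0];
	u32 i;

	for (i = 0; i < nr_words; i++) {
		/* equivalent of the l / n / cl / jne sequence in head.S */
		if ((stfl_list[i] & required[i + 1]) != required[i + 1])
			return 0;
	}
	return 1;
}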
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 6cdc55b26d68..bf24293970ce 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -58,6 +58,8 @@ static const struct irq_class intrclass_names[] = {
58 [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"}, 58 [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
59 [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"}, 59 [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
60 [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, 60 [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
61 [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
62 [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
61 [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"}, 63 [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
62}; 64};
63 65
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
new file mode 100644
index 000000000000..14bdecb61923
--- /dev/null
+++ b/arch/s390/kernel/pgm_check.S
@@ -0,0 +1,152 @@
1/*
2 * Program check table.
3 *
4 * Copyright IBM Corp. 2012
5 */
6
7#include <linux/linkage.h>
8
9#ifdef CONFIG_32BIT
10#define PGM_CHECK_64BIT(handler) .long default_trap_handler
11#else
12#define PGM_CHECK_64BIT(handler) .long handler
13#endif
14
15#define PGM_CHECK(handler) .long handler
16#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
17
18/*
19 * The program check table contains exactly 128 (0x00-0x7f) entries. Each
20 * line defines the 31 and/or 64 bit function to be called corresponding
21 * to the program check interruption code.
22 */
23.section .rodata, "a"
24ENTRY(pgm_check_table)
25PGM_CHECK_DEFAULT /* 00 */
26PGM_CHECK(illegal_op) /* 01 */
27PGM_CHECK(privileged_op) /* 02 */
28PGM_CHECK(execute_exception) /* 03 */
29PGM_CHECK(do_protection_exception) /* 04 */
30PGM_CHECK(addressing_exception) /* 05 */
31PGM_CHECK(specification_exception) /* 06 */
32PGM_CHECK(data_exception) /* 07 */
33PGM_CHECK(overflow_exception) /* 08 */
34PGM_CHECK(divide_exception) /* 09 */
35PGM_CHECK(overflow_exception) /* 0a */
36PGM_CHECK(divide_exception) /* 0b */
37PGM_CHECK(hfp_overflow_exception) /* 0c */
38PGM_CHECK(hfp_underflow_exception) /* 0d */
39PGM_CHECK(hfp_significance_exception) /* 0e */
40PGM_CHECK(hfp_divide_exception) /* 0f */
41PGM_CHECK(do_dat_exception) /* 10 */
42PGM_CHECK(do_dat_exception) /* 11 */
43PGM_CHECK(translation_exception) /* 12 */
44PGM_CHECK(special_op_exception) /* 13 */
45PGM_CHECK_DEFAULT /* 14 */
46PGM_CHECK(operand_exception) /* 15 */
47PGM_CHECK_DEFAULT /* 16 */
48PGM_CHECK_DEFAULT /* 17 */
49PGM_CHECK_64BIT(transaction_exception) /* 18 */
50PGM_CHECK_DEFAULT /* 19 */
51PGM_CHECK_DEFAULT /* 1a */
52PGM_CHECK_DEFAULT /* 1b */
53PGM_CHECK(space_switch_exception) /* 1c */
54PGM_CHECK(hfp_sqrt_exception) /* 1d */
55PGM_CHECK_DEFAULT /* 1e */
56PGM_CHECK_DEFAULT /* 1f */
57PGM_CHECK_DEFAULT /* 20 */
58PGM_CHECK_DEFAULT /* 21 */
59PGM_CHECK_DEFAULT /* 22 */
60PGM_CHECK_DEFAULT /* 23 */
61PGM_CHECK_DEFAULT /* 24 */
62PGM_CHECK_DEFAULT /* 25 */
63PGM_CHECK_DEFAULT /* 26 */
64PGM_CHECK_DEFAULT /* 27 */
65PGM_CHECK_DEFAULT /* 28 */
66PGM_CHECK_DEFAULT /* 29 */
67PGM_CHECK_DEFAULT /* 2a */
68PGM_CHECK_DEFAULT /* 2b */
69PGM_CHECK_DEFAULT /* 2c */
70PGM_CHECK_DEFAULT /* 2d */
71PGM_CHECK_DEFAULT /* 2e */
72PGM_CHECK_DEFAULT /* 2f */
73PGM_CHECK_DEFAULT /* 30 */
74PGM_CHECK_DEFAULT /* 31 */
75PGM_CHECK_DEFAULT /* 32 */
76PGM_CHECK_DEFAULT /* 33 */
77PGM_CHECK_DEFAULT /* 34 */
78PGM_CHECK_DEFAULT /* 35 */
79PGM_CHECK_DEFAULT /* 36 */
80PGM_CHECK_DEFAULT /* 37 */
81PGM_CHECK_64BIT(do_asce_exception) /* 38 */
82PGM_CHECK_64BIT(do_dat_exception) /* 39 */
83PGM_CHECK_64BIT(do_dat_exception) /* 3a */
84PGM_CHECK_64BIT(do_dat_exception) /* 3b */
85PGM_CHECK_DEFAULT /* 3c */
86PGM_CHECK_DEFAULT /* 3d */
87PGM_CHECK_DEFAULT /* 3e */
88PGM_CHECK_DEFAULT /* 3f */
89PGM_CHECK_DEFAULT /* 40 */
90PGM_CHECK_DEFAULT /* 41 */
91PGM_CHECK_DEFAULT /* 42 */
92PGM_CHECK_DEFAULT /* 43 */
93PGM_CHECK_DEFAULT /* 44 */
94PGM_CHECK_DEFAULT /* 45 */
95PGM_CHECK_DEFAULT /* 46 */
96PGM_CHECK_DEFAULT /* 47 */
97PGM_CHECK_DEFAULT /* 48 */
98PGM_CHECK_DEFAULT /* 49 */
99PGM_CHECK_DEFAULT /* 4a */
100PGM_CHECK_DEFAULT /* 4b */
101PGM_CHECK_DEFAULT /* 4c */
102PGM_CHECK_DEFAULT /* 4d */
103PGM_CHECK_DEFAULT /* 4e */
104PGM_CHECK_DEFAULT /* 4f */
105PGM_CHECK_DEFAULT /* 50 */
106PGM_CHECK_DEFAULT /* 51 */
107PGM_CHECK_DEFAULT /* 52 */
108PGM_CHECK_DEFAULT /* 53 */
109PGM_CHECK_DEFAULT /* 54 */
110PGM_CHECK_DEFAULT /* 55 */
111PGM_CHECK_DEFAULT /* 56 */
112PGM_CHECK_DEFAULT /* 57 */
113PGM_CHECK_DEFAULT /* 58 */
114PGM_CHECK_DEFAULT /* 59 */
115PGM_CHECK_DEFAULT /* 5a */
116PGM_CHECK_DEFAULT /* 5b */
117PGM_CHECK_DEFAULT /* 5c */
118PGM_CHECK_DEFAULT /* 5d */
119PGM_CHECK_DEFAULT /* 5e */
120PGM_CHECK_DEFAULT /* 5f */
121PGM_CHECK_DEFAULT /* 60 */
122PGM_CHECK_DEFAULT /* 61 */
123PGM_CHECK_DEFAULT /* 62 */
124PGM_CHECK_DEFAULT /* 63 */
125PGM_CHECK_DEFAULT /* 64 */
126PGM_CHECK_DEFAULT /* 65 */
127PGM_CHECK_DEFAULT /* 66 */
128PGM_CHECK_DEFAULT /* 67 */
129PGM_CHECK_DEFAULT /* 68 */
130PGM_CHECK_DEFAULT /* 69 */
131PGM_CHECK_DEFAULT /* 6a */
132PGM_CHECK_DEFAULT /* 6b */
133PGM_CHECK_DEFAULT /* 6c */
134PGM_CHECK_DEFAULT /* 6d */
135PGM_CHECK_DEFAULT /* 6e */
136PGM_CHECK_DEFAULT /* 6f */
137PGM_CHECK_DEFAULT /* 70 */
138PGM_CHECK_DEFAULT /* 71 */
139PGM_CHECK_DEFAULT /* 72 */
140PGM_CHECK_DEFAULT /* 73 */
141PGM_CHECK_DEFAULT /* 74 */
142PGM_CHECK_DEFAULT /* 75 */
143PGM_CHECK_DEFAULT /* 76 */
144PGM_CHECK_DEFAULT /* 77 */
145PGM_CHECK_DEFAULT /* 78 */
146PGM_CHECK_DEFAULT /* 79 */
147PGM_CHECK_DEFAULT /* 7a */
148PGM_CHECK_DEFAULT /* 7b */
149PGM_CHECK_DEFAULT /* 7c */
150PGM_CHECK_DEFAULT /* 7d */
151PGM_CHECK_DEFAULT /* 7e */
152PGM_CHECK_DEFAULT /* 7f */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b1f2be9aaaad..2568590973ad 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -777,40 +777,6 @@ static void __init reserve_crashkernel(void)
777#endif 777#endif
778} 778}
779 779
780static void __init init_storage_keys(unsigned long start, unsigned long end)
781{
782 unsigned long boundary, function, size;
783
784 while (start < end) {
785 if (MACHINE_HAS_EDAT2) {
786 /* set storage keys for a 2GB frame */
787 function = 0x22000 | PAGE_DEFAULT_KEY;
788 size = 1UL << 31;
789 boundary = (start + size) & ~(size - 1);
790 if (boundary <= end) {
791 do {
792 start = pfmf(function, start);
793 } while (start < boundary);
794 continue;
795 }
796 }
797 if (MACHINE_HAS_EDAT1) {
798 /* set storage keys for a 1MB frame */
799 function = 0x21000 | PAGE_DEFAULT_KEY;
800 size = 1UL << 20;
801 boundary = (start + size) & ~(size - 1);
802 if (boundary <= end) {
803 do {
804 start = pfmf(function, start);
805 } while (start < boundary);
806 continue;
807 }
808 }
809 page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
810 start += PAGE_SIZE;
811 }
812}
813
814static void __init setup_memory(void) 780static void __init setup_memory(void)
815{ 781{
816 unsigned long bootmap_size; 782 unsigned long bootmap_size;
@@ -889,7 +855,7 @@ static void __init setup_memory(void)
889 memblock_add_node(PFN_PHYS(start_chunk), 855 memblock_add_node(PFN_PHYS(start_chunk),
890 PFN_PHYS(end_chunk - start_chunk), 0); 856 PFN_PHYS(end_chunk - start_chunk), 0);
891 pfn = max(start_chunk, start_pfn); 857 pfn = max(start_chunk, start_pfn);
892 init_storage_keys(PFN_PHYS(pfn), PFN_PHYS(end_chunk)); 858 storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
893 } 859 }
894 860
895 psw_set_key(PAGE_DEFAULT_KEY); 861 psw_set_key(PAGE_DEFAULT_KEY);
@@ -1040,6 +1006,9 @@ static void __init setup_hwcaps(void)
1040 case 0x2818: 1006 case 0x2818:
1041 strcpy(elf_platform, "z196"); 1007 strcpy(elf_platform, "z196");
1042 break; 1008 break;
1009 case 0x2827:
1010 strcpy(elf_platform, "zEC12");
1011 break;
1043 } 1012 }
1044} 1013}
1045 1014
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index d1259d875074..c3ff70a7b247 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -461,6 +461,8 @@ void do_signal(struct pt_regs *regs)
461 /* Restart system call with magic TIF bit. */ 461 /* Restart system call with magic TIF bit. */
462 regs->gprs[2] = regs->orig_gpr2; 462 regs->gprs[2] = regs->orig_gpr2;
463 set_thread_flag(TIF_SYSCALL); 463 set_thread_flag(TIF_SYSCALL);
464 if (test_thread_flag(TIF_SINGLE_STEP))
465 set_thread_flag(TIF_PER_TRAP);
464 break; 466 break;
465 } 467 }
466 } 468 }
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index dd55f7c20104..f1aba87cceb8 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -29,48 +29,38 @@ struct mask_info {
29 cpumask_t mask; 29 cpumask_t mask;
30}; 30};
31 31
32static int topology_enabled = 1; 32static void set_topology_timer(void);
33static void topology_work_fn(struct work_struct *work); 33static void topology_work_fn(struct work_struct *work);
34static struct sysinfo_15_1_x *tl_info; 34static struct sysinfo_15_1_x *tl_info;
35static void set_topology_timer(void);
36static DECLARE_WORK(topology_work, topology_work_fn);
37/* topology_lock protects the core linked list */
38static DEFINE_SPINLOCK(topology_lock);
39 35
40static struct mask_info core_info; 36static int topology_enabled = 1;
41cpumask_t cpu_core_map[NR_CPUS]; 37static DECLARE_WORK(topology_work, topology_work_fn);
42unsigned char cpu_core_id[NR_CPUS];
43unsigned char cpu_socket_id[NR_CPUS];
44 38
39/* topology_lock protects the socket and book linked lists */
40static DEFINE_SPINLOCK(topology_lock);
41static struct mask_info socket_info;
45static struct mask_info book_info; 42static struct mask_info book_info;
46cpumask_t cpu_book_map[NR_CPUS]; 43
47unsigned char cpu_book_id[NR_CPUS]; 44struct cpu_topology_s390 cpu_topology[NR_CPUS];
48 45
49static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) 46static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
50{ 47{
51 cpumask_t mask; 48 cpumask_t mask;
52 49
53 cpumask_clear(&mask); 50 cpumask_copy(&mask, cpumask_of(cpu));
54 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) { 51 if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
55 cpumask_copy(&mask, cpumask_of(cpu));
56 return mask; 52 return mask;
53 for (; info; info = info->next) {
54 if (cpumask_test_cpu(cpu, &info->mask))
55 return info->mask;
57 } 56 }
58 while (info) {
59 if (cpumask_test_cpu(cpu, &info->mask)) {
60 mask = info->mask;
61 break;
62 }
63 info = info->next;
64 }
65 if (cpumask_empty(&mask))
66 cpumask_copy(&mask, cpumask_of(cpu));
67 return mask; 57 return mask;
68} 58}
69 59
70static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, 60static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
71 struct mask_info *book, 61 struct mask_info *book,
72 struct mask_info *core, 62 struct mask_info *socket,
73 int one_core_per_cpu) 63 int one_socket_per_cpu)
74{ 64{
75 unsigned int cpu; 65 unsigned int cpu;
76 66
@@ -80,28 +70,28 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
80 70
81 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin; 71 rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
82 lcpu = smp_find_processor_id(rcpu); 72 lcpu = smp_find_processor_id(rcpu);
83 if (lcpu >= 0) { 73 if (lcpu < 0)
84 cpumask_set_cpu(lcpu, &book->mask); 74 continue;
85 cpu_book_id[lcpu] = book->id; 75 cpumask_set_cpu(lcpu, &book->mask);
86 cpumask_set_cpu(lcpu, &core->mask); 76 cpu_topology[lcpu].book_id = book->id;
87 cpu_core_id[lcpu] = rcpu; 77 cpumask_set_cpu(lcpu, &socket->mask);
88 if (one_core_per_cpu) { 78 cpu_topology[lcpu].core_id = rcpu;
89 cpu_socket_id[lcpu] = rcpu; 79 if (one_socket_per_cpu) {
90 core = core->next; 80 cpu_topology[lcpu].socket_id = rcpu;
91 } else { 81 socket = socket->next;
92 cpu_socket_id[lcpu] = core->id; 82 } else {
93 } 83 cpu_topology[lcpu].socket_id = socket->id;
94 smp_cpu_set_polarization(lcpu, tl_cpu->pp);
95 } 84 }
85 smp_cpu_set_polarization(lcpu, tl_cpu->pp);
96 } 86 }
97 return core; 87 return socket;
98} 88}
99 89
100static void clear_masks(void) 90static void clear_masks(void)
101{ 91{
102 struct mask_info *info; 92 struct mask_info *info;
103 93
104 info = &core_info; 94 info = &socket_info;
105 while (info) { 95 while (info) {
106 cpumask_clear(&info->mask); 96 cpumask_clear(&info->mask);
107 info = info->next; 97 info = info->next;
@@ -120,9 +110,9 @@ static union topology_entry *next_tle(union topology_entry *tle)
120 return (union topology_entry *)((struct topology_container *)tle + 1); 110 return (union topology_entry *)((struct topology_container *)tle + 1);
121} 111}
122 112
123static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) 113static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
124{ 114{
125 struct mask_info *core = &core_info; 115 struct mask_info *socket = &socket_info;
126 struct mask_info *book = &book_info; 116 struct mask_info *book = &book_info;
127 union topology_entry *tle, *end; 117 union topology_entry *tle, *end;
128 118
@@ -135,11 +125,11 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
135 book->id = tle->container.id; 125 book->id = tle->container.id;
136 break; 126 break;
137 case 1: 127 case 1:
138 core = core->next; 128 socket = socket->next;
139 core->id = tle->container.id; 129 socket->id = tle->container.id;
140 break; 130 break;
141 case 0: 131 case 0:
142 add_cpus_to_mask(&tle->cpu, book, core, 0); 132 add_cpus_to_mask(&tle->cpu, book, socket, 0);
143 break; 133 break;
144 default: 134 default:
145 clear_masks(); 135 clear_masks();
@@ -149,9 +139,9 @@ static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
149 } 139 }
150} 140}
151 141
152static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) 142static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
153{ 143{
154 struct mask_info *core = &core_info; 144 struct mask_info *socket = &socket_info;
155 struct mask_info *book = &book_info; 145 struct mask_info *book = &book_info;
156 union topology_entry *tle, *end; 146 union topology_entry *tle, *end;
157 147
@@ -164,7 +154,7 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
164 book->id = tle->container.id; 154 book->id = tle->container.id;
165 break; 155 break;
166 case 0: 156 case 0:
167 core = add_cpus_to_mask(&tle->cpu, book, core, 1); 157 socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
168 break; 158 break;
169 default: 159 default:
170 clear_masks(); 160 clear_masks();
@@ -174,20 +164,20 @@ static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
174 } 164 }
175} 165}
176 166
177static void tl_to_cores(struct sysinfo_15_1_x *info) 167static void tl_to_masks(struct sysinfo_15_1_x *info)
178{ 168{
179 struct cpuid cpu_id; 169 struct cpuid cpu_id;
180 170
181 get_cpu_id(&cpu_id);
182 spin_lock_irq(&topology_lock); 171 spin_lock_irq(&topology_lock);
172 get_cpu_id(&cpu_id);
183 clear_masks(); 173 clear_masks();
184 switch (cpu_id.machine) { 174 switch (cpu_id.machine) {
185 case 0x2097: 175 case 0x2097:
186 case 0x2098: 176 case 0x2098:
187 __tl_to_cores_z10(info); 177 __tl_to_masks_z10(info);
188 break; 178 break;
189 default: 179 default:
190 __tl_to_cores_generic(info); 180 __tl_to_masks_generic(info);
191 } 181 }
192 spin_unlock_irq(&topology_lock); 182 spin_unlock_irq(&topology_lock);
193} 183}
@@ -232,15 +222,20 @@ int topology_set_cpu_management(int fc)
232 return rc; 222 return rc;
233} 223}
234 224
235static void update_cpu_core_map(void) 225static void update_cpu_masks(void)
236{ 226{
237 unsigned long flags; 227 unsigned long flags;
238 int cpu; 228 int cpu;
239 229
240 spin_lock_irqsave(&topology_lock, flags); 230 spin_lock_irqsave(&topology_lock, flags);
241 for_each_possible_cpu(cpu) { 231 for_each_possible_cpu(cpu) {
242 cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); 232 cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
243 cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); 233 cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
234 if (!MACHINE_HAS_TOPOLOGY) {
235 cpu_topology[cpu].core_id = cpu;
236 cpu_topology[cpu].socket_id = cpu;
237 cpu_topology[cpu].book_id = cpu;
238 }
244 } 239 }
245 spin_unlock_irqrestore(&topology_lock, flags); 240 spin_unlock_irqrestore(&topology_lock, flags);
246} 241}
@@ -260,13 +255,13 @@ int arch_update_cpu_topology(void)
260 int cpu; 255 int cpu;
261 256
262 if (!MACHINE_HAS_TOPOLOGY) { 257 if (!MACHINE_HAS_TOPOLOGY) {
263 update_cpu_core_map(); 258 update_cpu_masks();
264 topology_update_polarization_simple(); 259 topology_update_polarization_simple();
265 return 0; 260 return 0;
266 } 261 }
267 store_topology(info); 262 store_topology(info);
268 tl_to_cores(info); 263 tl_to_masks(info);
269 update_cpu_core_map(); 264 update_cpu_masks();
270 for_each_online_cpu(cpu) { 265 for_each_online_cpu(cpu) {
271 dev = get_cpu_device(cpu); 266 dev = get_cpu_device(cpu);
272 kobject_uevent(&dev->kobj, KOBJ_CHANGE); 267 kobject_uevent(&dev->kobj, KOBJ_CHANGE);
@@ -355,7 +350,7 @@ void __init s390_init_cpu_topology(void)
355 for (i = 0; i < TOPOLOGY_NR_MAG; i++) 350 for (i = 0; i < TOPOLOGY_NR_MAG; i++)
356 printk(KERN_CONT " %d", info->mag[i]); 351 printk(KERN_CONT " %d", info->mag[i]);
357 printk(KERN_CONT " / %d\n", info->mnest); 352 printk(KERN_CONT " / %d\n", info->mnest);
358 alloc_masks(info, &core_info, 1); 353 alloc_masks(info, &socket_info, 1);
359 alloc_masks(info, &book_info, 2); 354 alloc_masks(info, &book_info, 2);
360} 355}
361 356
@@ -454,7 +449,7 @@ static int __init topology_init(void)
454 } 449 }
455 set_topology_timer(); 450 set_topology_timer();
456out: 451out:
457 update_cpu_core_map(); 452 update_cpu_masks();
458 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); 453 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
459} 454}
460device_initcall(topology_init); 455device_initcall(topology_init);
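The topology.c rework above folds the former per-cpu arrays (cpu_core_map, cpu_core_id, cpu_socket_id, cpu_book_map, cpu_book_id) into a single cpu_topology[] array of structs. The struct itself is declared in asm/topology.h, which is not part of this excerpt; the sketch below only lists the fields the new code touches, with assumed types:

/*
 * Illustration only: struct cpu_topology_s390 comes from asm/topology.h,
 * which is not shown here. Field names are taken from the hunks above,
 * field types are assumptions.
 */
#include <linux/cpumask.h>
#include <linux/threads.h>

struct cpu_topology_s390 {
	unsigned short core_id;
	unsigned short socket_id;
	unsigned short book_id;
	cpumask_t core_mask;
	cpumask_t book_mask;
};

extern struct cpu_topology_s390 cpu_topology[NR_CPUS];

/* e.g. a consumer can now read the sibling mask from one place: */
static inline const struct cpumask *cpu_core_mask_sketch(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}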
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3d2b0fa37db0..70ecfc5fe8f0 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -41,8 +41,6 @@
41#include <asm/ipl.h> 41#include <asm/ipl.h>
42#include "entry.h" 42#include "entry.h"
43 43
44void (*pgm_check_table[128])(struct pt_regs *regs);
45
46int show_unhandled_signals = 1; 44int show_unhandled_signals = 1;
47 45
48#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) 46#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -350,7 +348,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
350 force_sig_info(SIGTRAP, &info, current); 348 force_sig_info(SIGTRAP, &info, current);
351} 349}
352 350
353static void default_trap_handler(struct pt_regs *regs) 351void default_trap_handler(struct pt_regs *regs)
354{ 352{
355 if (user_mode(regs)) { 353 if (user_mode(regs)) {
356 report_user_fault(regs, SIGSEGV); 354 report_user_fault(regs, SIGSEGV);
@@ -360,9 +358,9 @@ static void default_trap_handler(struct pt_regs *regs)
360} 358}
361 359
362#define DO_ERROR_INFO(name, signr, sicode, str) \ 360#define DO_ERROR_INFO(name, signr, sicode, str) \
363static void name(struct pt_regs *regs) \ 361void name(struct pt_regs *regs) \
364{ \ 362{ \
365 do_trap(regs, signr, sicode, str); \ 363 do_trap(regs, signr, sicode, str); \
366} 364}
367 365
368DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, 366DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR,
@@ -417,7 +415,7 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
417 do_trap(regs, SIGFPE, si_code, "floating point exception"); 415 do_trap(regs, SIGFPE, si_code, "floating point exception");
418} 416}
419 417
420static void __kprobes illegal_op(struct pt_regs *regs) 418void __kprobes illegal_op(struct pt_regs *regs)
421{ 419{
422 siginfo_t info; 420 siginfo_t info;
423 __u8 opcode[6]; 421 __u8 opcode[6];
@@ -536,7 +534,7 @@ DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
536 "specification exception"); 534 "specification exception");
537#endif 535#endif
538 536
539static void data_exception(struct pt_regs *regs) 537void data_exception(struct pt_regs *regs)
540{ 538{
541 __u16 __user *location; 539 __u16 __user *location;
542 int signal = 0; 540 int signal = 0;
@@ -611,7 +609,7 @@ static void data_exception(struct pt_regs *regs)
611 do_trap(regs, signal, ILL_ILLOPN, "data exception"); 609 do_trap(regs, signal, ILL_ILLOPN, "data exception");
612} 610}
613 611
614static void space_switch_exception(struct pt_regs *regs) 612void space_switch_exception(struct pt_regs *regs)
615{ 613{
616 /* Set user psw back to home space mode. */ 614 /* Set user psw back to home space mode. */
617 if (user_mode(regs)) 615 if (user_mode(regs))
@@ -629,43 +627,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
629 panic("Corrupt kernel stack, can't continue."); 627 panic("Corrupt kernel stack, can't continue.");
630} 628}
631 629
632/* init is done in lowcore.S and head.S */
633
634void __init trap_init(void) 630void __init trap_init(void)
635{ 631{
636 int i;
637
638 for (i = 0; i < 128; i++)
639 pgm_check_table[i] = &default_trap_handler;
640 pgm_check_table[1] = &illegal_op;
641 pgm_check_table[2] = &privileged_op;
642 pgm_check_table[3] = &execute_exception;
643 pgm_check_table[4] = &do_protection_exception;
644 pgm_check_table[5] = &addressing_exception;
645 pgm_check_table[6] = &specification_exception;
646 pgm_check_table[7] = &data_exception;
647 pgm_check_table[8] = &overflow_exception;
648 pgm_check_table[9] = &divide_exception;
649 pgm_check_table[0x0A] = &overflow_exception;
650 pgm_check_table[0x0B] = &divide_exception;
651 pgm_check_table[0x0C] = &hfp_overflow_exception;
652 pgm_check_table[0x0D] = &hfp_underflow_exception;
653 pgm_check_table[0x0E] = &hfp_significance_exception;
654 pgm_check_table[0x0F] = &hfp_divide_exception;
655 pgm_check_table[0x10] = &do_dat_exception;
656 pgm_check_table[0x11] = &do_dat_exception;
657 pgm_check_table[0x12] = &translation_exception;
658 pgm_check_table[0x13] = &special_op_exception;
659#ifdef CONFIG_64BIT
660 pgm_check_table[0x18] = &transaction_exception;
661 pgm_check_table[0x38] = &do_asce_exception;
662 pgm_check_table[0x39] = &do_dat_exception;
663 pgm_check_table[0x3A] = &do_dat_exception;
664 pgm_check_table[0x3B] = &do_dat_exception;
665#endif /* CONFIG_64BIT */
666 pgm_check_table[0x15] = &operand_exception;
667 pgm_check_table[0x1C] = &space_switch_exception;
668 pgm_check_table[0x1D] = &hfp_sqrt_exception;
669 /* Enable machine checks early. */
670 local_mcck_enable(); 632 local_mcck_enable();
671} 633}
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 1bea6d1f55ab..640bea12303c 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,9 +2,9 @@
2# Makefile for the linux s390-specific parts of the memory manager. 2# Makefile for the linux s390-specific parts of the memory manager.
3# 3#
4 4
5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ 5obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
6 page-states.o gup.o extable.o 6obj-y += page-states.o gup.o extable.o pageattr.o
7obj-$(CONFIG_CMM) += cmm.o 7
8obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 8obj-$(CONFIG_CMM) += cmm.o
9obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o 9obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
10obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o 10obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index cbc6668acb85..04e4892247d2 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -150,6 +150,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
150static void walk_pud_level(struct seq_file *m, struct pg_state *st, 150static void walk_pud_level(struct seq_file *m, struct pg_state *st,
151 pgd_t *pgd, unsigned long addr) 151 pgd_t *pgd, unsigned long addr)
152{ 152{
153 unsigned int prot;
153 pud_t *pud; 154 pud_t *pud;
154 int i; 155 int i;
155 156
@@ -157,7 +158,11 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
157 st->current_address = addr; 158 st->current_address = addr;
158 pud = pud_offset(pgd, addr); 159 pud = pud_offset(pgd, addr);
159 if (!pud_none(*pud)) 160 if (!pud_none(*pud))
160 walk_pmd_level(m, st, pud, addr); 161 if (pud_large(*pud)) {
162 prot = pud_val(*pud) & _PAGE_RO;
163 note_page(m, st, prot, 2);
164 } else
165 walk_pmd_level(m, st, pud, addr);
161 else 166 else
162 note_page(m, st, _PAGE_INVALID, 2); 167 note_page(m, st, _PAGE_INVALID, 2);
163 addr += PUD_SIZE; 168 addr += PUD_SIZE;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 04ad4001a289..42601d6e166f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -49,15 +49,19 @@
49#define VM_FAULT_BADCONTEXT 0x010000 49#define VM_FAULT_BADCONTEXT 0x010000
50#define VM_FAULT_BADMAP 0x020000 50#define VM_FAULT_BADMAP 0x020000
51#define VM_FAULT_BADACCESS 0x040000 51#define VM_FAULT_BADACCESS 0x040000
52#define VM_FAULT_SIGNAL 0x080000 52#define VM_FAULT_SIGNAL 0x080000
53 53
54static unsigned long store_indication; 54static unsigned long store_indication __read_mostly;
55 55
56void fault_init(void) 56#ifdef CONFIG_64BIT
57static int __init fault_init(void)
57{ 58{
58 if (test_facility(2) && test_facility(75)) 59 if (test_facility(75))
59 store_indication = 0xc00; 60 store_indication = 0xc00;
61 return 0;
60} 62}
63early_initcall(fault_init);
64#endif
61 65
62static inline int notify_page_fault(struct pt_regs *regs) 66static inline int notify_page_fault(struct pt_regs *regs)
63{ 67{
@@ -273,10 +277,16 @@ static inline int do_exception(struct pt_regs *regs, int access)
273 unsigned int flags; 277 unsigned int flags;
274 int fault; 278 int fault;
275 279
280 tsk = current;
281 /*
282 * The instruction that caused the program check has
283 * been nullified. Don't signal single step via SIGTRAP.
284 */
285 clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
286
276 if (notify_page_fault(regs)) 287 if (notify_page_fault(regs))
277 return 0; 288 return 0;
278 289
279 tsk = current;
280 mm = tsk->mm; 290 mm = tsk->mm;
281 trans_exc_code = regs->int_parm_long; 291 trans_exc_code = regs->int_parm_long;
282 292
@@ -372,11 +382,6 @@ retry:
372 goto retry; 382 goto retry;
373 } 383 }
374 } 384 }
375 /*
376 * The instruction that caused the program check will
377 * be repeated. Don't signal single step via SIGTRAP.
378 */
379 clear_tsk_thread_flag(tsk, TIF_PER_TRAP);
380 fault = 0; 385 fault = 0;
381out_up: 386out_up:
382 up_read(&mm->mmap_sem); 387 up_read(&mm->mmap_sem);
@@ -423,6 +428,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs)
423 struct vm_area_struct *vma; 428 struct vm_area_struct *vma;
424 unsigned long trans_exc_code; 429 unsigned long trans_exc_code;
425 430
431 /*
432 * The instruction that caused the program check has
433 * been nullified. Don't signal single step via SIGTRAP.
434 */
435 clear_tsk_thread_flag(current, TIF_PER_TRAP);
436
426 trans_exc_code = regs->int_parm_long; 437 trans_exc_code = regs->int_parm_long;
427 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) 438 if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
428 goto no_context; 439 goto no_context;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81e596c65dee..ae672f41c464 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -125,7 +125,6 @@ void __init paging_init(void)
125 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); 125 max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
126 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 126 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
127 free_area_init_nodes(max_zone_pfns); 127 free_area_init_nodes(max_zone_pfns);
128 fault_init();
129} 128}
130 129
131void __init mem_init(void) 130void __init mem_init(void)
@@ -159,34 +158,6 @@ void __init mem_init(void)
159 PFN_ALIGN((unsigned long)&_eshared) - 1); 158 PFN_ALIGN((unsigned long)&_eshared) - 1);
160} 159}
161 160
162#ifdef CONFIG_DEBUG_PAGEALLOC
163void kernel_map_pages(struct page *page, int numpages, int enable)
164{
165 pgd_t *pgd;
166 pud_t *pud;
167 pmd_t *pmd;
168 pte_t *pte;
169 unsigned long address;
170 int i;
171
172 for (i = 0; i < numpages; i++) {
173 address = page_to_phys(page + i);
174 pgd = pgd_offset_k(address);
175 pud = pud_offset(pgd, address);
176 pmd = pmd_offset(pud, address);
177 pte = pte_offset_kernel(pmd, address);
178 if (!enable) {
179 __ptep_ipte(address, pte);
180 pte_val(*pte) = _PAGE_TYPE_EMPTY;
181 continue;
182 }
183 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
184 /* Flush cpu write queue. */
185 mb();
186 }
187}
188#endif
189
190void free_init_pages(char *what, unsigned long begin, unsigned long end) 161void free_init_pages(char *what, unsigned long begin, unsigned long end)
191{ 162{
192 unsigned long addr = begin; 163 unsigned long addr = begin;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 00be01c4b4f3..29ccee3651f4 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -2,11 +2,46 @@
2 * Copyright IBM Corp. 2011 2 * Copyright IBM Corp. 2011
3 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com> 3 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
4 */ 4 */
5#include <linux/hugetlb.h>
5#include <linux/module.h> 6#include <linux/module.h>
6#include <linux/mm.h> 7#include <linux/mm.h>
7#include <linux/hugetlb.h>
8#include <asm/cacheflush.h> 8#include <asm/cacheflush.h>
9#include <asm/pgtable.h> 9#include <asm/pgtable.h>
10#include <asm/page.h>
11
12void storage_key_init_range(unsigned long start, unsigned long end)
13{
14 unsigned long boundary, function, size;
15
16 while (start < end) {
17 if (MACHINE_HAS_EDAT2) {
18 /* set storage keys for a 2GB frame */
19 function = 0x22000 | PAGE_DEFAULT_KEY;
20 size = 1UL << 31;
21 boundary = (start + size) & ~(size - 1);
22 if (boundary <= end) {
23 do {
24 start = pfmf(function, start);
25 } while (start < boundary);
26 continue;
27 }
28 }
29 if (MACHINE_HAS_EDAT1) {
30 /* set storage keys for a 1MB frame */
31 function = 0x21000 | PAGE_DEFAULT_KEY;
32 size = 1UL << 20;
33 boundary = (start + size) & ~(size - 1);
34 if (boundary <= end) {
35 do {
36 start = pfmf(function, start);
37 } while (start < boundary);
38 continue;
39 }
40 }
41 page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
42 start += PAGE_SIZE;
43 }
44}
10 45
11static pte_t *walk_page_table(unsigned long addr) 46static pte_t *walk_page_table(unsigned long addr)
12{ 47{
@@ -19,7 +54,7 @@ static pte_t *walk_page_table(unsigned long addr)
19 if (pgd_none(*pgdp)) 54 if (pgd_none(*pgdp))
20 return NULL; 55 return NULL;
21 pudp = pud_offset(pgdp, addr); 56 pudp = pud_offset(pgdp, addr);
22 if (pud_none(*pudp)) 57 if (pud_none(*pudp) || pud_large(*pudp))
23 return NULL; 58 return NULL;
24 pmdp = pmd_offset(pudp, addr); 59 pmdp = pmd_offset(pudp, addr);
25 if (pmd_none(*pmdp) || pmd_large(*pmdp)) 60 if (pmd_none(*pmdp) || pmd_large(*pmdp))
@@ -70,3 +105,46 @@ int set_memory_x(unsigned long addr, int numpages)
70{ 105{
71 return 0; 106 return 0;
72} 107}
108
109#ifdef CONFIG_DEBUG_PAGEALLOC
110void kernel_map_pages(struct page *page, int numpages, int enable)
111{
112 unsigned long address;
113 pgd_t *pgd;
114 pud_t *pud;
115 pmd_t *pmd;
116 pte_t *pte;
117 int i;
118
119 for (i = 0; i < numpages; i++) {
120 address = page_to_phys(page + i);
121 pgd = pgd_offset_k(address);
122 pud = pud_offset(pgd, address);
123 pmd = pmd_offset(pud, address);
124 pte = pte_offset_kernel(pmd, address);
125 if (!enable) {
126 __ptep_ipte(address, pte);
127 pte_val(*pte) = _PAGE_TYPE_EMPTY;
128 continue;
129 }
130 *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
131 }
132}
133
134#ifdef CONFIG_HIBERNATION
135bool kernel_page_present(struct page *page)
136{
137 unsigned long addr;
138 int cc;
139
140 addr = page_to_phys(page);
141 asm volatile(
142 " lra %1,0(%1)\n"
143 " ipm %0\n"
144 " srl %0,28"
145 : "=d" (cc), "+a" (addr) : : "cc");
146 return cc == 0;
147}
148#endif /* CONFIG_HIBERNATION */
149
150#endif /* CONFIG_DEBUG_PAGEALLOC */
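
The storage_key_init_range() helper added above leans on one piece of arithmetic: (start + size) & ~(size - 1) yields the next size-aligned address strictly above start, i.e. the end of the frame the loop is currently working on, and a large frame is only used when that boundary still lies within the range. A small stand-alone sketch of that computation (ordinary C, hypothetical values, nothing s390-specific):

#include <stdio.h>

int main(void)
{
	unsigned long start = 0x00300000UL;              /* 3 MB */
	unsigned long size  = 1UL << 31;                 /* 2 GB frame, EDAT2 case */
	unsigned long boundary = (start + size) & ~(size - 1);

	/* first 2 GB-aligned address above start: 0x80000000 */
	printf("next 2G boundary above %#lx is %#lx\n", start, boundary);

	size = 1UL << 20;                                /* 1 MB frame, EDAT1 case */
	boundary = (start + size) & ~(size - 1);
	/* start is already 1 MB aligned, so this is start + 1 MB: 0x400000 */
	printf("next 1M boundary above %#lx is %#lx\n", start, boundary);
	return 0;
}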
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index c8188a18af05..ae44d2a34313 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -881,22 +881,6 @@ int s390_enable_sie(void)
881} 881}
882EXPORT_SYMBOL_GPL(s390_enable_sie); 882EXPORT_SYMBOL_GPL(s390_enable_sie);
883 883
884#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
885bool kernel_page_present(struct page *page)
886{
887 unsigned long addr;
888 int cc;
889
890 addr = page_to_phys(page);
891 asm volatile(
892 " lra %1,0(%1)\n"
893 " ipm %0\n"
894 " srl %0,28"
895 : "=d" (cc), "+a" (addr) : : "cc");
896 return cc == 0;
897}
898#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */
899
900#ifdef CONFIG_TRANSPARENT_HUGEPAGE 884#ifdef CONFIG_TRANSPARENT_HUGEPAGE
901int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, 885int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
902 pmd_t *pmdp) 886 pmd_t *pmdp)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 387c7c60b5b8..6ed1426d27c5 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -89,6 +89,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
89 int ret = -ENOMEM; 89 int ret = -ENOMEM;
90 90
91 while (address < end) { 91 while (address < end) {
92 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
92 pg_dir = pgd_offset_k(address); 93 pg_dir = pgd_offset_k(address);
93 if (pgd_none(*pg_dir)) { 94 if (pgd_none(*pg_dir)) {
94 pu_dir = vmem_pud_alloc(); 95 pu_dir = vmem_pud_alloc();
@@ -96,18 +97,24 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
96 goto out; 97 goto out;
97 pgd_populate(&init_mm, pg_dir, pu_dir); 98 pgd_populate(&init_mm, pg_dir, pu_dir);
98 } 99 }
99
100 pu_dir = pud_offset(pg_dir, address); 100 pu_dir = pud_offset(pg_dir, address);
101#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
102 if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
103 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
104 pte_val(pte) |= _REGION3_ENTRY_LARGE;
105 pte_val(pte) |= _REGION_ENTRY_TYPE_R3;
106 pud_val(*pu_dir) = pte_val(pte);
107 address += PUD_SIZE;
108 continue;
109 }
110#endif
101 if (pud_none(*pu_dir)) { 111 if (pud_none(*pu_dir)) {
102 pm_dir = vmem_pmd_alloc(); 112 pm_dir = vmem_pmd_alloc();
103 if (!pm_dir) 113 if (!pm_dir)
104 goto out; 114 goto out;
105 pud_populate(&init_mm, pu_dir, pm_dir); 115 pud_populate(&init_mm, pu_dir, pm_dir);
106 } 116 }
107
108 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
109 pm_dir = pmd_offset(pu_dir, address); 117 pm_dir = pmd_offset(pu_dir, address);
110
111#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC) 118#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
112 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address && 119 if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
113 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 120 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
@@ -160,6 +167,11 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
160 address += PUD_SIZE; 167 address += PUD_SIZE;
161 continue; 168 continue;
162 } 169 }
170 if (pud_large(*pu_dir)) {
171 pud_clear(pu_dir);
172 address += PUD_SIZE;
173 continue;
174 }
163 pm_dir = pmd_offset(pu_dir, address); 175 pm_dir = pmd_offset(pu_dir, address);
164 if (pmd_none(*pm_dir)) { 176 if (pmd_none(*pm_dir)) {
165 address += PMD_SIZE; 177 address += PMD_SIZE;
@@ -193,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
193 start_addr = (unsigned long) start; 205 start_addr = (unsigned long) start;
194 end_addr = (unsigned long) (start + nr); 206 end_addr = (unsigned long) (start + nr);
195 207
196 for (address = start_addr; address < end_addr; address += PAGE_SIZE) { 208 for (address = start_addr; address < end_addr;) {
197 pg_dir = pgd_offset_k(address); 209 pg_dir = pgd_offset_k(address);
198 if (pgd_none(*pg_dir)) { 210 if (pgd_none(*pg_dir)) {
199 pu_dir = vmem_pud_alloc(); 211 pu_dir = vmem_pud_alloc();
@@ -212,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
212 224
213 pm_dir = pmd_offset(pu_dir, address); 225 pm_dir = pmd_offset(pu_dir, address);
214 if (pmd_none(*pm_dir)) { 226 if (pmd_none(*pm_dir)) {
227#ifdef CONFIG_64BIT
228 /* Use 1MB frames for vmemmap if available. We always
229 * use large frames even if they are only partially
230 * used.
231 * Otherwise we would also need page tables, since
232 * vmemmap_populate gets called for each section
233 * separately. */
234 if (MACHINE_HAS_EDAT1) {
235 void *new_page;
236
237 new_page = vmemmap_alloc_block(PMD_SIZE, node);
238 if (!new_page)
239 goto out;
240 pte = mk_pte_phys(__pa(new_page), PAGE_RW);
241 pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
242 pmd_val(*pm_dir) = pte_val(pte);
243 address = (address + PMD_SIZE) & PMD_MASK;
244 continue;
245 }
246#endif
215 pt_dir = vmem_pte_alloc(address); 247 pt_dir = vmem_pte_alloc(address);
216 if (!pt_dir) 248 if (!pt_dir)
217 goto out; 249 goto out;
218 pmd_populate(&init_mm, pm_dir, pt_dir); 250 pmd_populate(&init_mm, pm_dir, pt_dir);
251 } else if (pmd_large(*pm_dir)) {
252 address = (address + PMD_SIZE) & PMD_MASK;
253 continue;
219 } 254 }
220 255
221 pt_dir = pte_offset_kernel(pm_dir, address); 256 pt_dir = pte_offset_kernel(pm_dir, address);
@@ -228,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
228 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); 263 pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
229 *pt_dir = pte; 264 *pt_dir = pte;
230 } 265 }
266 address += PAGE_SIZE;
231 } 267 }
232 memset(start, 0, nr * sizeof(struct page)); 268 memset(start, 0, nr * sizeof(struct page));
233 ret = 0; 269 ret = 0;
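
The EDAT1/EDAT2 branches added to vmem_add_mem() and vmemmap_populate() all gate the large mapping on the same three conditions: the address is non-zero, it is aligned to the frame size, and the whole frame still fits below the end of the range. A minimal sketch of that predicate, with FRAME_SIZE standing in for PUD_SIZE (2 GB) or PMD_SIZE (1 MB):

#include <stdbool.h>

#define FRAME_SIZE	(1UL << 20)			/* assume the 1 MB (EDAT1) case */

static bool can_map_large(unsigned long address, unsigned long end)
{
	return address &&				/* address 0 is never mapped large */
	       !(address & (FRAME_SIZE - 1)) &&		/* frame aligned */
	       address + FRAME_SIZE <= end;		/* whole frame is in range */
}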
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9b355b406afa..bb284419b0fd 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -341,6 +341,27 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
341 /* lr %r5,%r4 */ 341 /* lr %r5,%r4 */
342 EMIT2(0x1854); 342 EMIT2(0x1854);
343 break; 343 break;
344 case BPF_S_ALU_MOD_X: /* A %= X */
345 jit->seen |= SEEN_XREG | SEEN_RET0;
346 /* ltr %r12,%r12 */
347 EMIT2(0x12cc);
348 /* jz <ret0> */
349 EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
350 /* lhi %r4,0 */
351 EMIT4(0xa7480000);
352 /* dr %r4,%r12 */
353 EMIT2(0x1d4c);
354 /* lr %r5,%r4 */
355 EMIT2(0x1854);
356 break;
357 case BPF_S_ALU_MOD_K: /* A %= K */
358 /* lhi %r4,0 */
359 EMIT4(0xa7480000);
360 /* d %r4,<d(K)>(%r13) */
361 EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
362 /* lr %r5,%r4 */
363 EMIT2(0x1854);
364 break;
344 case BPF_S_ALU_AND_X: /* A &= X */ 365 case BPF_S_ALU_AND_X: /* A &= X */
345 jit->seen |= SEEN_XREG; 366 jit->seen |= SEEN_XREG;
346 /* nr %r5,%r12 */ 367 /* nr %r5,%r12 */
@@ -368,10 +389,17 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
368 EMIT4_DISP(0x5650d000, EMIT_CONST(K)); 389 EMIT4_DISP(0x5650d000, EMIT_CONST(K));
369 break; 390 break;
370 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ 391 case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
392 case BPF_S_ALU_XOR_X:
371 jit->seen |= SEEN_XREG; 393 jit->seen |= SEEN_XREG;
372 /* xr %r5,%r12 */ 394 /* xr %r5,%r12 */
373 EMIT2(0x175c); 395 EMIT2(0x175c);
374 break; 396 break;
397 case BPF_S_ALU_XOR_K: /* A ^= K */
398 if (!K)
399 break;
400 /* x %r5,<d(K)>(%r13) */
401 EMIT4_DISP(0x5750d000, EMIT_CONST(K));
402 break;
375 case BPF_S_ALU_LSH_X: /* A <<= X; */ 403 case BPF_S_ALU_LSH_X: /* A <<= X; */
376 jit->seen |= SEEN_XREG; 404 jit->seen |= SEEN_XREG;
377 /* sll %r5,0(%r12) */ 405 /* sll %r5,0(%r12) */
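
In this JIT %r5 holds the accumulator A and %r12 holds X (see the existing AND/XOR cases), so the new BPF_S_ALU_MOD_X sequence branches to the return-0 path when X is zero, clears %r4, divides the %r4:%r5 pair with dr (remainder in the even register %r4, quotient in %r5), then copies the remainder back into A. A rough C model of the MOD_X case, under that reading of the register convention:

/* Approximate C model of the emitted BPF_S_ALU_MOD_X sequence. */
static unsigned int bpf_mod_x(unsigned int a, unsigned int x, int *ret0)
{
	if (x == 0) {		/* ltr %r12,%r12 ; jz <ret0> */
		*ret0 = 1;	/* filter returns 0 */
		return 0;
	}
	/* lhi %r4,0 ; dr %r4,%r12 ; lr %r5,%r4 */
	return a % x;
}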
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
new file mode 100644
index 000000000000..ab0827b6bc4b
--- /dev/null
+++ b/arch/s390/pci/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the s390 PCI subsystem.
3#
4
5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o \
6 pci_sysfs.o pci_event.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
new file mode 100644
index 000000000000..7ed38e5e3028
--- /dev/null
+++ b/arch/s390/pci/pci.c
@@ -0,0 +1,1103 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 *
7 * The System z PCI code is a rewrite from a prototype by
8 * the following people (Kudoz!):
9 * Alexander Schmidt
10 * Christoph Raisch
11 * Hannes Hering
12 * Hoang-Nam Nguyen
13 * Jan-Bernd Themann
14 * Stefan Roscher
15 * Thomas Klein
16 */
17
18#define COMPONENT "zPCI"
19#define pr_fmt(fmt) COMPONENT ": " fmt
20
21#include <linux/kernel.h>
22#include <linux/slab.h>
23#include <linux/err.h>
24#include <linux/export.h>
25#include <linux/delay.h>
26#include <linux/irq.h>
27#include <linux/kernel_stat.h>
28#include <linux/seq_file.h>
29#include <linux/pci.h>
30#include <linux/msi.h>
31
32#include <asm/isc.h>
33#include <asm/airq.h>
34#include <asm/facility.h>
35#include <asm/pci_insn.h>
36#include <asm/pci_clp.h>
37#include <asm/pci_dma.h>
38
39#define DEBUG /* enable pr_debug */
40
41#define SIC_IRQ_MODE_ALL 0
42#define SIC_IRQ_MODE_SINGLE 1
43
44#define ZPCI_NR_DMA_SPACES 1
45#define ZPCI_MSI_VEC_BITS 6
46#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
47
48/* list of all detected zpci devices */
49LIST_HEAD(zpci_list);
50EXPORT_SYMBOL_GPL(zpci_list);
51DEFINE_MUTEX(zpci_list_lock);
52EXPORT_SYMBOL_GPL(zpci_list_lock);
53
54struct pci_hp_callback_ops hotplug_ops;
55EXPORT_SYMBOL_GPL(hotplug_ops);
56
57static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
58static DEFINE_SPINLOCK(zpci_domain_lock);
59
60struct callback {
61 irq_handler_t handler;
62 void *data;
63};
64
65struct zdev_irq_map {
66 unsigned long aibv; /* AI bit vector */
67 int msi_vecs; /* consecutive MSI-vectors used */
68 int __unused;
69 struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
70 spinlock_t lock; /* protect callbacks against de-reg */
71};
72
73struct intr_bucket {
74 /* amap of adapters, one bit per dev, corresponds to one irq nr */
75 unsigned long *alloc;
76 /* AI summary bit, global page for all devices */
77 unsigned long *aisb;
78 /* pointer to aibv and callback data in zdev */
79 struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
80 /* protects the whole bucket struct */
81 spinlock_t lock;
82};
83
84static struct intr_bucket *bucket;
85
86/* Adapter local summary indicator */
87static u8 *zpci_irq_si;
88
89static atomic_t irq_retries = ATOMIC_INIT(0);
90
91/* I/O Map */
92static DEFINE_SPINLOCK(zpci_iomap_lock);
93static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
94struct zpci_iomap_entry *zpci_iomap_start;
95EXPORT_SYMBOL_GPL(zpci_iomap_start);
96
97/* highest irq summary bit */
98static int __read_mostly aisb_max;
99
100static struct kmem_cache *zdev_irq_cache;
101
102static inline int irq_to_msi_nr(unsigned int irq)
103{
104 return irq & ZPCI_MSI_MASK;
105}
106
107static inline int irq_to_dev_nr(unsigned int irq)
108{
109 return irq >> ZPCI_MSI_VEC_BITS;
110}
111
112static inline struct zdev_irq_map *get_imap(unsigned int irq)
113{
114 return bucket->imap[irq_to_dev_nr(irq)];
115}
116
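/*
 * Sketch of the irq number layout implied by the two helpers above, assuming
 * ZPCI_MSI_MASK is (1 << ZPCI_MSI_VEC_BITS) - 1 == 0x3f (the mask is defined
 * in a header outside this hunk): the low 6 bits select the MSI vector and
 * the upper bits select the device's summary-bit index, matching the
 * msi_nr = aisb << ZPCI_MSI_VEC_BITS encoding used in zpci_setup_msi() below.
 *
 *	irq    == (dev_nr << ZPCI_MSI_VEC_BITS) | msi_nr;
 *	msi_nr == irq & 0x3f;
 *	dev_nr == irq >> ZPCI_MSI_VEC_BITS;
 */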
117struct zpci_dev *get_zdev(struct pci_dev *pdev)
118{
119 return (struct zpci_dev *) pdev->sysdata;
120}
121
122struct zpci_dev *get_zdev_by_fid(u32 fid)
123{
124 struct zpci_dev *tmp, *zdev = NULL;
125
126 mutex_lock(&zpci_list_lock);
127 list_for_each_entry(tmp, &zpci_list, entry) {
128 if (tmp->fid == fid) {
129 zdev = tmp;
130 break;
131 }
132 }
133 mutex_unlock(&zpci_list_lock);
134 return zdev;
135}
136
137bool zpci_fid_present(u32 fid)
138{
139 return (get_zdev_by_fid(fid) != NULL) ? true : false;
140}
141
142static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
143{
144 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
145}
146
147int pci_domain_nr(struct pci_bus *bus)
148{
149 return ((struct zpci_dev *) bus->sysdata)->domain;
150}
151EXPORT_SYMBOL_GPL(pci_domain_nr);
152
153int pci_proc_domain(struct pci_bus *bus)
154{
155 return pci_domain_nr(bus);
156}
157EXPORT_SYMBOL_GPL(pci_proc_domain);
158
159/* Store PCI function information block */
160static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
161{
162 struct zpci_fib *fib;
163 u8 status, cc;
164
165 fib = (void *) get_zeroed_page(GFP_KERNEL);
166 if (!fib)
167 return -ENOMEM;
168
169 do {
170 cc = __stpcifc(zdev->fh, 0, fib, &status);
171 if (cc == 2) {
172 msleep(ZPCI_INSN_BUSY_DELAY);
173 memset(fib, 0, PAGE_SIZE);
174 }
175 } while (cc == 2);
176
177 if (cc)
178 pr_err_once("%s: cc: %u status: %u\n",
179 __func__, cc, status);
180
181 /* Return PCI function controls */
182 *fc = fib->fc;
183
184 free_page((unsigned long) fib);
185 return (cc) ? -EIO : 0;
186}
187
188/* Modify PCI: Register adapter interruptions */
189static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
190 u64 aibv)
191{
192 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
193 struct zpci_fib *fib;
194 int rc;
195
196 fib = (void *) get_zeroed_page(GFP_KERNEL);
197 if (!fib)
198 return -ENOMEM;
199
200 fib->isc = PCI_ISC;
201 fib->noi = zdev->irq_map->msi_vecs;
202 fib->sum = 1; /* enable summary notifications */
203 fib->aibv = aibv;
204 fib->aibvo = 0; /* every function has its own page */
205 fib->aisb = (u64) bucket->aisb + aisb / 8;
206 fib->aisbo = aisb & ZPCI_MSI_MASK;
207
208 rc = mpcifc_instr(req, fib);
209 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
210
211 free_page((unsigned long) fib);
212 return rc;
213}
214
215struct mod_pci_args {
216 u64 base;
217 u64 limit;
218 u64 iota;
219};
220
221static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
222{
223 u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
224 struct zpci_fib *fib;
225 int rc;
226
227 /* The FIB must be available even if it's not used */
228 fib = (void *) get_zeroed_page(GFP_KERNEL);
229 if (!fib)
230 return -ENOMEM;
231
232 fib->pba = args->base;
233 fib->pal = args->limit;
234 fib->iota = args->iota;
235
236 rc = mpcifc_instr(req, fib);
237 free_page((unsigned long) fib);
238 return rc;
239}
240
241/* Modify PCI: Register I/O address translation parameters */
242int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
243 u64 base, u64 limit, u64 iota)
244{
245 struct mod_pci_args args = { base, limit, iota };
246
247 WARN_ON_ONCE(iota & 0x3fff);
248 args.iota |= ZPCI_IOTA_RTTO_FLAG;
249 return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
250}
251
252/* Modify PCI: Unregister I/O address translation parameters */
253int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
254{
255 struct mod_pci_args args = { 0, 0, 0 };
256
257 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
258}
259
260/* Modify PCI: Unregister adapter interruptions */
261static int zpci_unregister_airq(struct zpci_dev *zdev)
262{
263 struct mod_pci_args args = { 0, 0, 0 };
264
265 return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
266}
267
268#define ZPCI_PCIAS_CFGSPC 15
269
270static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
271{
272 u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
273 u64 data;
274 int rc;
275
276 rc = pcilg_instr(&data, req, offset);
277 data = data << ((8 - len) * 8);
278 data = le64_to_cpu(data);
279 if (!rc)
280 *val = (u32) data;
281 else
282 *val = 0xffffffff;
283 return rc;
284}
285
286static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
287{
288 u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
289 u64 data = val;
290 int rc;
291
292 data = cpu_to_le64(data);
293 data = data >> ((8 - len) * 8);
294 rc = pcistg_instr(data, req, offset);
295 return rc;
296}
297
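/*
 * Worked example of the shifting in zpci_cfg_load()/zpci_cfg_store() above,
 * assuming pcilg/pcistg transfer the 'len' bytes right-aligned in the 64-bit
 * register in storage (big-endian) order. Reading a 2-byte vendor ID 0x1014,
 * stored little-endian in config space as the bytes 14 10:
 *
 *	pcilg returns            data = 0x0000000000001410
 *	data <<= (8 - 2) * 8  -> data = 0x1410000000000000
 *	le64_to_cpu(data)     -> data = 0x0000000000001014   (byte swap on s390)
 *	(u32) data            -> 0x1014, the CPU-native value
 *
 * The store path applies the same two steps in reverse order.
 */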
298void synchronize_irq(unsigned int irq)
299{
300 /*
301 * Not needed: the handler is protected by a lock, and IRQs that occur
302 * after the handler is deleted are just NOPs.
303 */
304}
305EXPORT_SYMBOL_GPL(synchronize_irq);
306
307void enable_irq(unsigned int irq)
308{
309 struct msi_desc *msi = irq_get_msi_desc(irq);
310
311 zpci_msi_set_mask_bits(msi, 1, 0);
312}
313EXPORT_SYMBOL_GPL(enable_irq);
314
315void disable_irq(unsigned int irq)
316{
317 struct msi_desc *msi = irq_get_msi_desc(irq);
318
319 zpci_msi_set_mask_bits(msi, 1, 1);
320}
321EXPORT_SYMBOL_GPL(disable_irq);
322
323void disable_irq_nosync(unsigned int irq)
324{
325 disable_irq(irq);
326}
327EXPORT_SYMBOL_GPL(disable_irq_nosync);
328
329unsigned long probe_irq_on(void)
330{
331 return 0;
332}
333EXPORT_SYMBOL_GPL(probe_irq_on);
334
335int probe_irq_off(unsigned long val)
336{
337 return 0;
338}
339EXPORT_SYMBOL_GPL(probe_irq_off);
340
341unsigned int probe_irq_mask(unsigned long val)
342{
343 return val;
344}
345EXPORT_SYMBOL_GPL(probe_irq_mask);
346
347void __devinit pcibios_fixup_bus(struct pci_bus *bus)
348{
349}
350
351resource_size_t pcibios_align_resource(void *data, const struct resource *res,
352 resource_size_t size,
353 resource_size_t align)
354{
355 return 0;
356}
357
358/* combine single writes by using store-block insn */
359void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
360{
361 zpci_memcpy_toio(to, from, count);
362}
363
364/* Create a virtual mapping cookie for a PCI BAR */
365void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
366{
367 struct zpci_dev *zdev = get_zdev(pdev);
368 u64 addr;
369 int idx;
370
371 if ((bar & 7) != bar)
372 return NULL;
373
374 idx = zdev->bars[bar].map_idx;
375 spin_lock(&zpci_iomap_lock);
376 zpci_iomap_start[idx].fh = zdev->fh;
377 zpci_iomap_start[idx].bar = bar;
378 spin_unlock(&zpci_iomap_lock);
379
380 addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
381 return (void __iomem *) addr;
382}
383EXPORT_SYMBOL_GPL(pci_iomap);
384
385void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
386{
387 unsigned int idx;
388
389 idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
390 spin_lock(&zpci_iomap_lock);
391 zpci_iomap_start[idx].fh = 0;
392 zpci_iomap_start[idx].bar = 0;
393 spin_unlock(&zpci_iomap_lock);
394}
395EXPORT_SYMBOL_GPL(pci_iounmap);
396
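/*
 * The value handed out by pci_iomap() above is a cookie rather than a CPU
 * mapping: the iomap table index sits in the bits above bit 47, tagged with
 * ZPCI_IOMAP_ADDR_BASE, and pci_iounmap() simply reverses the encoding so the
 * I/O access helpers can look up the function handle and BAR per access.
 * Round trip (idx assumed to fit below bit 48):
 *
 *	u64 cookie        = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
 *	unsigned int idx2 = (cookie & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
 *	// idx2 == idx
 */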
397static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
398 int size, u32 *val)
399{
400 struct zpci_dev *zdev = get_zdev_by_bus(bus);
401
402 if (!zdev || devfn != ZPCI_DEVFN)
403 return 0;
404 return zpci_cfg_load(zdev, where, val, size);
405}
406
407static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
408 int size, u32 val)
409{
410 struct zpci_dev *zdev = get_zdev_by_bus(bus);
411
412 if (!zdev || devfn != ZPCI_DEVFN)
413 return 0;
414 return zpci_cfg_store(zdev, where, val, size);
415}
416
417static struct pci_ops pci_root_ops = {
418 .read = pci_read,
419 .write = pci_write,
420};
421
422/* store the last handled bit to implement fair scheduling of devices */
423static DEFINE_PER_CPU(unsigned long, next_sbit);
424
425static void zpci_irq_handler(void *dont, void *need)
426{
427 unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
428 int rescan = 0, max = aisb_max;
429 struct zdev_irq_map *imap;
430
431 kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
432 sbit = start;
433
434scan:
435 /* find summary_bit */
436 for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
437 clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
438 last = sbit;
439
440 /* find vector bit */
441 imap = bucket->imap[sbit];
442 for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
443 kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
444 clear_bit(63 - mbit, &imap->aibv);
445
446 spin_lock(&imap->lock);
447 if (imap->cb[mbit].handler)
448 imap->cb[mbit].handler(mbit,
449 imap->cb[mbit].data);
450 spin_unlock(&imap->lock);
451 }
452 }
453
454 if (rescan)
455 goto out;
456
457 /* scan the skipped bits */
458 if (start > 0) {
459 sbit = 0;
460 max = start;
461 start = 0;
462 goto scan;
463 }
464
465 /* enable interrupts again */
466 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
467
468 /* check again to not lose initiative */
469 rmb();
470 max = aisb_max;
471 sbit = find_first_bit_left(bucket->aisb, max);
472 if (sbit != max) {
473 atomic_inc(&irq_retries);
474 rescan++;
475 goto scan;
476 }
477out:
478 /* store next device bit to scan */
479 __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
480}
481
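/*
 * Shape of the scan in zpci_irq_handler() above, in pseudo-C: start at the
 * per-cpu resume point so low-numbered devices do not always win, wrap around
 * to cover the skipped prefix, and after re-enabling the ISC do one more pass
 * so a summary bit set in the meantime is not lost.
 *
 *	scan(start .. aisb_max);	// first pass
 *	scan(0 .. start);		// bits skipped by starting late
 *	re-enable interrupts (sic);
 *	if (a summary bit is still set)	// raced with a new interrupt
 *		scan once more;
 *	next_sbit = last + 1;		// resume point, wrapped to 0 at the end
 */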
482/* msi_vecs - number of requested interrupts, 0 places the function into error state */
483static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
484{
485 struct zpci_dev *zdev = get_zdev(pdev);
486 unsigned int aisb, msi_nr;
487 struct msi_desc *msi;
488 int rc;
489
490 /* store the number of used MSI vectors */
491 zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);
492
493 spin_lock(&bucket->lock);
494 aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
495 /* alloc map exhausted? */
496 if (aisb == PAGE_SIZE) {
497 spin_unlock(&bucket->lock);
498 return -EIO;
499 }
500 set_bit(aisb, bucket->alloc);
501 spin_unlock(&bucket->lock);
502
503 zdev->aisb = aisb;
504 if (aisb + 1 > aisb_max)
505 aisb_max = aisb + 1;
506
507 /* wire up IRQ shortcut pointer */
508 bucket->imap[zdev->aisb] = zdev->irq_map;
509 pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);
510
511	 /* TODO: irq number 0 won't be found if we return fewer MSIs than requested.
512	 * Ignore it for now and fix it in common code.
513 */
514 msi_nr = aisb << ZPCI_MSI_VEC_BITS;
515
516 list_for_each_entry(msi, &pdev->msi_list, list) {
517 rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
518 aisb << ZPCI_MSI_VEC_BITS);
519 if (rc)
520 return rc;
521 msi_nr++;
522 }
523
524 rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
525 if (rc) {
526 clear_bit(aisb, bucket->alloc);
527 dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
528 return rc;
529 }
530 return (zdev->irq_map->msi_vecs == msi_vecs) ?
531 0 : zdev->irq_map->msi_vecs;
532}
533
534static void zpci_teardown_msi(struct pci_dev *pdev)
535{
536 struct zpci_dev *zdev = get_zdev(pdev);
537 struct msi_desc *msi;
538 int aisb, rc;
539
540 rc = zpci_unregister_airq(zdev);
541 if (rc) {
542 dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
543 return;
544 }
545
546 msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
547 aisb = irq_to_dev_nr(msi->irq);
548
549 list_for_each_entry(msi, &pdev->msi_list, list)
550 zpci_teardown_msi_irq(zdev, msi);
551
552 clear_bit(aisb, bucket->alloc);
553 if (aisb + 1 == aisb_max)
554 aisb_max--;
555}
556
557int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
558{
559 pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
560 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
561 return -EINVAL;
562 return zpci_setup_msi(pdev, nvec);
563}
564
565void arch_teardown_msi_irqs(struct pci_dev *pdev)
566{
567 pr_info("%s: on pdev: %p\n", __func__, pdev);
568 zpci_teardown_msi(pdev);
569}
570
571static void zpci_map_resources(struct zpci_dev *zdev)
572{
573 struct pci_dev *pdev = zdev->pdev;
574 resource_size_t len;
575 int i;
576
577 for (i = 0; i < PCI_BAR_COUNT; i++) {
578 len = pci_resource_len(pdev, i);
579 if (!len)
580 continue;
581 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
582 pdev->resource[i].end = pdev->resource[i].start + len - 1;
583 pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
584 i, pdev->resource[i].start, pdev->resource[i].end);
585 }
586};
587
588static void zpci_unmap_resources(struct pci_dev *pdev)
589{
590 resource_size_t len;
591 int i;
592
593 for (i = 0; i < PCI_BAR_COUNT; i++) {
594 len = pci_resource_len(pdev, i);
595 if (!len)
596 continue;
597 pci_iounmap(pdev, (void *) pdev->resource[i].start);
598 }
599};
600
601struct zpci_dev *zpci_alloc_device(void)
602{
603 struct zpci_dev *zdev;
604
605 /* Alloc memory for our private pci device data */
606 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
607 if (!zdev)
608 return ERR_PTR(-ENOMEM);
609
610 /* Alloc aibv & callback space */
611 zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
612 if (!zdev->irq_map)
613 goto error;
614 WARN_ON((u64) zdev->irq_map & 0xff);
615 return zdev;
616
617error:
618 kfree(zdev);
619 return ERR_PTR(-ENOMEM);
620}
621
622void zpci_free_device(struct zpci_dev *zdev)
623{
624 kmem_cache_free(zdev_irq_cache, zdev->irq_map);
625 kfree(zdev);
626}
627
628/* Called on removal of pci_dev, leaves zpci and bus device */
629static void zpci_remove_device(struct pci_dev *pdev)
630{
631 struct zpci_dev *zdev = get_zdev(pdev);
632
633 dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
634 zdev->state = ZPCI_FN_STATE_CONFIGURED;
635 zpci_dma_exit_device(zdev);
636 zpci_sysfs_remove_device(&pdev->dev);
637 zpci_unmap_resources(pdev);
638 list_del(&zdev->entry); /* can be called from init */
639 zdev->pdev = NULL;
640}
641
642static void zpci_scan_devices(void)
643{
644 struct zpci_dev *zdev;
645
646 mutex_lock(&zpci_list_lock);
647 list_for_each_entry(zdev, &zpci_list, entry)
648 if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
649 zpci_scan_device(zdev);
650 mutex_unlock(&zpci_list_lock);
651}
652
653/*
654 * Too late for any s390-specific setup: interrupts must already be set up,
655 * which requires DMA setup as well, and the PCI scan will access the
656 * config space, which only works if the function handle is enabled.
657 */
658int pcibios_enable_device(struct pci_dev *pdev, int mask)
659{
660 struct resource *res;
661 u16 cmd;
662 int i;
663
664 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
665
666 for (i = 0; i < PCI_BAR_COUNT; i++) {
667 res = &pdev->resource[i];
668
669 if (res->flags & IORESOURCE_IO)
670 return -EINVAL;
671
672 if (res->flags & IORESOURCE_MEM)
673 cmd |= PCI_COMMAND_MEMORY;
674 }
675 pci_write_config_word(pdev, PCI_COMMAND, cmd);
676 return 0;
677}
678
679void pcibios_disable_device(struct pci_dev *pdev)
680{
681 zpci_remove_device(pdev);
682 pdev->sysdata = NULL;
683}
684
685int pcibios_add_platform_entries(struct pci_dev *pdev)
686{
687 return zpci_sysfs_add_device(&pdev->dev);
688}
689
690int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
691{
692 int msi_nr = irq_to_msi_nr(irq);
693 struct zdev_irq_map *imap;
694 struct msi_desc *msi;
695
696 msi = irq_get_msi_desc(irq);
697 if (!msi)
698 return -EIO;
699
700 imap = get_imap(irq);
701 spin_lock_init(&imap->lock);
702
703 pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
704 imap->cb[msi_nr].handler = handler;
705 imap->cb[msi_nr].data = data;
706
707 /*
708 * The generic MSI code returns with the interrupt disabled on the
709 * card, using the MSI mask bits. Firmware doesn't appear to unmask
710 * at that level, so we do it here by hand.
711 */
712 zpci_msi_set_mask_bits(msi, 1, 0);
713 return 0;
714}
715
716void zpci_free_irq(unsigned int irq)
717{
718 struct zdev_irq_map *imap = get_imap(irq);
719 int msi_nr = irq_to_msi_nr(irq);
720 unsigned long flags;
721
722 pr_debug("%s: for irq: %d\n", __func__, irq);
723
724 spin_lock_irqsave(&imap->lock, flags);
725 imap->cb[msi_nr].handler = NULL;
726 imap->cb[msi_nr].data = NULL;
727 spin_unlock_irqrestore(&imap->lock, flags);
728}
729
730int request_irq(unsigned int irq, irq_handler_t handler,
731 unsigned long irqflags, const char *devname, void *dev_id)
732{
733 pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
734 __func__, irq, handler, irqflags, devname);
735
736 return zpci_request_irq(irq, handler, dev_id);
737}
738EXPORT_SYMBOL_GPL(request_irq);
739
740void free_irq(unsigned int irq, void *dev_id)
741{
742 zpci_free_irq(irq);
743}
744EXPORT_SYMBOL_GPL(free_irq);
745
746static int __init zpci_irq_init(void)
747{
748 int cpu, rc;
749
750 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
751 if (!bucket)
752 return -ENOMEM;
753
754 bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
755 if (!bucket->aisb) {
756 rc = -ENOMEM;
757 goto out_aisb;
758 }
759
760 bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
761 if (!bucket->alloc) {
762 rc = -ENOMEM;
763 goto out_alloc;
764 }
765
766 isc_register(PCI_ISC);
767 zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
768 if (IS_ERR(zpci_irq_si)) {
769 rc = PTR_ERR(zpci_irq_si);
770 zpci_irq_si = NULL;
771 goto out_ai;
772 }
773
774 for_each_online_cpu(cpu)
775 per_cpu(next_sbit, cpu) = 0;
776
777 spin_lock_init(&bucket->lock);
778 /* set summary to 1 to be called every time for the ISC */
779 *zpci_irq_si = 1;
780 sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
781 return 0;
782
783out_ai:
784 isc_unregister(PCI_ISC);
785 free_page((unsigned long) bucket->alloc);
786out_alloc:
787 free_page((unsigned long) bucket->aisb);
788out_aisb:
789 kfree(bucket);
790 return rc;
791}
792
793static void zpci_irq_exit(void)
794{
795 free_page((unsigned long) bucket->alloc);
796 free_page((unsigned long) bucket->aisb);
797 s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
798 isc_unregister(PCI_ISC);
799 kfree(bucket);
800}
801
802static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
803 unsigned long flags, int domain)
804{
805 struct resource *r;
806 char *name;
807 int rc;
808
809 r = kzalloc(sizeof(*r), GFP_KERNEL);
810 if (!r)
811 return ERR_PTR(-ENOMEM);
812 r->start = start;
813 r->end = r->start + size - 1;
814 r->flags = flags;
815 r->parent = &iomem_resource;
816 name = kmalloc(18, GFP_KERNEL);
817 if (!name) {
818 kfree(r);
819 return ERR_PTR(-ENOMEM);
820 }
821 sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
822 r->name = name;
823
824 rc = request_resource(&iomem_resource, r);
825 if (rc)
826 pr_debug("request resource %pR failed\n", r);
827 return r;
828}
829
830static int zpci_alloc_iomap(struct zpci_dev *zdev)
831{
832 int entry;
833
834 spin_lock(&zpci_iomap_lock);
835 entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
836 if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
837 spin_unlock(&zpci_iomap_lock);
838 return -ENOSPC;
839 }
840 set_bit(entry, zpci_iomap);
841 spin_unlock(&zpci_iomap_lock);
842 return entry;
843}
844
845static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
846{
847 spin_lock(&zpci_iomap_lock);
848 memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
849 clear_bit(entry, zpci_iomap);
850 spin_unlock(&zpci_iomap_lock);
851}
852
853static int zpci_create_device_bus(struct zpci_dev *zdev)
854{
855 struct resource *res;
856 LIST_HEAD(resources);
857 int i;
858
859 /* allocate mapping entry for each used bar */
860 for (i = 0; i < PCI_BAR_COUNT; i++) {
861 unsigned long addr, size, flags;
862 int entry;
863
864 if (!zdev->bars[i].size)
865 continue;
866 entry = zpci_alloc_iomap(zdev);
867 if (entry < 0)
868 return entry;
869 zdev->bars[i].map_idx = entry;
870
871 /* only MMIO is supported */
872 flags = IORESOURCE_MEM;
873 if (zdev->bars[i].val & 8)
874 flags |= IORESOURCE_PREFETCH;
875 if (zdev->bars[i].val & 4)
876 flags |= IORESOURCE_MEM_64;
877
878 addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
879
880 size = 1UL << zdev->bars[i].size;
881
882 res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
883 if (IS_ERR(res)) {
884 zpci_free_iomap(zdev, entry);
885 return PTR_ERR(res);
886 }
887 pci_add_resource(&resources, res);
888 }
889
890 zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
891 zdev, &resources);
892 if (!zdev->bus)
893 return -EIO;
894
895 zdev->bus->max_bus_speed = zdev->max_bus_speed;
896 return 0;
897}
898
899static int zpci_alloc_domain(struct zpci_dev *zdev)
900{
901 spin_lock(&zpci_domain_lock);
902 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
903 if (zdev->domain == ZPCI_NR_DEVICES) {
904 spin_unlock(&zpci_domain_lock);
905 return -ENOSPC;
906 }
907 set_bit(zdev->domain, zpci_domain);
908 spin_unlock(&zpci_domain_lock);
909 return 0;
910}
911
912static void zpci_free_domain(struct zpci_dev *zdev)
913{
914 spin_lock(&zpci_domain_lock);
915 clear_bit(zdev->domain, zpci_domain);
916 spin_unlock(&zpci_domain_lock);
917}
918
919int zpci_enable_device(struct zpci_dev *zdev)
920{
921 int rc;
922
923 rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
924 if (rc)
925 goto out;
926 pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
927
928 rc = zpci_dma_init_device(zdev);
929 if (rc)
930 goto out_dma;
931 return 0;
932
933out_dma:
934 clp_disable_fh(zdev);
935out:
936 return rc;
937}
938EXPORT_SYMBOL_GPL(zpci_enable_device);
939
940int zpci_create_device(struct zpci_dev *zdev)
941{
942 int rc;
943
944 rc = zpci_alloc_domain(zdev);
945 if (rc)
946 goto out;
947
948 rc = zpci_create_device_bus(zdev);
949 if (rc)
950 goto out_bus;
951
952 mutex_lock(&zpci_list_lock);
953 list_add_tail(&zdev->entry, &zpci_list);
954 if (hotplug_ops.create_slot)
955 hotplug_ops.create_slot(zdev);
956 mutex_unlock(&zpci_list_lock);
957
958 if (zdev->state == ZPCI_FN_STATE_STANDBY)
959 return 0;
960
961 rc = zpci_enable_device(zdev);
962 if (rc)
963 goto out_start;
964 return 0;
965
966out_start:
967 mutex_lock(&zpci_list_lock);
968 list_del(&zdev->entry);
969 if (hotplug_ops.remove_slot)
970 hotplug_ops.remove_slot(zdev);
971 mutex_unlock(&zpci_list_lock);
972out_bus:
973 zpci_free_domain(zdev);
974out:
975 return rc;
976}
977
978void zpci_stop_device(struct zpci_dev *zdev)
979{
980 zpci_dma_exit_device(zdev);
981 /*
982 * Note: SCLP disables fh via set-pci-fn so don't
983 * do that here.
984 */
985}
986EXPORT_SYMBOL_GPL(zpci_stop_device);
987
988int zpci_scan_device(struct zpci_dev *zdev)
989{
990 zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
991 if (!zdev->pdev) {
992 pr_err("pci_scan_single_device failed for fid: 0x%x\n",
993 zdev->fid);
994 goto out;
995 }
996
997 zpci_map_resources(zdev);
998 pci_bus_add_devices(zdev->bus);
999
1000 /* now that pdev was added to the bus mark it as used */
1001 zdev->state = ZPCI_FN_STATE_ONLINE;
1002 return 0;
1003
1004out:
1005 zpci_dma_exit_device(zdev);
1006 clp_disable_fh(zdev);
1007 return -EIO;
1008}
1009EXPORT_SYMBOL_GPL(zpci_scan_device);
1010
1011static inline int barsize(u8 size)
1012{
1013 return (size) ? (1 << size) >> 10 : 0;
1014}
1015
1016static int zpci_mem_init(void)
1017{
1018 zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
1019 L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
1020 if (!zdev_irq_cache)
1021 goto error_zdev;
1022
1023 /* TODO: use realloc */
1024 zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
1025 GFP_KERNEL);
1026 if (!zpci_iomap_start)
1027 goto error_iomap;
1028 return 0;
1029
1030error_iomap:
1031 kmem_cache_destroy(zdev_irq_cache);
1032error_zdev:
1033 return -ENOMEM;
1034}
1035
1036static void zpci_mem_exit(void)
1037{
1038 kfree(zpci_iomap_start);
1039 kmem_cache_destroy(zdev_irq_cache);
1040}
1041
1042unsigned int pci_probe = 1;
1043EXPORT_SYMBOL_GPL(pci_probe);
1044
1045char * __init pcibios_setup(char *str)
1046{
1047 if (!strcmp(str, "off")) {
1048 pci_probe = 0;
1049 return NULL;
1050 }
1051 return str;
1052}
1053
1054static int __init pci_base_init(void)
1055{
1056 int rc;
1057
1058 if (!pci_probe)
1059 return 0;
1060
1061 if (!test_facility(2) || !test_facility(69)
1062 || !test_facility(71) || !test_facility(72))
1063 return 0;
1064
1065 pr_info("Probing PCI hardware: PCI:%d SID:%d AEN:%d\n",
1066 test_facility(69), test_facility(70),
1067 test_facility(71));
1068
1069 rc = zpci_mem_init();
1070 if (rc)
1071 goto out_mem;
1072
1073 rc = zpci_msihash_init();
1074 if (rc)
1075 goto out_hash;
1076
1077 rc = zpci_irq_init();
1078 if (rc)
1079 goto out_irq;
1080
1081 rc = zpci_dma_init();
1082 if (rc)
1083 goto out_dma;
1084
1085 rc = clp_find_pci_devices();
1086 if (rc)
1087 goto out_find;
1088
1089 zpci_scan_devices();
1090 return 0;
1091
1092out_find:
1093 zpci_dma_exit();
1094out_dma:
1095 zpci_irq_exit();
1096out_irq:
1097 zpci_msihash_exit();
1098out_hash:
1099 zpci_mem_exit();
1100out_mem:
1101 return rc;
1102}
1103subsys_initcall(pci_base_init);
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
new file mode 100644
index 000000000000..7f4ce8d874a4
--- /dev/null
+++ b/arch/s390/pci/pci_clp.c
@@ -0,0 +1,324 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/err.h>
14#include <linux/delay.h>
15#include <linux/pci.h>
16#include <asm/pci_clp.h>
17
18/*
19 * Call Logical Processor
20 * Retry logic is handled by the caller.
21 */
22static inline u8 clp_instr(void *req)
23{
24 u64 ilpm;
25 u8 cc;
26
27 asm volatile (
28 " .insn rrf,0xb9a00000,%[ilpm],%[req],0x0,0x2\n"
29 " ipm %[cc]\n"
30 " srl %[cc],28\n"
31 : [cc] "=d" (cc), [ilpm] "=d" (ilpm)
32 : [req] "a" (req)
33 : "cc", "memory");
34 return cc;
35}
36
37static void *clp_alloc_block(void)
38{
39 struct page *page = alloc_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
40 return (page) ? page_address(page) : NULL;
41}
42
43static void clp_free_block(void *ptr)
44{
45 free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
46}
47
48static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
49 struct clp_rsp_query_pci_grp *response)
50{
51 zdev->tlb_refresh = response->refresh;
52 zdev->dma_mask = response->dasm;
53 zdev->msi_addr = response->msia;
54
55 pr_debug("Supported number of MSI vectors: %u\n", response->noi);
56 switch (response->version) {
57 case 1:
58 zdev->max_bus_speed = PCIE_SPEED_5_0GT;
59 break;
60 default:
61 zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
62 break;
63 }
64}
65
66static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
67{
68 struct clp_req_rsp_query_pci_grp *rrb;
69 int rc;
70
71 rrb = clp_alloc_block();
72 if (!rrb)
73 return -ENOMEM;
74
75 memset(rrb, 0, sizeof(*rrb));
76 rrb->request.hdr.len = sizeof(rrb->request);
77 rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
78 rrb->response.hdr.len = sizeof(rrb->response);
79 rrb->request.pfgid = pfgid;
80
81 rc = clp_instr(rrb);
82 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
83 clp_store_query_pci_fngrp(zdev, &rrb->response);
84 else {
85 pr_err("Query PCI FNGRP failed with response: %x cc: %d\n",
86 rrb->response.hdr.rsp, rc);
87 rc = -EIO;
88 }
89 clp_free_block(rrb);
90 return rc;
91}
92
93static int clp_store_query_pci_fn(struct zpci_dev *zdev,
94 struct clp_rsp_query_pci *response)
95{
96 int i;
97
98 for (i = 0; i < PCI_BAR_COUNT; i++) {
99 zdev->bars[i].val = le32_to_cpu(response->bar[i]);
100 zdev->bars[i].size = response->bar_size[i];
101 }
102 zdev->start_dma = response->sdma;
103 zdev->end_dma = response->edma;
104 zdev->pchid = response->pchid;
105 zdev->pfgid = response->pfgid;
106 return 0;
107}
108
109static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
110{
111 struct clp_req_rsp_query_pci *rrb;
112 int rc;
113
114 rrb = clp_alloc_block();
115 if (!rrb)
116 return -ENOMEM;
117
118 memset(rrb, 0, sizeof(*rrb));
119 rrb->request.hdr.len = sizeof(rrb->request);
120 rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
121 rrb->response.hdr.len = sizeof(rrb->response);
122 rrb->request.fh = fh;
123
124 rc = clp_instr(rrb);
125 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
126 rc = clp_store_query_pci_fn(zdev, &rrb->response);
127 if (rc)
128 goto out;
129 if (rrb->response.pfgid)
130 rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
131 } else {
132 pr_err("Query PCI failed with response: %x cc: %d\n",
133 rrb->response.hdr.rsp, rc);
134 rc = -EIO;
135 }
136out:
137 clp_free_block(rrb);
138 return rc;
139}
140
141int clp_add_pci_device(u32 fid, u32 fh, int configured)
142{
143 struct zpci_dev *zdev;
144 int rc;
145
146 zdev = zpci_alloc_device();
147 if (IS_ERR(zdev))
148 return PTR_ERR(zdev);
149
150 zdev->fh = fh;
151 zdev->fid = fid;
152
153 /* Query function properties and update zdev */
154 rc = clp_query_pci_fn(zdev, fh);
155 if (rc)
156 goto error;
157
158 if (configured)
159 zdev->state = ZPCI_FN_STATE_CONFIGURED;
160 else
161 zdev->state = ZPCI_FN_STATE_STANDBY;
162
163 rc = zpci_create_device(zdev);
164 if (rc)
165 goto error;
166 return 0;
167
168error:
169 zpci_free_device(zdev);
170 return rc;
171}
172
173/*
174 * Enable/Disable a given PCI function defined by its function handle.
175 */
176static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
177{
178 struct clp_req_rsp_set_pci *rrb;
179 int rc, retries = 1000;
180
181 rrb = clp_alloc_block();
182 if (!rrb)
183 return -ENOMEM;
184
185 do {
186 memset(rrb, 0, sizeof(*rrb));
187 rrb->request.hdr.len = sizeof(rrb->request);
188 rrb->request.hdr.cmd = CLP_SET_PCI_FN;
189 rrb->response.hdr.len = sizeof(rrb->response);
190 rrb->request.fh = *fh;
191 rrb->request.oc = command;
192 rrb->request.ndas = nr_dma_as;
193
194 rc = clp_instr(rrb);
195 if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
196 retries--;
197 if (retries < 0)
198 break;
199 msleep(1);
200 }
201 } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
202
203 if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
204 *fh = rrb->response.fh;
205 else {
206 pr_err("Set PCI FN failed with response: %x cc: %d\n",
207 rrb->response.hdr.rsp, rc);
208 rc = -EIO;
209 }
210 clp_free_block(rrb);
211 return rc;
212}
213
214int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
215{
216 u32 fh = zdev->fh;
217 int rc;
218
219 rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
220 if (!rc)
221 /* Success -> store enabled handle in zdev */
222 zdev->fh = fh;
223 return rc;
224}
225
226int clp_disable_fh(struct zpci_dev *zdev)
227{
228 u32 fh = zdev->fh;
229 int rc;
230
231 if (!zdev_enabled(zdev))
232 return 0;
233
234 dev_info(&zdev->pdev->dev, "disabling fn handle: 0x%x\n", fh);
235 rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
236 if (!rc)
237 /* Success -> store disabled handle in zdev */
238 zdev->fh = fh;
239 else
240 dev_err(&zdev->pdev->dev,
241 "Failed to disable fn handle: 0x%x\n", fh);
242 return rc;
243}
244
245static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
246{
247 int present, rc;
248
249 if (!entry->vendor_id)
250 return;
251
252 /* TODO: be a little bit more scalable */
253 present = zpci_fid_present(entry->fid);
254
255 if (present)
256 pr_debug("%s: device %x already present\n", __func__, entry->fid);
257
258 /* skip already used functions */
259 if (present && entry->config_state)
260 return;
261
262 /* aev 306: function moved to stand-by state */
263 if (present && !entry->config_state) {
264 /*
265 * The handle is already disabled, that means no iota/irq freeing via
266 * the firmware interfaces anymore. Need to free resources manually
267 * (DMA memory, debug, sysfs)...
268 */
269 zpci_stop_device(get_zdev_by_fid(entry->fid));
270 return;
271 }
272
273 rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
274 if (rc)
275 pr_err("Failed to add fid: 0x%x\n", entry->fid);
276}
277
278int clp_find_pci_devices(void)
279{
280 struct clp_req_rsp_list_pci *rrb;
281 u64 resume_token = 0;
282 int entries, i, rc;
283
284 rrb = clp_alloc_block();
285 if (!rrb)
286 return -ENOMEM;
287
288 do {
289 memset(rrb, 0, sizeof(*rrb));
290 rrb->request.hdr.len = sizeof(rrb->request);
291 rrb->request.hdr.cmd = CLP_LIST_PCI;
292 /* store as many entries as possible */
293 rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
294 rrb->request.resume_token = resume_token;
295
296 /* Get PCI function handle list */
297 rc = clp_instr(rrb);
298 if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
299 pr_err("List PCI failed with response: 0x%x cc: %d\n",
300 rrb->response.hdr.rsp, rc);
301 rc = -EIO;
302 goto out;
303 }
304
305 WARN_ON_ONCE(rrb->response.entry_size !=
306 sizeof(struct clp_fh_list_entry));
307
308 entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
309 rrb->response.entry_size;
310 pr_info("Detected number of PCI functions: %u\n", entries);
311
312 /* Store the returned resume token as input for the next call */
313 resume_token = rrb->response.resume_token;
314
315 for (i = 0; i < entries; i++)
316 clp_check_pcifn_entry(&rrb->response.fh_list[i]);
317 } while (resume_token);
318
319 pr_debug("Maximum number of supported PCI functions: %u\n",
320 rrb->response.max_fn);
321out:
322 clp_free_block(rrb);
323 return rc;
324}
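
clp_find_pci_devices() pages through the firmware's function list with a resume token, the usual pattern when a response may not fit into a single 4 KB CLP block: a token of 0 starts the listing and a returned token of 0 ends it. A stripped-down, hypothetical skeleton of that loop, where list_chunk() and handle_entry() stand in for the CLP call and clp_check_pcifn_entry():

struct chunk { unsigned long resume_token; int entries; /* ... */ };

static int list_all(int (*list_chunk)(unsigned long token, struct chunk *out),
		    void (*handle_entry)(struct chunk *c, int i))
{
	unsigned long token = 0;
	struct chunk c;
	int i, rc;

	do {
		rc = list_chunk(token, &c);	/* one CLP_LIST_PCI request */
		if (rc)
			return rc;
		for (i = 0; i < c.entries; i++)
			handle_entry(&c, i);
		token = c.resume_token;		/* 0 means no more entries */
	} while (token);
	return 0;
}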
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
new file mode 100644
index 000000000000..c64b4b294b0a
--- /dev/null
+++ b/arch/s390/pci/pci_dma.c
@@ -0,0 +1,506 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/export.h>
11#include <linux/iommu-helper.h>
12#include <linux/dma-mapping.h>
13#include <linux/pci.h>
14#include <asm/pci_dma.h>
15
16static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;
17
18static struct kmem_cache *dma_region_table_cache;
19static struct kmem_cache *dma_page_table_cache;
20
21static unsigned long *dma_alloc_cpu_table(void)
22{
23 unsigned long *table, *entry;
24
25 table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
26 if (!table)
27 return NULL;
28
29 for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
30 *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
31 return table;
32}
33
34static void dma_free_cpu_table(void *table)
35{
36 kmem_cache_free(dma_region_table_cache, table);
37}
38
39static unsigned long *dma_alloc_page_table(void)
40{
41 unsigned long *table, *entry;
42
43 table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
44 if (!table)
45 return NULL;
46
47 for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
48 *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
49 return table;
50}
51
52static void dma_free_page_table(void *table)
53{
54 kmem_cache_free(dma_page_table_cache, table);
55}
56
57static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
58{
59 unsigned long *sto;
60
61 if (reg_entry_isvalid(*entry))
62 sto = get_rt_sto(*entry);
63 else {
64 sto = dma_alloc_cpu_table();
65 if (!sto)
66 return NULL;
67
68 set_rt_sto(entry, sto);
69 validate_rt_entry(entry);
70 entry_clr_protected(entry);
71 }
72 return sto;
73}
74
75static unsigned long *dma_get_page_table_origin(unsigned long *entry)
76{
77 unsigned long *pto;
78
79 if (reg_entry_isvalid(*entry))
80 pto = get_st_pto(*entry);
81 else {
82 pto = dma_alloc_page_table();
83 if (!pto)
84 return NULL;
85 set_st_pto(entry, pto);
86 validate_st_entry(entry);
87 entry_clr_protected(entry);
88 }
89 return pto;
90}
91
92static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
93{
94 unsigned long *sto, *pto;
95 unsigned int rtx, sx, px;
96
97 rtx = calc_rtx(dma_addr);
98 sto = dma_get_seg_table_origin(&rto[rtx]);
99 if (!sto)
100 return NULL;
101
102 sx = calc_sx(dma_addr);
103 pto = dma_get_page_table_origin(&sto[sx]);
104 if (!pto)
105 return NULL;
106
107 px = calc_px(dma_addr);
108 return &pto[px];
109}
110
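/*
 * dma_walk_cpu_trans() above resolves a DMA address through the three-level
 * I/O translation table (region table -> segment table -> page table),
 * allocating missing intermediate tables on demand, much like a CPU
 * page-table walk:
 *
 *	sto = table_at(&rto[calc_rtx(addr)]);	// may allocate a segment table
 *	pto = table_at(&sto[calc_sx(addr)]);	// may allocate a page table
 *	return &pto[calc_px(addr)];		// the PTE slot to update
 *
 * The bit positions behind calc_rtx/sx/px come from asm/pci_dma.h and are not
 * part of this hunk.
 */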
111static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
112 dma_addr_t dma_addr, int flags)
113{
114 unsigned long *entry;
115
116 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
117 if (!entry) {
118 WARN_ON_ONCE(1);
119 return;
120 }
121
122 if (flags & ZPCI_PTE_INVALID) {
123 invalidate_pt_entry(entry);
124 return;
125 } else {
126 set_pt_pfaa(entry, page_addr);
127 validate_pt_entry(entry);
128 }
129
130 if (flags & ZPCI_TABLE_PROTECTED)
131 entry_set_protected(entry);
132 else
133 entry_clr_protected(entry);
134}
135
136static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
137 dma_addr_t dma_addr, size_t size, int flags)
138{
139 unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
140 u8 *page_addr = (u8 *) (pa & PAGE_MASK);
141 dma_addr_t start_dma_addr = dma_addr;
142 unsigned long irq_flags;
143 int i, rc = 0;
144
145 if (!nr_pages)
146 return -EINVAL;
147
148 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
149 if (!zdev->dma_table) {
150 dev_err(&zdev->pdev->dev, "Missing DMA table\n");
151 goto no_refresh;
152 }
153
154 for (i = 0; i < nr_pages; i++) {
155 dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
156 page_addr += PAGE_SIZE;
157 dma_addr += PAGE_SIZE;
158 }
159
160 /*
161 * rpcit is not required to establish new translations when previously
162	 * invalid translation-table entries are validated; it is, however,
163 * required when altering previously valid entries.
164 */
165 if (!zdev->tlb_refresh &&
166 ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
167 /*
168		 * TODO: we also need to check that the old entry is indeed INVALID,
169		 * and not only for one page but for the whole range...
170		 * -> for now we WARN_ON in that case, but with lazy unmapping this
171		 * needs to be redone!
172 */
173 goto no_refresh;
174 rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
175 nr_pages * PAGE_SIZE);
176
177no_refresh:
178 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
179 return rc;
180}
181
182static void dma_free_seg_table(unsigned long entry)
183{
184 unsigned long *sto = get_rt_sto(entry);
185 int sx;
186
187 for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
188 if (reg_entry_isvalid(sto[sx]))
189 dma_free_page_table(get_st_pto(sto[sx]));
190
191 dma_free_cpu_table(sto);
192}
193
194static void dma_cleanup_tables(struct zpci_dev *zdev)
195{
196 unsigned long *table;
197 int rtx;
198
199 if (!zdev || !zdev->dma_table)
200 return;
201
202 table = zdev->dma_table;
203 for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
204 if (reg_entry_isvalid(table[rtx]))
205 dma_free_seg_table(table[rtx]);
206
207 dma_free_cpu_table(table);
208 zdev->dma_table = NULL;
209}
210
211static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
212 int size)
213{
214 unsigned long boundary_size = 0x1000000;
215
216 return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
217 start, size, 0, boundary_size, 0);
218}
219
220static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
221{
222 unsigned long offset, flags;
223
224 spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
225 offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
226 if (offset == -1)
227 offset = __dma_alloc_iommu(zdev, 0, size);
228
229 if (offset != -1) {
230 zdev->next_bit = offset + size;
231 if (zdev->next_bit >= zdev->iommu_pages)
232 zdev->next_bit = 0;
233 }
234 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
235 return offset;
236}
237
238static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
239{
240 unsigned long flags;
241
242 spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
243 if (!zdev->iommu_bitmap)
244 goto out;
245 bitmap_clear(zdev->iommu_bitmap, offset, size);
246 if (offset >= zdev->next_bit)
247 zdev->next_bit = offset + size;
248out:
249 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
250}
251
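/*
 * dma_alloc_iommu() above is a next-fit allocator over the per-device iommu
 * bitmap: search from the rotating start point, retry once from the beginning,
 * then advance the start point past the new allocation, wrapping to 0 at the
 * end of the aperture. Condensed, with find_area() standing in for
 * iommu_area_alloc():
 *
 *	offset = find_area(bitmap, pages, next_bit, size);
 *	if (offset == -1)
 *		offset = find_area(bitmap, pages, 0, size);
 *	if (offset != -1) {
 *		next_bit = offset + size;
 *		if (next_bit >= pages)
 *			next_bit = 0;
 *	}
 */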
252int dma_set_mask(struct device *dev, u64 mask)
253{
254 if (!dev->dma_mask || !dma_supported(dev, mask))
255 return -EIO;
256
257 *dev->dma_mask = mask;
258 return 0;
259}
260EXPORT_SYMBOL_GPL(dma_set_mask);
261
262static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
263 unsigned long offset, size_t size,
264 enum dma_data_direction direction,
265 struct dma_attrs *attrs)
266{
267 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
268 unsigned long nr_pages, iommu_page_index;
269 unsigned long pa = page_to_phys(page) + offset;
270 int flags = ZPCI_PTE_VALID;
271 dma_addr_t dma_addr;
272
273 WARN_ON_ONCE(offset > PAGE_SIZE);
274
275 /* This rounds up number of pages based on size and offset */
276 nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
277 iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
278 if (iommu_page_index == -1)
279 goto out_err;
280
281 /* Use rounded up size */
282 size = nr_pages * PAGE_SIZE;
283
284 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
285 if (dma_addr + size > zdev->end_dma) {
286 dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
287 dma_addr, size, zdev->end_dma);
288 goto out_free;
289 }
290
291 if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
292 flags |= ZPCI_TABLE_PROTECTED;
293
294 if (!dma_update_trans(zdev, pa, dma_addr, size, flags))
295 return dma_addr + offset;
296
297out_free:
298 dma_free_iommu(zdev, iommu_page_index, nr_pages);
299out_err:
300 dev_err(dev, "Failed to map addr: %lx\n", pa);
301 return DMA_ERROR_CODE;
302}
303
304static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
305 size_t size, enum dma_data_direction direction,
306 struct dma_attrs *attrs)
307{
308 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
309 unsigned long iommu_page_index;
310 int npages;
311
312 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
313 dma_addr = dma_addr & PAGE_MASK;
314 if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
315 ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
316 dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
317
318 iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
319 dma_free_iommu(zdev, iommu_page_index, npages);
320}
321
322static void *s390_dma_alloc(struct device *dev, size_t size,
323 dma_addr_t *dma_handle, gfp_t flag,
324 struct dma_attrs *attrs)
325{
326 struct page *page;
327 unsigned long pa;
328 dma_addr_t map;
329
330 size = PAGE_ALIGN(size);
331 page = alloc_pages(flag, get_order(size));
332 if (!page)
333 return NULL;
334 pa = page_to_phys(page);
335 memset((void *) pa, 0, size);
336
337 map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
338 size, DMA_BIDIRECTIONAL, NULL);
339 if (dma_mapping_error(dev, map)) {
340 free_pages(pa, get_order(size));
341 return NULL;
342 }
343
344 if (dma_handle)
345 *dma_handle = map;
346 return (void *) pa;
347}
348
349static void s390_dma_free(struct device *dev, size_t size,
350 void *pa, dma_addr_t dma_handle,
351 struct dma_attrs *attrs)
352{
353 s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
354 DMA_BIDIRECTIONAL, NULL);
355 free_pages((unsigned long) pa, get_order(size));
356}
357
358static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
359 int nr_elements, enum dma_data_direction dir,
360 struct dma_attrs *attrs)
361{
362 int mapped_elements = 0;
363 struct scatterlist *s;
364 int i;
365
366 for_each_sg(sg, s, nr_elements, i) {
367 struct page *page = sg_page(s);
368 s->dma_address = s390_dma_map_pages(dev, page, s->offset,
369 s->length, dir, NULL);
370 if (!dma_mapping_error(dev, s->dma_address)) {
371 s->dma_length = s->length;
372 mapped_elements++;
373 } else
374 goto unmap;
375 }
376out:
377 return mapped_elements;
378
379unmap:
380 for_each_sg(sg, s, mapped_elements, i) {
381 if (s->dma_address)
382 s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
383 dir, NULL);
384 s->dma_address = 0;
385 s->dma_length = 0;
386 }
387 mapped_elements = 0;
388 goto out;
389}
390
391static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
392 int nr_elements, enum dma_data_direction dir,
393 struct dma_attrs *attrs)
394{
395 struct scatterlist *s;
396 int i;
397
398 for_each_sg(sg, s, nr_elements, i) {
399 s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
400 s->dma_address = 0;
401 s->dma_length = 0;
402 }
403}
404
405int zpci_dma_init_device(struct zpci_dev *zdev)
406{
407 unsigned int bitmap_order;
408 int rc;
409
410 spin_lock_init(&zdev->iommu_bitmap_lock);
411 spin_lock_init(&zdev->dma_table_lock);
412
413 zdev->dma_table = dma_alloc_cpu_table();
414 if (!zdev->dma_table) {
415 rc = -ENOMEM;
416 goto out_clean;
417 }
418
419 zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
420 zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
421 bitmap_order = get_order(zdev->iommu_pages / 8);
422 pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
423 zdev->iommu_size, zdev->iommu_pages, bitmap_order);
424
425 zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
426 bitmap_order);
427 if (!zdev->iommu_bitmap) {
428 rc = -ENOMEM;
429 goto out_reg;
430 }
431
432 rc = zpci_register_ioat(zdev,
433 0,
434 zdev->start_dma + PAGE_OFFSET,
435 zdev->start_dma + zdev->iommu_size - 1,
436 (u64) zdev->dma_table);
437 if (rc)
438 goto out_reg;
439 return 0;
440
441out_reg:
442 dma_free_cpu_table(zdev->dma_table);
443out_clean:
444 return rc;
445}
446
447void zpci_dma_exit_device(struct zpci_dev *zdev)
448{
449 zpci_unregister_ioat(zdev, 0);
450 dma_cleanup_tables(zdev);
451 free_pages((unsigned long) zdev->iommu_bitmap,
452 get_order(zdev->iommu_pages / 8));
453 zdev->iommu_bitmap = NULL;
454 zdev->next_bit = 0;
455}
456
457static int __init dma_alloc_cpu_table_caches(void)
458{
459 dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
460 ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
461 0, NULL);
462 if (!dma_region_table_cache)
463 return -ENOMEM;
464
465 dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
466 ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
467 0, NULL);
468 if (!dma_page_table_cache) {
469 kmem_cache_destroy(dma_region_table_cache);
470 return -ENOMEM;
471 }
472 return 0;
473}
474
475int __init zpci_dma_init(void)
476{
477 return dma_alloc_cpu_table_caches();
478}
479
480void zpci_dma_exit(void)
481{
482 kmem_cache_destroy(dma_page_table_cache);
483 kmem_cache_destroy(dma_region_table_cache);
484}
485
486#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
487
488static int __init dma_debug_do_init(void)
489{
490 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
491 return 0;
492}
493fs_initcall(dma_debug_do_init);
494
495struct dma_map_ops s390_dma_ops = {
496 .alloc = s390_dma_alloc,
497 .free = s390_dma_free,
498 .map_sg = s390_dma_map_sg,
499 .unmap_sg = s390_dma_unmap_sg,
500 .map_page = s390_dma_map_pages,
501 .unmap_page = s390_dma_unmap_pages,
502 /* if we support direct DMA this must be conditional */
503 .is_phys = 0,
504 /* dma_supported is unconditionally true without a callback */
505};
506EXPORT_SYMBOL_GPL(s390_dma_ops);
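
The dma_alloc_iommu()/dma_free_iommu() pair above implements a simple next-fit allocator over the IOMMU page bitmap: the search starts at the saved hint (next_bit), wraps around once to the beginning, and the hint is advanced past the last allocation. The stand-alone user-space sketch below mirrors that policy with a toy byte-per-page bitmap; the names and sizes are illustrative only, not kernel APIs.

#include <stdio.h>
#include <string.h>

#define IOMMU_PAGES 64

static unsigned char bitmap[IOMMU_PAGES];	/* one byte per IOMMU page, for clarity */
static unsigned long next_bit;			/* allocation hint, as in struct zpci_dev */

/* find 'count' contiguous free pages at or after 'start'; -1 if none */
static long find_free_range(unsigned long start, unsigned long count)
{
	unsigned long i, run = 0;

	for (i = start; i < IOMMU_PAGES; i++) {
		run = bitmap[i] ? 0 : run + 1;
		if (run == count)
			return (long)(i - count + 1);
	}
	return -1;
}

static long iommu_alloc(unsigned long count)
{
	long offset = find_free_range(next_bit, count);

	if (offset == -1)			/* wrap around once, like dma_alloc_iommu() */
		offset = find_free_range(0, count);
	if (offset != -1) {
		memset(bitmap + offset, 1, count);
		next_bit = offset + count;
		if (next_bit >= IOMMU_PAGES)
			next_bit = 0;
	}
	return offset;
}

static void iommu_free(unsigned long offset, unsigned long count)
{
	memset(bitmap + offset, 0, count);
	if (offset >= next_bit)			/* same hint update as dma_free_iommu() */
		next_bit = offset + count;
}

int main(void)
{
	long a = iommu_alloc(8);
	long b = iommu_alloc(4);

	printf("a=%ld b=%ld next_bit=%lu\n", a, b, next_bit);
	iommu_free((unsigned long)a, 8);
	printf("after free: next_bit=%lu\n", next_bit);
	return 0;
}

Compared with this sketch, the driver code additionally serializes callers with iommu_bitmap_lock and delegates the actual search to iommu_area_alloc(), which also honors the segment boundary passed in __dma_alloc_iommu().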
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
new file mode 100644
index 000000000000..dbed8cd3370c
--- /dev/null
+++ b/arch/s390/pci/pci_event.c
@@ -0,0 +1,93 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/pci.h>
13
14/* Content Code Description for PCI Function Error */
15struct zpci_ccdf_err {
16 u32 reserved1;
17 u32 fh; /* function handle */
18 u32 fid; /* function id */
19 u32 ett : 4; /* expected table type */
20 u32 mvn : 12; /* MSI vector number */
21 u32 dmaas : 8; /* DMA address space */
22 u32 : 6;
23 u32 q : 1; /* event qualifier */
24 u32 rw : 1; /* read/write */
25 u64 faddr; /* failing address */
26 u32 reserved3;
27 u16 reserved4;
28 u16 pec; /* PCI event code */
29} __packed;
30
31/* Content Code Description for PCI Function Availability */
32struct zpci_ccdf_avail {
33 u32 reserved1;
34 u32 fh; /* function handle */
35 u32 fid; /* function id */
36 u32 reserved2;
37 u32 reserved3;
38 u32 reserved4;
39 u32 reserved5;
40 u16 reserved6;
41 u16 pec; /* PCI event code */
42} __packed;
43
44static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
45{
46 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
47
48 dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
49}
50
51static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
52{
53 struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
54
55 pr_err("%s%s: availability event: fh: 0x%x fid: 0x%x event code: 0x%x reason:",
56 (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
57 (zdev) ? dev_name(&zdev->pdev->dev) : "?",
58 ccdf->fh, ccdf->fid, ccdf->pec);
59 print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
60 16, 1, ccdf, sizeof(*ccdf), false);
61
62 switch (ccdf->pec) {
63 case 0x0301:
64 zpci_enable_device(zdev);
65 break;
66 case 0x0302:
67 clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
68 break;
69 case 0x0306:
70 clp_find_pci_devices();
71 break;
72 default:
73 break;
74 }
75}
76
77void zpci_event_error(void *data)
78{
79 struct zpci_ccdf_err *ccdf = data;
80 struct zpci_dev *zdev;
81
82 zpci_event_log_err(ccdf);
83 zdev = get_zdev_by_fid(ccdf->fid);
84 if (!zdev) {
85		pr_err("Error event for unknown fid: %x\n", ccdf->fid);
86 return;
87 }
88}
89
90void zpci_event_availability(void *data)
91{
92 zpci_event_log_avail(data);
93}
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
new file mode 100644
index 000000000000..90fd3482b9e2
--- /dev/null
+++ b/arch/s390/pci/pci_msi.c
@@ -0,0 +1,141 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/err.h>
13#include <linux/rculist.h>
14#include <linux/hash.h>
15#include <linux/pci.h>
16#include <linux/msi.h>
17#include <asm/hw_irq.h>
18
19/* mapping of irq numbers to msi_desc */
20static struct hlist_head *msi_hash;
21static unsigned int msihash_shift = 6;
22#define msi_hashfn(nr) hash_long(nr, msihash_shift)
23
24static DEFINE_SPINLOCK(msi_map_lock);
25
26struct msi_desc *__irq_get_msi_desc(unsigned int irq)
27{
28 struct hlist_node *entry;
29 struct msi_map *map;
30
31 hlist_for_each_entry_rcu(map, entry,
32 &msi_hash[msi_hashfn(irq)], msi_chain)
33 if (map->irq == irq)
34 return map->msi;
35 return NULL;
36}
37
38int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
39{
40 if (msi->msi_attrib.is_msix) {
41 int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
42 PCI_MSIX_ENTRY_VECTOR_CTRL;
43 msi->masked = readl(msi->mask_base + offset);
44 writel(flag, msi->mask_base + offset);
45 } else {
46 if (msi->msi_attrib.maskbit) {
47 int pos;
48 u32 mask_bits;
49
50 pos = (long) msi->mask_base;
51 pci_read_config_dword(msi->dev, pos, &mask_bits);
52 mask_bits &= ~(mask);
53 mask_bits |= flag & mask;
54 pci_write_config_dword(msi->dev, pos, mask_bits);
55 } else {
56 return 0;
57 }
58 }
59
60 msi->msi_attrib.maskbit = !!flag;
61 return 1;
62}
63
64int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
65 unsigned int nr, int offset)
66{
67 struct msi_map *map;
68 struct msi_msg msg;
69 int rc;
70
71 map = kmalloc(sizeof(*map), GFP_KERNEL);
72 if (map == NULL)
73 return -ENOMEM;
74
75 map->irq = nr;
76 map->msi = msi;
77 zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
78
79 pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
80 __func__, nr, msi_hashfn(nr));
81 hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
82
83 spin_lock(&msi_map_lock);
84 rc = irq_set_msi_desc(nr, msi);
85 if (rc) {
86 spin_unlock(&msi_map_lock);
87 hlist_del_rcu(&map->msi_chain);
88 kfree(map);
89 zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
90 return rc;
91 }
92 spin_unlock(&msi_map_lock);
93
94 msg.data = nr - offset;
95 msg.address_lo = zdev->msi_addr & 0xffffffff;
96 msg.address_hi = zdev->msi_addr >> 32;
97 write_msi_msg(nr, &msg);
98 return 0;
99}
100
101void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
102{
103 int irq = msi->irq & ZPCI_MSI_MASK;
104 struct msi_map *map;
105
106 msi->msg.address_lo = 0;
107 msi->msg.address_hi = 0;
108 msi->msg.data = 0;
109 msi->irq = 0;
110 zpci_msi_set_mask_bits(msi, 1, 1);
111
112 spin_lock(&msi_map_lock);
113 map = zdev->msi_map[irq];
114 hlist_del_rcu(&map->msi_chain);
115 kfree(map);
116 zdev->msi_map[irq] = NULL;
117 spin_unlock(&msi_map_lock);
118}
119
120/*
121 * The msi hash table has 256 entries, which is good for 4..20
122 * devices (a typical device allocates 10 + CPUs MSIs). Maybe make
123 * the hash table size adjustable later.
124 */
125int __init zpci_msihash_init(void)
126{
127 unsigned int i;
128
129 msi_hash = kmalloc(256 * sizeof(*msi_hash), GFP_KERNEL);
130 if (!msi_hash)
131 return -ENOMEM;
132
133 for (i = 0; i < (1U << msihash_shift); i++)
134 INIT_HLIST_HEAD(&msi_hash[i]);
135 return 0;
136}
137
138void __init zpci_msihash_exit(void)
139{
140 kfree(msi_hash);
141}
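
__irq_get_msi_desc() and zpci_setup_msi_irq() above keep a small chained hash table that maps an interrupt number to its msi_desc. The sketch below shows the same insert/lookup pattern in stand-alone C with a trivial modulo hash; the bucket count, struct and function names are illustrative, and the RCU and msi_map_lock protection used by the driver is omitted.

#include <stdio.h>
#include <stdlib.h>

#define HASH_SHIFT	6
#define HASH_BUCKETS	(1U << HASH_SHIFT)

struct msi_map {
	unsigned int irq;
	void *desc;			/* stand-in for struct msi_desc * */
	struct msi_map *next;
};

static struct msi_map *msi_hash[HASH_BUCKETS];

static unsigned int msi_hashfn(unsigned int irq)
{
	return irq & (HASH_BUCKETS - 1);	/* hash_long() in the kernel code */
}

/* mirrors the bookkeeping part of zpci_setup_msi_irq() */
static int msi_map_add(unsigned int irq, void *desc)
{
	struct msi_map *map = malloc(sizeof(*map));

	if (!map)
		return -1;
	map->irq = irq;
	map->desc = desc;
	map->next = msi_hash[msi_hashfn(irq)];
	msi_hash[msi_hashfn(irq)] = map;
	return 0;
}

/* mirrors __irq_get_msi_desc() */
static void *msi_map_lookup(unsigned int irq)
{
	struct msi_map *map;

	for (map = msi_hash[msi_hashfn(irq)]; map; map = map->next)
		if (map->irq == irq)
			return map->desc;
	return NULL;
}

int main(void)
{
	int desc;

	msi_map_add(70, &desc);
	printf("irq 70: %s, irq 71: %s\n",
	       msi_map_lookup(70) ? "found" : "missing",
	       msi_map_lookup(71) ? "found" : "missing");
	return 0;
}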
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
new file mode 100644
index 000000000000..a42cce69d0a0
--- /dev/null
+++ b/arch/s390/pci/pci_sysfs.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/stat.h>
13#include <linux/pci.h>
14
15static ssize_t show_fid(struct device *dev, struct device_attribute *attr,
16 char *buf)
17{
18 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
19
20 sprintf(buf, "0x%08x\n", zdev->fid);
21 return strlen(buf);
22}
23static DEVICE_ATTR(function_id, S_IRUGO, show_fid, NULL);
24
25static ssize_t show_fh(struct device *dev, struct device_attribute *attr,
26 char *buf)
27{
28 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
29
30 sprintf(buf, "0x%08x\n", zdev->fh);
31 return strlen(buf);
32}
33static DEVICE_ATTR(function_handle, S_IRUGO, show_fh, NULL);
34
35static ssize_t show_pchid(struct device *dev, struct device_attribute *attr,
36 char *buf)
37{
38 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
39
40 sprintf(buf, "0x%04x\n", zdev->pchid);
41 return strlen(buf);
42}
43static DEVICE_ATTR(pchid, S_IRUGO, show_pchid, NULL);
44
45static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
46 char *buf)
47{
48 struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
49
50 sprintf(buf, "0x%02x\n", zdev->pfgid);
51 return strlen(buf);
52}
53static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
54
55static struct device_attribute *zpci_dev_attrs[] = {
56 &dev_attr_function_id,
57 &dev_attr_function_handle,
58 &dev_attr_pchid,
59 &dev_attr_pfgid,
60 NULL,
61};
62
63int zpci_sysfs_add_device(struct device *dev)
64{
65 int i, rc = 0;
66
67 for (i = 0; zpci_dev_attrs[i]; i++) {
68 rc = device_create_file(dev, zpci_dev_attrs[i]);
69 if (rc)
70 goto error;
71 }
72 return 0;
73
74error:
75 while (--i >= 0)
76 device_remove_file(dev, zpci_dev_attrs[i]);
77 return rc;
78}
79
80void zpci_sysfs_remove_device(struct device *dev)
81{
82 int i;
83
84 for (i = 0; zpci_dev_attrs[i]; i++)
85 device_remove_file(dev, zpci_dev_attrs[i]);
86}
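
zpci_sysfs_add_device() above uses the common register-with-rollback idiom: create attributes until one fails, then remove the already-created ones in reverse order. A generic stand-alone sketch of the same idiom, with placeholder callbacks instead of device_create_file()/device_remove_file():

#include <stdio.h>

static const char *entries[] = {
	"function_id", "function_handle", "pchid", "pfgid", NULL
};

static int create_entry(const char *name)
{
	printf("created %s\n", name);
	return 0;			/* return nonzero here to exercise the rollback path */
}

static void remove_entry(const char *name)
{
	printf("removed %s\n", name);
}

static int add_all(void)
{
	int i, rc = 0;

	for (i = 0; entries[i]; i++) {
		rc = create_entry(entries[i]);
		if (rc)
			goto error;
	}
	return 0;

error:
	while (--i >= 0)		/* unwind only what was created */
		remove_entry(entries[i]);
	return rc;
}

int main(void)
{
	return add_all();
}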
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index f34838839b08..29437eabe095 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,7 +1,7 @@
1config VGA_ARB 1config VGA_ARB
2 bool "VGA Arbitration" if EXPERT 2 bool "VGA Arbitration" if EXPERT
3 default y 3 default y
4 depends on PCI 4 depends on (PCI && !S390)
5 help 5 help
6 Some "legacy" VGA devices implemented on PCI typically have the same 6 Some "legacy" VGA devices implemented on PCI typically have the same
7 hard-decoded addresses as they did on ISA. When multiple PCI devices 7 hard-decoded addresses as they did on ISA. When multiple PCI devices
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index b0e46dede1a9..13e9e63a7266 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -151,4 +151,15 @@ config HOTPLUG_PCI_SGI
151 151
152 When in doubt, say N. 152 When in doubt, say N.
153 153
154config HOTPLUG_PCI_S390
155 tristate "System z PCI Hotplug Support"
156 depends on S390 && 64BIT
157 help
158 Say Y here if you want to use the System z PCI Hotplug
159 driver for PCI devices. Without this driver it is not
160 possible to access stand-by PCI functions nor to deconfigure
161 PCI functions.
162
163 When in doubt, say Y.
164
154endif # HOTPLUG_PCI 165endif # HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index c459cd4e39c2..47ec8c80e16d 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
18obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o 18obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
19obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o 19obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
20obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o 20obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
21obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
21 22
22# acpiphp_ibm extends acpiphp, so should be linked afterwards. 23# acpiphp_ibm extends acpiphp, so should be linked afterwards.
23 24
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
new file mode 100644
index 000000000000..dee68e0698e1
--- /dev/null
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -0,0 +1,252 @@
1/*
2 * PCI Hot Plug Controller Driver for System z
3 *
4 * Copyright 2012 IBM Corp.
5 *
6 * Author(s):
7 * Jan Glauber <jang@linux.vnet.ibm.com>
8 */
9
10#define COMPONENT "zPCI hpc"
11#define pr_fmt(fmt) COMPONENT ": " fmt
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/slab.h>
16#include <linux/pci.h>
17#include <linux/pci_hotplug.h>
18#include <linux/init.h>
19#include <asm/sclp.h>
20
21#define SLOT_NAME_SIZE 10
22static LIST_HEAD(s390_hotplug_slot_list);
23
24MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com>");
25MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
26MODULE_LICENSE("GPL");
27
28static int zpci_fn_configured(enum zpci_state state)
29{
30 return state == ZPCI_FN_STATE_CONFIGURED ||
31 state == ZPCI_FN_STATE_ONLINE;
32}
33
34/*
35 * struct slot - slot information for each *physical* slot
36 */
37struct slot {
38 struct list_head slot_list;
39 struct hotplug_slot *hotplug_slot;
40 struct zpci_dev *zdev;
41};
42
43static int enable_slot(struct hotplug_slot *hotplug_slot)
44{
45 struct slot *slot = hotplug_slot->private;
46 int rc;
47
48 if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
49 return -EIO;
50
51 rc = sclp_pci_configure(slot->zdev->fid);
52 if (!rc) {
53 slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
54		/* automatically scan the device after it was configured */
55 zpci_enable_device(slot->zdev);
56 zpci_scan_device(slot->zdev);
57 }
58 return rc;
59}
60
61static int disable_slot(struct hotplug_slot *hotplug_slot)
62{
63 struct slot *slot = hotplug_slot->private;
64 int rc;
65
66 if (!zpci_fn_configured(slot->zdev->state))
67 return -EIO;
68
69 /* TODO: we rely on the user to unbind/remove the device, is that plausible
70 * or do we need to trigger that here?
71 */
72 rc = sclp_pci_deconfigure(slot->zdev->fid);
73 if (!rc) {
74 /* Fixme: better call List-PCI to find the disabled FH
75 for the FID since the FH should be opaque... */
76 slot->zdev->fh &= 0x7fffffff;
77 slot->zdev->state = ZPCI_FN_STATE_STANDBY;
78 }
79 return rc;
80}
81
82static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
83{
84 struct slot *slot = hotplug_slot->private;
85
86 switch (slot->zdev->state) {
87 case ZPCI_FN_STATE_STANDBY:
88 *value = 0;
89 break;
90 default:
91 *value = 1;
92 break;
93 }
94 return 0;
95}
96
97static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
98{
99	/* if the slot exists it always contains a function */
100 *value = 1;
101 return 0;
102}
103
104static void release_slot(struct hotplug_slot *hotplug_slot)
105{
106 struct slot *slot = hotplug_slot->private;
107
108 pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
109 kfree(slot->hotplug_slot->info);
110 kfree(slot->hotplug_slot);
111 kfree(slot);
112}
113
114static struct hotplug_slot_ops s390_hotplug_slot_ops = {
115 .enable_slot = enable_slot,
116 .disable_slot = disable_slot,
117 .get_power_status = get_power_status,
118 .get_adapter_status = get_adapter_status,
119};
120
121static int init_pci_slot(struct zpci_dev *zdev)
122{
123 struct hotplug_slot *hotplug_slot;
124 struct hotplug_slot_info *info;
125 char name[SLOT_NAME_SIZE];
126 struct slot *slot;
127 int rc;
128
129 if (!zdev)
130 return 0;
131
132 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
133 if (!slot)
134 goto error;
135
136 hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
137 if (!hotplug_slot)
138 goto error_hp;
139 hotplug_slot->private = slot;
140
141 slot->hotplug_slot = hotplug_slot;
142 slot->zdev = zdev;
143
144 info = kzalloc(sizeof(*info), GFP_KERNEL);
145 if (!info)
146 goto error_info;
147 hotplug_slot->info = info;
148
149 hotplug_slot->ops = &s390_hotplug_slot_ops;
150 hotplug_slot->release = &release_slot;
151
152 get_power_status(hotplug_slot, &info->power_status);
153 get_adapter_status(hotplug_slot, &info->adapter_status);
154
155 snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
156 rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
157 ZPCI_DEVFN, name);
158 if (rc) {
159 pr_err("pci_hp_register failed with error %d\n", rc);
160 goto error_reg;
161 }
162 list_add(&slot->slot_list, &s390_hotplug_slot_list);
163 return 0;
164
165error_reg:
166 kfree(info);
167error_info:
168 kfree(hotplug_slot);
169error_hp:
170 kfree(slot);
171error:
172 return -ENOMEM;
173}
174
175static int __init init_pci_slots(void)
176{
177 struct zpci_dev *zdev;
178 int device = 0;
179
180 /*
181 * Create a structure for each slot, and register that slot
182 * with the pci_hotplug subsystem.
183 */
184 mutex_lock(&zpci_list_lock);
185 list_for_each_entry(zdev, &zpci_list, entry) {
186 init_pci_slot(zdev);
187 device++;
188 }
189
190 mutex_unlock(&zpci_list_lock);
191 return (device) ? 0 : -ENODEV;
192}
193
194static void exit_pci_slot(struct zpci_dev *zdev)
195{
196 struct list_head *tmp, *n;
197 struct slot *slot;
198
199 list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
200 slot = list_entry(tmp, struct slot, slot_list);
201 if (slot->zdev != zdev)
202 continue;
203 list_del(&slot->slot_list);
204 pci_hp_deregister(slot->hotplug_slot);
205 }
206}
207
208static void __exit exit_pci_slots(void)
209{
210 struct list_head *tmp, *n;
211 struct slot *slot;
212
213 /*
214 * Unregister all of our slots with the pci_hotplug subsystem.
215	 * Memory will be freed in the release_slot() callback after the
216	 * slot's lifespan is finished.
217 */
218 list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
219 slot = list_entry(tmp, struct slot, slot_list);
220 list_del(&slot->slot_list);
221 pci_hp_deregister(slot->hotplug_slot);
222 }
223}
224
225static int __init pci_hotplug_s390_init(void)
226{
227 /*
228 * Do specific initialization stuff for your driver here
229 * like initializing your controller hardware (if any) and
230 * determining the number of slots you have in the system
231 * right now.
232 */
233
234 if (!pci_probe)
235 return -EOPNOTSUPP;
236
237 /* register callbacks for slot handling from arch code */
238 mutex_lock(&zpci_list_lock);
239 hotplug_ops.create_slot = init_pci_slot;
240 hotplug_ops.remove_slot = exit_pci_slot;
241 mutex_unlock(&zpci_list_lock);
242 pr_info("registered hotplug slot callbacks\n");
243 return init_pci_slots();
244}
245
246static void __exit pci_hotplug_s390_exit(void)
247{
248 exit_pci_slots();
249}
250
251module_init(pci_hotplug_s390_init);
252module_exit(pci_hotplug_s390_exit);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a825d78fd0aa..5099636a6e5f 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -207,6 +207,8 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag)
207 desc->masked = __msix_mask_irq(desc, flag); 207 desc->masked = __msix_mask_irq(desc, flag);
208} 208}
209 209
210#ifdef CONFIG_GENERIC_HARDIRQS
211
210static void msi_set_mask_bit(struct irq_data *data, u32 flag) 212static void msi_set_mask_bit(struct irq_data *data, u32 flag)
211{ 213{
212 struct msi_desc *desc = irq_data_get_msi(data); 214 struct msi_desc *desc = irq_data_get_msi(data);
@@ -230,6 +232,8 @@ void unmask_msi_irq(struct irq_data *data)
230 msi_set_mask_bit(data, 0); 232 msi_set_mask_bit(data, 0);
231} 233}
232 234
235#endif /* CONFIG_GENERIC_HARDIRQS */
236
233void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 237void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
234{ 238{
235 BUG_ON(entry->dev->current_state != PCI_D0); 239 BUG_ON(entry->dev->current_state != PCI_D0);
@@ -337,8 +341,10 @@ static void free_msi_irqs(struct pci_dev *dev)
337 if (!entry->irq) 341 if (!entry->irq)
338 continue; 342 continue;
339 nvec = 1 << entry->msi_attrib.multiple; 343 nvec = 1 << entry->msi_attrib.multiple;
344#ifdef CONFIG_GENERIC_HARDIRQS
340 for (i = 0; i < nvec; i++) 345 for (i = 0; i < nvec; i++)
341 BUG_ON(irq_has_action(entry->irq + i)); 346 BUG_ON(irq_has_action(entry->irq + i));
347#endif
342 } 348 }
343 349
344 arch_teardown_msi_irqs(dev); 350 arch_teardown_msi_irqs(dev);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 0595c763dafd..29225e1c159c 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -349,6 +349,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
349 return rc; 349 return rc;
350} 350}
351 351
352static inline
353int _wait_for_empty_queues(struct dasd_device *device)
354{
355 if (device->block)
356 return list_empty(&device->ccw_queue) &&
357 list_empty(&device->block->ccw_queue);
358 else
359 return list_empty(&device->ccw_queue);
360}
361
352/* 362/*
353 * Remove device from block device layer. Destroy dirty buffers. 363 * Remove device from block device layer. Destroy dirty buffers.
354 * Forget format information. Check if the target level is basic 364 * Forget format information. Check if the target level is basic
@@ -1841,6 +1851,13 @@ static void __dasd_device_check_expire(struct dasd_device *device)
1841 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1851 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1842 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1852 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1843 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1853 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1854 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1855 /*
1856 * IO in safe offline processing should not
1857 * run out of retries
1858 */
1859 cqr->retries++;
1860 }
1844 if (device->discipline->term_IO(cqr) != 0) { 1861 if (device->discipline->term_IO(cqr) != 0) {
1845 /* Hmpf, try again in 5 sec */ 1862 /* Hmpf, try again in 5 sec */
1846 dev_err(&device->cdev->dev, 1863 dev_err(&device->cdev->dev,
@@ -3024,11 +3041,11 @@ void dasd_generic_remove(struct ccw_device *cdev)
3024 3041
3025 cdev->handler = NULL; 3042 cdev->handler = NULL;
3026 3043
3027 dasd_remove_sysfs_files(cdev);
3028 device = dasd_device_from_cdev(cdev); 3044 device = dasd_device_from_cdev(cdev);
3029 if (IS_ERR(device)) 3045 if (IS_ERR(device))
3030 return; 3046 return;
3031 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3047 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3048 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3032 /* Already doing offline processing */ 3049 /* Already doing offline processing */
3033 dasd_put_device(device); 3050 dasd_put_device(device);
3034 return; 3051 return;
@@ -3048,6 +3065,8 @@ void dasd_generic_remove(struct ccw_device *cdev)
3048 */ 3065 */
3049 if (block) 3066 if (block)
3050 dasd_free_block(block); 3067 dasd_free_block(block);
3068
3069 dasd_remove_sysfs_files(cdev);
3051} 3070}
3052 3071
3053/* 3072/*
@@ -3126,16 +3145,13 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3126{ 3145{
3127 struct dasd_device *device; 3146 struct dasd_device *device;
3128 struct dasd_block *block; 3147 struct dasd_block *block;
3129 int max_count, open_count; 3148 int max_count, open_count, rc;
3130 3149
3150 rc = 0;
3131 device = dasd_device_from_cdev(cdev); 3151 device = dasd_device_from_cdev(cdev);
3132 if (IS_ERR(device)) 3152 if (IS_ERR(device))
3133 return PTR_ERR(device); 3153 return PTR_ERR(device);
3134 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3154
3135 /* Already doing offline processing */
3136 dasd_put_device(device);
3137 return 0;
3138 }
3139 /* 3155 /*
3140 * We must make sure that this device is currently not in use. 3156 * We must make sure that this device is currently not in use.
3141 * The open_count is increased for every opener, that includes 3157 * The open_count is increased for every opener, that includes
@@ -3159,6 +3175,54 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3159 return -EBUSY; 3175 return -EBUSY;
3160 } 3176 }
3161 } 3177 }
3178
3179 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3180 /*
3181		 * safe offline already running
3182 * could only be called by normal offline so safe_offline flag
3183 * needs to be removed to run normal offline and kill all I/O
3184 */
3185 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3186 /* Already doing normal offline processing */
3187 dasd_put_device(device);
3188 return -EBUSY;
3189 } else
3190 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
3191
3192 } else
3193 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3194 /* Already doing offline processing */
3195 dasd_put_device(device);
3196 return -EBUSY;
3197 }
3198
3199 /*
3200 * if safe_offline called set safe_offline_running flag and
3201 * clear safe_offline so that a call to normal offline
3202	 * can override safe_offline processing
3203 */
3204 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3205 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3206 /*
3207 * If we want to set the device safe offline all IO operations
3208 * should be finished before continuing the offline process
3209 * so sync bdev first and then wait for our queues to become
3210 * empty
3211 */
3212 /* sync blockdev and partitions */
3213 rc = fsync_bdev(device->block->bdev);
3214 if (rc != 0)
3215 goto interrupted;
3216
3217 /* schedule device tasklet and wait for completion */
3218 dasd_schedule_device_bh(device);
3219 rc = wait_event_interruptible(shutdown_waitq,
3220 _wait_for_empty_queues(device));
3221 if (rc != 0)
3222 goto interrupted;
3223 }
3224
3225 set_bit(DASD_FLAG_OFFLINE, &device->flags);
3162 dasd_set_target_state(device, DASD_STATE_NEW); 3226 dasd_set_target_state(device, DASD_STATE_NEW);
3163 /* dasd_delete_device destroys the device reference. */ 3227 /* dasd_delete_device destroys the device reference. */
3164 block = device->block; 3228 block = device->block;
@@ -3170,6 +3234,14 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3170 if (block) 3234 if (block)
3171 dasd_free_block(block); 3235 dasd_free_block(block);
3172 return 0; 3236 return 0;
3237
3238interrupted:
3239 /* interrupted by signal */
3240 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
3241 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3242 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3243 dasd_put_device(device);
3244 return rc;
3173} 3245}
3174 3246
3175int dasd_generic_last_path_gone(struct dasd_device *device) 3247int dasd_generic_last_path_gone(struct dasd_device *device)
@@ -3489,15 +3561,6 @@ char *dasd_get_sense(struct irb *irb)
3489} 3561}
3490EXPORT_SYMBOL_GPL(dasd_get_sense); 3562EXPORT_SYMBOL_GPL(dasd_get_sense);
3491 3563
3492static inline int _wait_for_empty_queues(struct dasd_device *device)
3493{
3494 if (device->block)
3495 return list_empty(&device->ccw_queue) &&
3496 list_empty(&device->block->ccw_queue);
3497 else
3498 return list_empty(&device->ccw_queue);
3499}
3500
3501void dasd_generic_shutdown(struct ccw_device *cdev) 3564void dasd_generic_shutdown(struct ccw_device *cdev)
3502{ 3565{
3503 struct dasd_device *device; 3566 struct dasd_device *device;
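
The safe-offline handling added to dasd_generic_set_offline() above boils down to a small flag handshake: a requested DASD_FLAG_SAFE_OFFLINE (set when the user writes to the new safe_offline sysfs attribute, added in dasd_devmap.c below) is converted into DASD_FLAG_SAFE_OFFLINE_RUNNING, outstanding I/O is drained (fsync_bdev() plus waiting for empty ccw queues), and only then is DASD_FLAG_OFFLINE set. The stand-alone sketch below mimics that ordering for the main path; the bit helpers and the printout are illustrative, and the error and interrupt branches of the driver are left out.

#include <stdio.h>
#include <stdbool.h>

enum {
	FLAG_OFFLINE,
	FLAG_SAFE_OFFLINE,
	FLAG_SAFE_OFFLINE_RUNNING,
};

static unsigned long flags;

static bool test_and_set(int bit)
{
	bool old = flags & (1UL << bit);

	flags |= 1UL << bit;
	return old;
}

static bool test_and_clear(int bit)
{
	bool old = flags & (1UL << bit);

	flags &= ~(1UL << bit);
	return old;
}

/* main path of the decision sequence in dasd_generic_set_offline() */
static int set_offline(void)
{
	if (test_and_clear(FLAG_SAFE_OFFLINE) &&
	    !test_and_set(FLAG_SAFE_OFFLINE_RUNNING))
		/* in the driver: fsync_bdev() and wait for empty ccw queues */
		printf("draining I/O before going offline\n");

	if (test_and_set(FLAG_OFFLINE)) {
		printf("already offline\n");
		return -1;
	}
	printf("device set offline\n");
	return 0;
}

int main(void)
{
	flags |= 1UL << FLAG_SAFE_OFFLINE;	/* safe offline was requested */
	return set_offline();
}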
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 20cfd028edcf..c196827c228f 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -952,6 +952,39 @@ static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
952 dasd_use_raw_store); 952 dasd_use_raw_store);
953 953
954static ssize_t 954static ssize_t
955dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
956 const char *buf, size_t count)
957{
958 struct ccw_device *cdev = to_ccwdev(dev);
959 struct dasd_device *device;
960 int rc;
961
962 device = dasd_device_from_cdev(cdev);
963 if (IS_ERR(device)) {
964 rc = PTR_ERR(device);
965 goto out;
966 }
967
968 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
969 test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
970 /* Already doing offline processing */
971 dasd_put_device(device);
972 rc = -EBUSY;
973 goto out;
974 }
975
976 set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
977 dasd_put_device(device);
978
979 rc = ccw_device_set_offline(cdev);
980
981out:
982 return rc ? rc : count;
983}
984
985static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
986
987static ssize_t
955dasd_discipline_show(struct device *dev, struct device_attribute *attr, 988dasd_discipline_show(struct device *dev, struct device_attribute *attr,
956 char *buf) 989 char *buf)
957{ 990{
@@ -1320,6 +1353,7 @@ static struct attribute * dasd_attrs[] = {
1320 &dev_attr_expires.attr, 1353 &dev_attr_expires.attr,
1321 &dev_attr_reservation_policy.attr, 1354 &dev_attr_reservation_policy.attr,
1322 &dev_attr_last_known_reservation_state.attr, 1355 &dev_attr_last_known_reservation_state.attr,
1356 &dev_attr_safe_offline.attr,
1323 NULL, 1357 NULL,
1324}; 1358};
1325 1359
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 108332b44d98..806fe912d6e7 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1026,7 +1026,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1026{ 1026{
1027 void *conf_data; 1027 void *conf_data;
1028 int conf_len, conf_data_saved; 1028 int conf_len, conf_data_saved;
1029 int rc; 1029 int rc, path_err;
1030 __u8 lpm, opm; 1030 __u8 lpm, opm;
1031 struct dasd_eckd_private *private, path_private; 1031 struct dasd_eckd_private *private, path_private;
1032 struct dasd_path *path_data; 1032 struct dasd_path *path_data;
@@ -1037,6 +1037,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1037 path_data = &device->path_data; 1037 path_data = &device->path_data;
1038 opm = ccw_device_get_path_mask(device->cdev); 1038 opm = ccw_device_get_path_mask(device->cdev);
1039 conf_data_saved = 0; 1039 conf_data_saved = 0;
1040 path_err = 0;
1040 /* get configuration data per operational path */ 1041 /* get configuration data per operational path */
1041 for (lpm = 0x80; lpm; lpm>>= 1) { 1042 for (lpm = 0x80; lpm; lpm>>= 1) {
1042 if (!(lpm & opm)) 1043 if (!(lpm & opm))
@@ -1122,7 +1123,8 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1122 "the same device, path %02X leads to " 1123 "the same device, path %02X leads to "
1123 "device %s instead of %s\n", lpm, 1124 "device %s instead of %s\n", lpm,
1124 print_path_uid, print_device_uid); 1125 print_path_uid, print_device_uid);
1125 return -EINVAL; 1126 path_err = -EINVAL;
1127 continue;
1126 } 1128 }
1127 1129
1128 path_private.conf_data = NULL; 1130 path_private.conf_data = NULL;
@@ -1142,7 +1144,7 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
1142 kfree(conf_data); 1144 kfree(conf_data);
1143 } 1145 }
1144 1146
1145 return 0; 1147 return path_err;
1146} 1148}
1147 1149
1148static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm) 1150static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
@@ -3847,7 +3849,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
3847 3849
3848 len = 0; 3850 len = 0;
3849 while (from <= to) { 3851 while (from <= to) {
3850 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3852 len += sprintf(page + len, PRINTK_HEADER
3851 " CCW %p: %08X %08X DAT:", 3853 " CCW %p: %08X %08X DAT:",
3852 from, ((int *) from)[0], ((int *) from)[1]); 3854 from, ((int *) from)[0], ((int *) from)[1]);
3853 3855
@@ -3908,23 +3910,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3908 return; 3910 return;
3909 } 3911 }
3910 /* dump the sense data */ 3912 /* dump the sense data */
3911 len = sprintf(page, KERN_ERR PRINTK_HEADER 3913 len = sprintf(page, PRINTK_HEADER
3912 " I/O status report for device %s:\n", 3914 " I/O status report for device %s:\n",
3913 dev_name(&device->cdev->dev)); 3915 dev_name(&device->cdev->dev));
3914 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3916 len += sprintf(page + len, PRINTK_HEADER
3915 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 3917 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
3916 "CS:%02X RC:%d\n", 3918 "CS:%02X RC:%d\n",
3917 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 3919 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
3918 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 3920 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
3919 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 3921 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
3920 req ? req->intrc : 0); 3922 req ? req->intrc : 0);
3921 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3923 len += sprintf(page + len, PRINTK_HEADER
3922 " device %s: Failing CCW: %p\n", 3924 " device %s: Failing CCW: %p\n",
3923 dev_name(&device->cdev->dev), 3925 dev_name(&device->cdev->dev),
3924 (void *) (addr_t) irb->scsw.cmd.cpa); 3926 (void *) (addr_t) irb->scsw.cmd.cpa);
3925 if (irb->esw.esw0.erw.cons) { 3927 if (irb->esw.esw0.erw.cons) {
3926 for (sl = 0; sl < 4; sl++) { 3928 for (sl = 0; sl < 4; sl++) {
3927 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3929 len += sprintf(page + len, PRINTK_HEADER
3928 " Sense(hex) %2d-%2d:", 3930 " Sense(hex) %2d-%2d:",
3929 (8 * sl), ((8 * sl) + 7)); 3931 (8 * sl), ((8 * sl) + 7));
3930 3932
@@ -3937,23 +3939,23 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3937 3939
3938 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 3940 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3939 /* 24 Byte Sense Data */ 3941 /* 24 Byte Sense Data */
3940 sprintf(page + len, KERN_ERR PRINTK_HEADER 3942 sprintf(page + len, PRINTK_HEADER
3941 " 24 Byte: %x MSG %x, " 3943 " 24 Byte: %x MSG %x, "
3942 "%s MSGb to SYSOP\n", 3944 "%s MSGb to SYSOP\n",
3943 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 3945 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3944 irb->ecw[1] & 0x10 ? "" : "no"); 3946 irb->ecw[1] & 0x10 ? "" : "no");
3945 } else { 3947 } else {
3946 /* 32 Byte Sense Data */ 3948 /* 32 Byte Sense Data */
3947 sprintf(page + len, KERN_ERR PRINTK_HEADER 3949 sprintf(page + len, PRINTK_HEADER
3948 " 32 Byte: Format: %x " 3950 " 32 Byte: Format: %x "
3949 "Exception class %x\n", 3951 "Exception class %x\n",
3950 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 3952 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3951 } 3953 }
3952 } else { 3954 } else {
3953 sprintf(page + len, KERN_ERR PRINTK_HEADER 3955 sprintf(page + len, PRINTK_HEADER
3954 " SORRY - NO VALID SENSE AVAILABLE\n"); 3956 " SORRY - NO VALID SENSE AVAILABLE\n");
3955 } 3957 }
3956 printk("%s", page); 3958 printk(KERN_ERR "%s", page);
3957 3959
3958 if (req) { 3960 if (req) {
3959 /* req == NULL for unsolicited interrupts */ 3961 /* req == NULL for unsolicited interrupts */
@@ -3962,10 +3964,10 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3962 first = req->cpaddr; 3964 first = req->cpaddr;
3963 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 3965 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3964 to = min(first + 6, last); 3966 to = min(first + 6, last);
3965 len = sprintf(page, KERN_ERR PRINTK_HEADER 3967 len = sprintf(page, PRINTK_HEADER
3966 " Related CP in req: %p\n", req); 3968 " Related CP in req: %p\n", req);
3967 dasd_eckd_dump_ccw_range(first, to, page + len); 3969 dasd_eckd_dump_ccw_range(first, to, page + len);
3968 printk("%s", page); 3970 printk(KERN_ERR "%s", page);
3969 3971
3970 /* print failing CCW area (maximum 4) */ 3972 /* print failing CCW area (maximum 4) */
3971 /* scsw->cda is either valid or zero */ 3973 /* scsw->cda is either valid or zero */
@@ -3975,7 +3977,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3975 irb->scsw.cmd.cpa; /* failing CCW */ 3977 irb->scsw.cmd.cpa; /* failing CCW */
3976 if (from < fail - 2) { 3978 if (from < fail - 2) {
3977 from = fail - 2; /* there is a gap - print header */ 3979 from = fail - 2; /* there is a gap - print header */
3978 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 3980 len += sprintf(page, PRINTK_HEADER "......\n");
3979 } 3981 }
3980 to = min(fail + 1, last); 3982 to = min(fail + 1, last);
3981 len += dasd_eckd_dump_ccw_range(from, to, page + len); 3983 len += dasd_eckd_dump_ccw_range(from, to, page + len);
@@ -3984,11 +3986,11 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3984 from = max(from, ++to); 3986 from = max(from, ++to);
3985 if (from < last - 1) { 3987 if (from < last - 1) {
3986 from = last - 1; /* there is a gap - print header */ 3988 from = last - 1; /* there is a gap - print header */
3987 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 3989 len += sprintf(page + len, PRINTK_HEADER "......\n");
3988 } 3990 }
3989 len += dasd_eckd_dump_ccw_range(from, last, page + len); 3991 len += dasd_eckd_dump_ccw_range(from, last, page + len);
3990 if (len > 0) 3992 if (len > 0)
3991 printk("%s", page); 3993 printk(KERN_ERR "%s", page);
3992 } 3994 }
3993 free_page((unsigned long) page); 3995 free_page((unsigned long) page);
3994} 3996}
@@ -4012,10 +4014,10 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4012 return; 4014 return;
4013 } 4015 }
4014 /* dump the sense data */ 4016 /* dump the sense data */
4015 len = sprintf(page, KERN_ERR PRINTK_HEADER 4017 len = sprintf(page, PRINTK_HEADER
4016 " I/O status report for device %s:\n", 4018 " I/O status report for device %s:\n",
4017 dev_name(&device->cdev->dev)); 4019 dev_name(&device->cdev->dev));
4018 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4020 len += sprintf(page + len, PRINTK_HEADER
4019 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 4021 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
4020 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 4022 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
4021 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 4023 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
@@ -4023,7 +4025,7 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4023 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 4025 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
4024 irb->scsw.tm.fcxs, irb->scsw.tm.schxs, 4026 irb->scsw.tm.fcxs, irb->scsw.tm.schxs,
4025 req ? req->intrc : 0); 4027 req ? req->intrc : 0);
4026 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4028 len += sprintf(page + len, PRINTK_HEADER
4027 " device %s: Failing TCW: %p\n", 4029 " device %s: Failing TCW: %p\n",
4028 dev_name(&device->cdev->dev), 4030 dev_name(&device->cdev->dev),
4029 (void *) (addr_t) irb->scsw.tm.tcw); 4031 (void *) (addr_t) irb->scsw.tm.tcw);
@@ -4035,43 +4037,42 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4035 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 4037 (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
4036 4038
4037 if (tsb) { 4039 if (tsb) {
4038 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4040 len += sprintf(page + len, PRINTK_HEADER
4039 " tsb->length %d\n", tsb->length); 4041 " tsb->length %d\n", tsb->length);
4040 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4042 len += sprintf(page + len, PRINTK_HEADER
4041 " tsb->flags %x\n", tsb->flags); 4043 " tsb->flags %x\n", tsb->flags);
4042 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4044 len += sprintf(page + len, PRINTK_HEADER
4043 " tsb->dcw_offset %d\n", tsb->dcw_offset); 4045 " tsb->dcw_offset %d\n", tsb->dcw_offset);
4044 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4046 len += sprintf(page + len, PRINTK_HEADER
4045 " tsb->count %d\n", tsb->count); 4047 " tsb->count %d\n", tsb->count);
4046 residual = tsb->count - 28; 4048 residual = tsb->count - 28;
4047 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4049 len += sprintf(page + len, PRINTK_HEADER
4048 " residual %d\n", residual); 4050 " residual %d\n", residual);
4049 4051
4050 switch (tsb->flags & 0x07) { 4052 switch (tsb->flags & 0x07) {
4051 case 1: /* tsa_iostat */ 4053 case 1: /* tsa_iostat */
4052 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4054 len += sprintf(page + len, PRINTK_HEADER
4053 " tsb->tsa.iostat.dev_time %d\n", 4055 " tsb->tsa.iostat.dev_time %d\n",
4054 tsb->tsa.iostat.dev_time); 4056 tsb->tsa.iostat.dev_time);
4055 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4057 len += sprintf(page + len, PRINTK_HEADER
4056 " tsb->tsa.iostat.def_time %d\n", 4058 " tsb->tsa.iostat.def_time %d\n",
4057 tsb->tsa.iostat.def_time); 4059 tsb->tsa.iostat.def_time);
4058 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4060 len += sprintf(page + len, PRINTK_HEADER
4059 " tsb->tsa.iostat.queue_time %d\n", 4061 " tsb->tsa.iostat.queue_time %d\n",
4060 tsb->tsa.iostat.queue_time); 4062 tsb->tsa.iostat.queue_time);
4061 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4063 len += sprintf(page + len, PRINTK_HEADER
4062 " tsb->tsa.iostat.dev_busy_time %d\n", 4064 " tsb->tsa.iostat.dev_busy_time %d\n",
4063 tsb->tsa.iostat.dev_busy_time); 4065 tsb->tsa.iostat.dev_busy_time);
4064 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4066 len += sprintf(page + len, PRINTK_HEADER
4065 " tsb->tsa.iostat.dev_act_time %d\n", 4067 " tsb->tsa.iostat.dev_act_time %d\n",
4066 tsb->tsa.iostat.dev_act_time); 4068 tsb->tsa.iostat.dev_act_time);
4067 sense = tsb->tsa.iostat.sense; 4069 sense = tsb->tsa.iostat.sense;
4068 break; 4070 break;
4069 case 2: /* ts_ddpc */ 4071 case 2: /* ts_ddpc */
4070 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4072 len += sprintf(page + len, PRINTK_HEADER
4071 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 4073 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
4072 for (sl = 0; sl < 2; sl++) { 4074 for (sl = 0; sl < 2; sl++) {
4073 len += sprintf(page + len, 4075 len += sprintf(page + len, PRINTK_HEADER
4074 KERN_ERR PRINTK_HEADER
4075 " tsb->tsa.ddpc.rcq %2d-%2d: ", 4076 " tsb->tsa.ddpc.rcq %2d-%2d: ",
4076 (8 * sl), ((8 * sl) + 7)); 4077 (8 * sl), ((8 * sl) + 7));
4077 rcq = tsb->tsa.ddpc.rcq; 4078 rcq = tsb->tsa.ddpc.rcq;
@@ -4084,15 +4085,14 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4084 sense = tsb->tsa.ddpc.sense; 4085 sense = tsb->tsa.ddpc.sense;
4085 break; 4086 break;
4086 case 3: /* tsa_intrg */ 4087 case 3: /* tsa_intrg */
4087 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 4088 len += sprintf(page + len, PRINTK_HEADER
4088 " tsb->tsa.intrg.: not supported yet \n"); 4089 " tsb->tsa.intrg.: not supported yet\n");
4089 break; 4090 break;
4090 } 4091 }
4091 4092
4092 if (sense) { 4093 if (sense) {
4093 for (sl = 0; sl < 4; sl++) { 4094 for (sl = 0; sl < 4; sl++) {
4094 len += sprintf(page + len, 4095 len += sprintf(page + len, PRINTK_HEADER
4095 KERN_ERR PRINTK_HEADER
4096 " Sense(hex) %2d-%2d:", 4096 " Sense(hex) %2d-%2d:",
4097 (8 * sl), ((8 * sl) + 7)); 4097 (8 * sl), ((8 * sl) + 7));
4098 for (sct = 0; sct < 8; sct++) { 4098 for (sct = 0; sct < 8; sct++) {
@@ -4104,27 +4104,27 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
4104 4104
4105 if (sense[27] & DASD_SENSE_BIT_0) { 4105 if (sense[27] & DASD_SENSE_BIT_0) {
4106 /* 24 Byte Sense Data */ 4106 /* 24 Byte Sense Data */
4107 sprintf(page + len, KERN_ERR PRINTK_HEADER 4107 sprintf(page + len, PRINTK_HEADER
4108 " 24 Byte: %x MSG %x, " 4108 " 24 Byte: %x MSG %x, "
4109 "%s MSGb to SYSOP\n", 4109 "%s MSGb to SYSOP\n",
4110 sense[7] >> 4, sense[7] & 0x0f, 4110 sense[7] >> 4, sense[7] & 0x0f,
4111 sense[1] & 0x10 ? "" : "no"); 4111 sense[1] & 0x10 ? "" : "no");
4112 } else { 4112 } else {
4113 /* 32 Byte Sense Data */ 4113 /* 32 Byte Sense Data */
4114 sprintf(page + len, KERN_ERR PRINTK_HEADER 4114 sprintf(page + len, PRINTK_HEADER
4115 " 32 Byte: Format: %x " 4115 " 32 Byte: Format: %x "
4116 "Exception class %x\n", 4116 "Exception class %x\n",
4117 sense[6] & 0x0f, sense[22] >> 4); 4117 sense[6] & 0x0f, sense[22] >> 4);
4118 } 4118 }
4119 } else { 4119 } else {
4120 sprintf(page + len, KERN_ERR PRINTK_HEADER 4120 sprintf(page + len, PRINTK_HEADER
4121 " SORRY - NO VALID SENSE AVAILABLE\n"); 4121 " SORRY - NO VALID SENSE AVAILABLE\n");
4122 } 4122 }
4123 } else { 4123 } else {
4124 sprintf(page + len, KERN_ERR PRINTK_HEADER 4124 sprintf(page + len, PRINTK_HEADER
4125 " SORRY - NO TSB DATA AVAILABLE\n"); 4125 " SORRY - NO TSB DATA AVAILABLE\n");
4126 } 4126 }
4127 printk("%s", page); 4127 printk(KERN_ERR "%s", page);
4128 free_page((unsigned long) page); 4128 free_page((unsigned long) page);
4129} 4129}
4130 4130
@@ -4161,9 +4161,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4161 private = (struct dasd_eckd_private *) device->private; 4161 private = (struct dasd_eckd_private *) device->private;
4162 4162
4163 /* Read Configuration Data */ 4163 /* Read Configuration Data */
4164 rc = dasd_eckd_read_conf(device); 4164 dasd_eckd_read_conf(device);
4165 if (rc)
4166 goto out_err;
4167 4165
4168 dasd_eckd_get_uid(device, &temp_uid); 4166 dasd_eckd_get_uid(device, &temp_uid);
4169 /* Generate device unique id */ 4167 /* Generate device unique id */
@@ -4183,9 +4181,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
4183 dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST); 4181 dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST);
4184 4182
4185 /* RE-Read Configuration Data */ 4183 /* RE-Read Configuration Data */
4186 rc = dasd_eckd_read_conf(device); 4184 dasd_eckd_read_conf(device);
4187 if (rc)
4188 goto out_err;
4189 4185
4190 /* Read Feature Codes */ 4186 /* Read Feature Codes */
4191 dasd_eckd_read_features(device); 4187 dasd_eckd_read_features(device);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index fb7f3bdc6604..eb748507c7fa 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -479,19 +479,19 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
479 "No memory to dump sense data"); 479 "No memory to dump sense data");
480 return; 480 return;
481 } 481 }
482 len = sprintf(page, KERN_ERR PRINTK_HEADER 482 len = sprintf(page, PRINTK_HEADER
483 " I/O status report for device %s:\n", 483 " I/O status report for device %s:\n",
484 dev_name(&device->cdev->dev)); 484 dev_name(&device->cdev->dev));
485 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 485 len += sprintf(page + len, PRINTK_HEADER
486 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 486 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
487 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat); 487 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
488 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 488 len += sprintf(page + len, PRINTK_HEADER
489 " device %s: Failing CCW: %p\n", 489 " device %s: Failing CCW: %p\n",
490 dev_name(&device->cdev->dev), 490 dev_name(&device->cdev->dev),
491 (void *) (addr_t) irb->scsw.cmd.cpa); 491 (void *) (addr_t) irb->scsw.cmd.cpa);
492 if (irb->esw.esw0.erw.cons) { 492 if (irb->esw.esw0.erw.cons) {
493 for (sl = 0; sl < 4; sl++) { 493 for (sl = 0; sl < 4; sl++) {
494 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 494 len += sprintf(page + len, PRINTK_HEADER
495 " Sense(hex) %2d-%2d:", 495 " Sense(hex) %2d-%2d:",
496 (8 * sl), ((8 * sl) + 7)); 496 (8 * sl), ((8 * sl) + 7));
497 497
@@ -502,7 +502,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
502 len += sprintf(page + len, "\n"); 502 len += sprintf(page + len, "\n");
503 } 503 }
504 } else { 504 } else {
505 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 505 len += sprintf(page + len, PRINTK_HEADER
506 " SORRY - NO VALID SENSE AVAILABLE\n"); 506 " SORRY - NO VALID SENSE AVAILABLE\n");
507 } 507 }
508 printk(KERN_ERR "%s", page); 508 printk(KERN_ERR "%s", page);
@@ -512,10 +512,9 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
512 act = req->cpaddr; 512 act = req->cpaddr;
513 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 513 for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
514 end = min(act + 8, last); 514 end = min(act + 8, last);
515 len = sprintf(page, KERN_ERR PRINTK_HEADER 515 len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
516 " Related CP in req: %p\n", req);
517 while (act <= end) { 516 while (act <= end) {
518 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 517 len += sprintf(page + len, PRINTK_HEADER
519 " CCW %p: %08X %08X DAT:", 518 " CCW %p: %08X %08X DAT:",
520 act, ((int *) act)[0], ((int *) act)[1]); 519 act, ((int *) act)[0], ((int *) act)[1]);
521 for (count = 0; count < 32 && count < act->count; 520 for (count = 0; count < 32 && count < act->count;
@@ -533,11 +532,11 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
533 len = 0; 532 len = 0;
534 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) { 533 if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
535 act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2; 534 act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
536 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 535 len += sprintf(page + len, PRINTK_HEADER "......\n");
537 } 536 }
538 end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last); 537 end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
539 while (act <= end) { 538 while (act <= end) {
540 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 539 len += sprintf(page + len, PRINTK_HEADER
541 " CCW %p: %08X %08X DAT:", 540 " CCW %p: %08X %08X DAT:",
542 act, ((int *) act)[0], ((int *) act)[1]); 541 act, ((int *) act)[0], ((int *) act)[1]);
543 for (count = 0; count < 32 && count < act->count; 542 for (count = 0; count < 32 && count < act->count;
@@ -552,10 +551,10 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
552 /* print last CCWs */ 551 /* print last CCWs */
553 if (act < last - 2) { 552 if (act < last - 2) {
554 act = last - 2; 553 act = last - 2;
555 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 554 len += sprintf(page + len, PRINTK_HEADER "......\n");
556 } 555 }
557 while (act <= last) { 556 while (act <= last) {
558 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 557 len += sprintf(page + len, PRINTK_HEADER
559 " CCW %p: %08X %08X DAT:", 558 " CCW %p: %08X %08X DAT:",
560 act, ((int *) act)[0], ((int *) act)[1]); 559 act, ((int *) act)[0], ((int *) act)[1]);
561 for (count = 0; count < 32 && count < act->count; 560 for (count = 0; count < 32 && count < act->count;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 7ff93eea673d..899e3f5a56e5 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -516,6 +516,8 @@ struct dasd_block {
516#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */ 516#define DASD_FLAG_IS_RESERVED 7 /* The device is reserved */
517#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */ 517#define DASD_FLAG_LOCK_STOLEN 8 /* The device lock was stolen */
518#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */ 518#define DASD_FLAG_SUSPENDED 9 /* The device was suspended */
519#define DASD_FLAG_SAFE_OFFLINE 10 /* safe offline processing requested*/
520#define DASD_FLAG_SAFE_OFFLINE_RUNNING 11 /* safe offline running */
519 521
520 522
521void dasd_put_device_wake(struct dasd_device *); 523void dasd_put_device_wake(struct dasd_device *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 8252f37d04ed..03c0e0444553 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -19,6 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <asm/compat.h> 20#include <asm/compat.h>
21#include <asm/ccwdev.h> 21#include <asm/ccwdev.h>
22#include <asm/schid.h>
22#include <asm/cmb.h> 23#include <asm/cmb.h>
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24 25
@@ -308,11 +309,12 @@ static int dasd_ioctl_information(struct dasd_block *block,
308 unsigned int cmd, void __user *argp) 309 unsigned int cmd, void __user *argp)
309{ 310{
310 struct dasd_information2_t *dasd_info; 311 struct dasd_information2_t *dasd_info;
311 unsigned long flags; 312 struct subchannel_id sch_id;
312 int rc; 313 struct ccw_dev_id dev_id;
313 struct dasd_device *base; 314 struct dasd_device *base;
314 struct ccw_device *cdev; 315 struct ccw_device *cdev;
315 struct ccw_dev_id dev_id; 316 unsigned long flags;
317 int rc;
316 318
317 base = block->base; 319 base = block->base;
318 if (!base->discipline || !base->discipline->fill_info) 320 if (!base->discipline || !base->discipline->fill_info)
@@ -330,9 +332,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
330 332
331 cdev = base->cdev; 333 cdev = base->cdev;
332 ccw_device_get_id(cdev, &dev_id); 334 ccw_device_get_id(cdev, &dev_id);
335 ccw_device_get_schid(cdev, &sch_id);
333 336
334 dasd_info->devno = dev_id.devno; 337 dasd_info->devno = dev_id.devno;
335 dasd_info->schid = _ccw_device_get_subchannel_number(base->cdev); 338 dasd_info->schid = sch_id.sch_no;
336 dasd_info->cu_type = cdev->id.cu_type; 339 dasd_info->cu_type = cdev->id.cu_type;
337 dasd_info->cu_model = cdev->id.cu_model; 340 dasd_info->cu_model = cdev->id.cu_model;
338 dasd_info->dev_type = cdev->id.dev_type; 341 dasd_info->dev_type = cdev->id.dev_type;
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index d7e97ae9ef6d..25bcd4c0ed82 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 1999, 2009 2 * Copyright IBM Corp. 1999,2012
3 * 3 *
4 * Author(s): Martin Peschke <mpeschke@de.ibm.com> 4 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -103,6 +103,7 @@ extern u64 sclp_facilities;
103#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL) 103#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
104#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL) 104#define SCLP_HAS_CPU_INFO (sclp_facilities & 0x0800000000000000ULL)
105#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL) 105#define SCLP_HAS_CPU_RECONFIG (sclp_facilities & 0x0400000000000000ULL)
106#define SCLP_HAS_PCI_RECONFIG (sclp_facilities & 0x0000000040000000ULL)
106 107
107 108
108struct gds_subvector { 109struct gds_subvector {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 71ea923c322d..c44d13f607bc 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright IBM Corp. 2007, 2009 2 * Copyright IBM Corp. 2007,2012
3 * 3 *
4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, 4 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> 5 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/export.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16#include <linux/string.h> 17#include <linux/string.h>
17#include <linux/mm.h> 18#include <linux/mm.h>
@@ -19,10 +20,11 @@
19#include <linux/memory.h> 20#include <linux/memory.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <asm/ctl_reg.h>
22#include <asm/chpid.h> 24#include <asm/chpid.h>
23#include <asm/sclp.h>
24#include <asm/setup.h> 25#include <asm/setup.h>
25#include <asm/ctl_reg.h> 26#include <asm/page.h>
27#include <asm/sclp.h>
26 28
27#include "sclp.h" 29#include "sclp.h"
28 30
@@ -400,17 +402,15 @@ out:
400 402
401static int sclp_assign_storage(u16 rn) 403static int sclp_assign_storage(u16 rn)
402{ 404{
403 unsigned long long start, address; 405 unsigned long long start;
404 int rc; 406 int rc;
405 407
406 rc = do_assign_storage(0x000d0001, rn); 408 rc = do_assign_storage(0x000d0001, rn);
407 if (rc) 409 if (rc)
408 goto out; 410 return rc;
409 start = address = rn2addr(rn); 411 start = rn2addr(rn);
410 for (; address < start + rzm; address += PAGE_SIZE) 412 storage_key_init_range(start, start + rzm);
411 page_set_storage_key(address, PAGE_DEFAULT_KEY, 0); 413 return 0;
412out:
413 return rc;
414} 414}
415 415
416static int sclp_unassign_storage(u16 rn) 416static int sclp_unassign_storage(u16 rn)
@@ -702,6 +702,67 @@ __initcall(sclp_detect_standby_memory);
702#endif /* CONFIG_MEMORY_HOTPLUG */ 702#endif /* CONFIG_MEMORY_HOTPLUG */
703 703
704/* 704/*
705 * PCI I/O adapter configuration related functions.
706 */
707#define SCLP_CMDW_CONFIGURE_PCI 0x001a0001
708#define SCLP_CMDW_DECONFIGURE_PCI 0x001b0001
709
710#define SCLP_RECONFIG_PCI_ATPYE 2
711
712struct pci_cfg_sccb {
713 struct sccb_header header;
714 u8 atype; /* adapter type */
715 u8 reserved1;
716 u16 reserved2;
717 u32 aid; /* adapter identifier */
718} __packed;
719
720static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
721{
722 struct pci_cfg_sccb *sccb;
723 int rc;
724
725 if (!SCLP_HAS_PCI_RECONFIG)
726 return -EOPNOTSUPP;
727
728 sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
729 if (!sccb)
730 return -ENOMEM;
731
732 sccb->header.length = PAGE_SIZE;
733 sccb->atype = SCLP_RECONFIG_PCI_ATPYE;
734 sccb->aid = fid;
735 rc = do_sync_request(cmd, sccb);
736 if (rc)
737 goto out;
738 switch (sccb->header.response_code) {
739 case 0x0020:
740 case 0x0120:
741 break;
742 default:
743 pr_warn("configure PCI I/O adapter failed: cmd=0x%08x response=0x%04x\n",
744 cmd, sccb->header.response_code);
745 rc = -EIO;
746 break;
747 }
748out:
749 free_page((unsigned long) sccb);
750 return rc;
751}
752
753int sclp_pci_configure(u32 fid)
754{
755 return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
756}
757EXPORT_SYMBOL(sclp_pci_configure);
758
759int sclp_pci_deconfigure(u32 fid)
760{
761 return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
762}
763EXPORT_SYMBOL(sclp_pci_deconfigure);
764
765/*
705 * Channel path configuration related functions. 766 * Channel path configuration related functions.
706 */ 767 */
707 768
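sclp_pci_configure() and sclp_pci_deconfigure() are exported so the PCI code can (de)configure an I/O adapter by its function id. A hedged usage sketch, with the calling function invented for illustration:

	static int example_enable_pci_function(u32 fid)
	{
		int rc;

		rc = sclp_pci_configure(fid);	/* issues SCLP_CMDW_CONFIGURE_PCI */
		if (rc)
			return rc;		/* -EOPNOTSUPP, -ENOMEM or -EIO */
		/* ... continue with CLP enable and resource setup ... */
		return 0;
	}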
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 731470e68493..84846c2b96d3 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -65,10 +65,18 @@ static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
65 } 65 }
66} 66}
67 67
68static int ccwgroup_set_online(struct ccwgroup_device *gdev) 68/**
69 * ccwgroup_set_online() - enable a ccwgroup device
70 * @gdev: target ccwgroup device
71 *
72 * This function attempts to put the ccwgroup device into the online state.
73 * Returns:
74 * %0 on success and a negative error value on failure.
75 */
76int ccwgroup_set_online(struct ccwgroup_device *gdev)
69{ 77{
70 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 78 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
71 int ret = 0; 79 int ret = -EINVAL;
72 80
73 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) 81 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
74 return -EAGAIN; 82 return -EAGAIN;
@@ -84,11 +92,20 @@ out:
84 atomic_set(&gdev->onoff, 0); 92 atomic_set(&gdev->onoff, 0);
85 return ret; 93 return ret;
86} 94}
95EXPORT_SYMBOL(ccwgroup_set_online);
87 96
88static int ccwgroup_set_offline(struct ccwgroup_device *gdev) 97/**
98 * ccwgroup_set_offline() - disable a ccwgroup device
99 * @gdev: target ccwgroup device
100 *
101 * This function attempts to put the ccwgroup device into the offline state.
102 * Returns:
103 * %0 on success and a negative error value on failure.
104 */
105int ccwgroup_set_offline(struct ccwgroup_device *gdev)
89{ 106{
90 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); 107 struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
91 int ret = 0; 108 int ret = -EINVAL;
92 109
93 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) 110 if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
94 return -EAGAIN; 111 return -EAGAIN;
@@ -104,6 +121,7 @@ out:
104 atomic_set(&gdev->onoff, 0); 121 atomic_set(&gdev->onoff, 0);
105 return ret; 122 return ret;
106} 123}
124EXPORT_SYMBOL(ccwgroup_set_offline);
107 125
108static ssize_t ccwgroup_online_store(struct device *dev, 126static ssize_t ccwgroup_online_store(struct device *dev,
109 struct device_attribute *attr, 127 struct device_attribute *attr,
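With ccwgroup_set_online()/ccwgroup_set_offline() exported, group drivers can switch a device's state directly instead of going through the sysfs "online" attribute. A sketch of a resume-style callback (the function itself is hypothetical):

	static int example_group_restore(struct ccwgroup_device *gdev)
	{
		int rc;

		rc = ccwgroup_set_offline(gdev);
		if (rc && rc != -EAGAIN)
			return rc;
		return ccwgroup_set_online(gdev);
	}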
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4d51a7c4eb8b..68e80e2734a4 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * S/390 common I/O routines -- channel subsystem call 2 * S/390 common I/O routines -- channel subsystem call
3 * 3 *
4 * Copyright IBM Corp. 1999, 2010 4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Ingo Adlung (adlung@de.ibm.com) 5 * Author(s): Ingo Adlung (adlung@de.ibm.com)
6 * Cornelia Huck (cornelia.huck@de.ibm.com) 6 * Cornelia Huck (cornelia.huck@de.ibm.com)
7 * Arnd Bergmann (arndb@de.ibm.com) 7 * Arnd Bergmann (arndb@de.ibm.com)
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/pci.h>
17 18
18#include <asm/cio.h> 19#include <asm/cio.h>
19#include <asm/chpid.h> 20#include <asm/chpid.h>
@@ -260,26 +261,45 @@ __get_chpid_from_lir(void *data)
260 return (u16) (lir->indesc[0]&0x000000ff); 261 return (u16) (lir->indesc[0]&0x000000ff);
261} 262}
262 263
263struct chsc_sei_area { 264struct chsc_sei_nt0_area {
264 struct chsc_header request; 265 u8 flags;
266 u8 vf; /* validity flags */
267 u8 rs; /* reporting source */
268 u8 cc; /* content code */
269 u16 fla; /* full link address */
270 u16 rsid; /* reporting source id */
265 u32 reserved1; 271 u32 reserved1;
266 u32 reserved2; 272 u32 reserved2;
267 u32 reserved3;
268 struct chsc_header response;
269 u32 reserved4;
270 u8 flags;
271 u8 vf; /* validity flags */
272 u8 rs; /* reporting source */
273 u8 cc; /* content code */
274 u16 fla; /* full link address */
275 u16 rsid; /* reporting source id */
276 u32 reserved5;
277 u32 reserved6;
278 u8 ccdf[4096 - 16 - 24]; /* content-code dependent field */
279 /* ccdf has to be big enough for a link-incident record */ 273 /* ccdf has to be big enough for a link-incident record */
280} __attribute__ ((packed)); 274 u8 ccdf[PAGE_SIZE - 24 - 16]; /* content-code dependent field */
281 275} __packed;
282static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area) 276
277struct chsc_sei_nt2_area {
278 u8 flags; /* p and v bit */
279 u8 reserved1;
280 u8 reserved2;
281 u8 cc; /* content code */
282 u32 reserved3[13];
283 u8 ccdf[PAGE_SIZE - 24 - 56]; /* content-code dependent field */
284} __packed;
285
286#define CHSC_SEI_NT0 0ULL
287#define CHSC_SEI_NT2 (1ULL << 61)
288
289struct chsc_sei {
290 struct chsc_header request;
291 u32 reserved1;
292 u64 ntsm; /* notification type mask */
293 struct chsc_header response;
294 u32 reserved2;
295 union {
296 struct chsc_sei_nt0_area nt0_area;
297 struct chsc_sei_nt2_area nt2_area;
298 u8 nt_area[PAGE_SIZE - 24];
299 } u;
300} __packed;
301
302static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
283{ 303{
284 struct chp_id chpid; 304 struct chp_id chpid;
285 int id; 305 int id;
@@ -298,7 +318,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
298 } 318 }
299} 319}
300 320
301static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area) 321static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
302{ 322{
303 struct chp_link link; 323 struct chp_link link;
304 struct chp_id chpid; 324 struct chp_id chpid;
@@ -330,7 +350,7 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
330 s390_process_res_acc(&link); 350 s390_process_res_acc(&link);
331} 351}
332 352
333static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area) 353static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
334{ 354{
335 struct channel_path *chp; 355 struct channel_path *chp;
336 struct chp_id chpid; 356 struct chp_id chpid;
@@ -366,7 +386,7 @@ struct chp_config_data {
366 u8 pc; 386 u8 pc;
367}; 387};
368 388
369static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) 389static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
370{ 390{
371 struct chp_config_data *data; 391 struct chp_config_data *data;
372 struct chp_id chpid; 392 struct chp_id chpid;
@@ -398,7 +418,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
398 } 418 }
399} 419}
400 420
401static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area) 421static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
402{ 422{
403 int ret; 423 int ret;
404 424
@@ -412,13 +432,26 @@ static void chsc_process_sei_scm_change(struct chsc_sei_area *sei_area)
412 " failed (rc=%d).\n", ret); 432 " failed (rc=%d).\n", ret);
413} 433}
414 434
415static void chsc_process_sei(struct chsc_sei_area *sei_area) 435static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
416{ 436{
417 /* Check if we might have lost some information. */ 437#ifdef CONFIG_PCI
418 if (sei_area->flags & 0x40) { 438 switch (sei_area->cc) {
419 CIO_CRW_EVENT(2, "chsc: event overflow\n"); 439 case 1:
420 css_schedule_eval_all(); 440 zpci_event_error(sei_area->ccdf);
441 break;
442 case 2:
443 zpci_event_availability(sei_area->ccdf);
444 break;
445 default:
446 CIO_CRW_EVENT(2, "chsc: unhandled sei content code %d\n",
447 sei_area->cc);
448 break;
421 } 449 }
450#endif
451}
452
453static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
454{
422 /* which kind of information was stored? */ 455 /* which kind of information was stored? */
423 switch (sei_area->cc) { 456 switch (sei_area->cc) {
424 case 1: /* link incident*/ 457 case 1: /* link incident*/
@@ -443,9 +476,51 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
443 } 476 }
444} 477}
445 478
479static int __chsc_process_crw(struct chsc_sei *sei, u64 ntsm)
480{
481 do {
482 memset(sei, 0, sizeof(*sei));
483 sei->request.length = 0x0010;
484 sei->request.code = 0x000e;
485 sei->ntsm = ntsm;
486
487 if (chsc(sei))
488 break;
489
490 if (sei->response.code == 0x0001) {
491 CIO_CRW_EVENT(2, "chsc: sei successful\n");
492
493 /* Check if we might have lost some information. */
494 if (sei->u.nt0_area.flags & 0x40) {
495 CIO_CRW_EVENT(2, "chsc: event overflow\n");
496 css_schedule_eval_all();
497 }
498
499 switch (sei->ntsm) {
500 case CHSC_SEI_NT0:
501 chsc_process_sei_nt0(&sei->u.nt0_area);
502 return 1;
503 case CHSC_SEI_NT2:
504 chsc_process_sei_nt2(&sei->u.nt2_area);
505 return 1;
506 default:
507 CIO_CRW_EVENT(2, "chsc: unhandled nt (nt=%08Lx)\n",
508 sei->ntsm);
509 return 0;
510 }
511 } else {
512 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
513 sei->response.code);
514 break;
515 }
516 } while (sei->u.nt0_area.flags & 0x80);
517
518 return 0;
519}
520
446static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) 521static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
447{ 522{
448 struct chsc_sei_area *sei_area; 523 struct chsc_sei *sei;
449 524
450 if (overflow) { 525 if (overflow) {
451 css_schedule_eval_all(); 526 css_schedule_eval_all();
@@ -459,25 +534,18 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
459 return; 534 return;
460 /* Access to sei_page is serialized through machine check handler 535 /* Access to sei_page is serialized through machine check handler
461 * thread, so no need for locking. */ 536 * thread, so no need for locking. */
462 sei_area = sei_page; 537 sei = sei_page;
463 538
464 CIO_TRACE_EVENT(2, "prcss"); 539 CIO_TRACE_EVENT(2, "prcss");
465 do {
466 memset(sei_area, 0, sizeof(*sei_area));
467 sei_area->request.length = 0x0010;
468 sei_area->request.code = 0x000e;
469 if (chsc(sei_area))
470 break;
471 540
472 if (sei_area->response.code == 0x0001) { 541 /*
 473 			CIO_CRW_EVENT(4, "chsc: sei successful\n"); 542 	 * The ntsm does not allow selecting NT0 and NT2 together. We need to
 474 			chsc_process_sei(sei_area); 543 	 * first check for NT2, then additionally for NT0...
475 } else { 544 */
476 CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", 545#ifdef CONFIG_PCI
477 sei_area->response.code); 546 if (!__chsc_process_crw(sei, CHSC_SEI_NT2))
478 break; 547#endif
479 } 548 __chsc_process_crw(sei, CHSC_SEI_NT0);
480 } while (sei_area->flags & 0x80);
481} 549}
482 550
483void chsc_chp_online(struct chp_id chpid) 551void chsc_chp_online(struct chp_id chpid)
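The reworked store-event-information layout keeps the request, the ntsm, the response and the NT0/NT2 union within the single sei_page used by the machine check handler. A compile-time check along these lines (not part of the patch) documents the PAGE_SIZE - 24 sizing of the nt_area:

	static inline void example_check_sei_size(void)
	{
		/* 24 bytes of request/ntsm/response headers plus the union */
		BUILD_BUG_ON(sizeof(struct chsc_sei) > PAGE_SIZE);
	}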
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index fd3143c291c6..6995cff44636 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -2036,16 +2036,6 @@ void ccw_driver_unregister(struct ccw_driver *cdriver)
2036 driver_unregister(&cdriver->driver); 2036 driver_unregister(&cdriver->driver);
2037} 2037}
2038 2038
2039/* Helper func for qdio. */
2040struct subchannel_id
2041ccw_device_get_subchannel_id(struct ccw_device *cdev)
2042{
2043 struct subchannel *sch;
2044
2045 sch = to_subchannel(cdev->dev.parent);
2046 return sch->schid;
2047}
2048
2049static void ccw_device_todo(struct work_struct *work) 2039static void ccw_device_todo(struct work_struct *work)
2050{ 2040{
2051 struct ccw_device_private *priv; 2041 struct ccw_device_private *priv;
@@ -2138,4 +2128,3 @@ EXPORT_SYMBOL(ccw_device_set_offline);
2138EXPORT_SYMBOL(ccw_driver_register); 2128EXPORT_SYMBOL(ccw_driver_register);
2139EXPORT_SYMBOL(ccw_driver_unregister); 2129EXPORT_SYMBOL(ccw_driver_unregister);
2140EXPORT_SYMBOL(get_ccwdev_by_busid); 2130EXPORT_SYMBOL(get_ccwdev_by_busid);
2141EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 6bace6942396..2e575cff9845 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -142,9 +142,7 @@ int ccw_device_notify(struct ccw_device *, int);
142void ccw_device_set_disconnected(struct ccw_device *cdev); 142void ccw_device_set_disconnected(struct ccw_device *cdev);
143void ccw_device_set_notoper(struct ccw_device *cdev); 143void ccw_device_set_notoper(struct ccw_device *cdev);
144 144
145/* qdio needs this. */
146void ccw_device_set_timeout(struct ccw_device *, int); 145void ccw_device_set_timeout(struct ccw_device *, int);
147extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
148 146
149/* Channel measurement facility related */ 147/* Channel measurement facility related */
150void retry_set_schib(struct ccw_device *cdev); 148void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index ec7fb6d3b479..c77b6e06bf64 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -755,14 +755,18 @@ int ccw_device_tm_intrg(struct ccw_device *cdev)
755} 755}
756EXPORT_SYMBOL(ccw_device_tm_intrg); 756EXPORT_SYMBOL(ccw_device_tm_intrg);
757 757
758// FIXME: these have to go: 758/**
759 759 * ccw_device_get_schid - obtain a subchannel id
760int 760 * @cdev: device to obtain the id for
761_ccw_device_get_subchannel_number(struct ccw_device *cdev) 761 * @schid: where to fill in the values
762 */
763void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
762{ 764{
763 return cdev->private->schid.sch_no; 765 struct subchannel *sch = to_subchannel(cdev->dev.parent);
764}
765 766
767 *schid = sch->schid;
768}
769EXPORT_SYMBOL_GPL(ccw_device_get_schid);
766 770
767MODULE_LICENSE("GPL"); 771MODULE_LICENSE("GPL");
768EXPORT_SYMBOL(ccw_device_set_options_mask); 772EXPORT_SYMBOL(ccw_device_set_options_mask);
@@ -777,5 +781,4 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
777EXPORT_SYMBOL(ccw_device_start_key); 781EXPORT_SYMBOL(ccw_device_start_key);
778EXPORT_SYMBOL(ccw_device_get_ciw); 782EXPORT_SYMBOL(ccw_device_get_ciw);
779EXPORT_SYMBOL(ccw_device_get_path_mask); 783EXPORT_SYMBOL(ccw_device_get_path_mask);
780EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
781EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc); 784EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
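ccw_device_get_schid() replaces the old _ccw_device_get_subchannel_number() helper and hands back the full subchannel_id. A migration sketch with an invented caller:

	static void example_log_schid(struct ccw_device *cdev)
	{
		struct subchannel_id schid;

		ccw_device_get_schid(cdev, &schid);
		pr_info("%s sits on subchannel 0.%x.%04x\n",
			dev_name(&cdev->dev), schid.ssid, schid.sch_no);
	}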
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 368368fe04b2..908d287f66c1 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -234,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
234 * Determine pathgroup state from PGID data. 234 * Determine pathgroup state from PGID data.
235 */ 235 */
236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, 236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
237 int *mismatch, int *reserved, u8 *reset) 237 int *mismatch, u8 *reserved, u8 *reset)
238{ 238{
239 struct pgid *pgid = &cdev->private->pgid[0]; 239 struct pgid *pgid = &cdev->private->pgid[0];
240 struct pgid *first = NULL; 240 struct pgid *first = NULL;
@@ -248,7 +248,7 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
248 if ((cdev->private->pgid_valid_mask & lpm) == 0) 248 if ((cdev->private->pgid_valid_mask & lpm) == 0)
249 continue; 249 continue;
250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) 250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
251 *reserved = 1; 251 *reserved |= lpm;
252 if (pgid_is_reset(pgid)) { 252 if (pgid_is_reset(pgid)) {
253 *reset |= lpm; 253 *reset |= lpm;
254 continue; 254 continue;
@@ -316,14 +316,14 @@ static void snid_done(struct ccw_device *cdev, int rc)
316 struct subchannel *sch = to_subchannel(cdev->dev.parent); 316 struct subchannel *sch = to_subchannel(cdev->dev.parent);
317 struct pgid *pgid; 317 struct pgid *pgid;
318 int mismatch = 0; 318 int mismatch = 0;
319 int reserved = 0; 319 u8 reserved = 0;
320 u8 reset = 0; 320 u8 reset = 0;
321 u8 donepm; 321 u8 donepm;
322 322
323 if (rc) 323 if (rc)
324 goto out; 324 goto out;
325 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset); 325 pgid_analyze(cdev, &pgid, &mismatch, &reserved, &reset);
326 if (reserved) 326 if (reserved == cdev->private->pgid_valid_mask)
327 rc = -EUSERS; 327 rc = -EUSERS;
328 else if (mismatch) 328 else if (mismatch)
329 rc = -EOPNOTSUPP; 329 rc = -EOPNOTSUPP;
@@ -336,7 +336,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
336 } 336 }
337out: 337out:
338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
339 "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid, 339 "todo=%02x mism=%d rsvd=%02x reset=%02x\n", id->ssid,
340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, 340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
341 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 341 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
342 switch (rc) { 342 switch (rc) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e06fa03ea1e4..1671d3461f29 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -129,7 +129,6 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
129 int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; 129 int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
130 unsigned int ccq = 0; 130 unsigned int ccq = 0;
131 131
132 BUG_ON(!q->irq_ptr->sch_token);
133 qperf_inc(q, eqbs); 132 qperf_inc(q, eqbs);
134 133
135 if (!q->is_input_q) 134 if (!q->is_input_q)
@@ -147,7 +146,6 @@ again:
147 } 146 }
148 147
149 if (rc == 2) { 148 if (rc == 2) {
150 BUG_ON(tmp_count == count);
151 qperf_inc(q, eqbs_partial); 149 qperf_inc(q, eqbs_partial);
152 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", 150 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
153 tmp_count); 151 tmp_count);
@@ -189,8 +187,6 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
189 187
190 if (!count) 188 if (!count)
191 return 0; 189 return 0;
192
193 BUG_ON(!q->irq_ptr->sch_token);
194 qperf_inc(q, sqbs); 190 qperf_inc(q, sqbs);
195 191
196 if (!q->is_input_q) 192 if (!q->is_input_q)
@@ -199,7 +195,7 @@ again:
199 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 195 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
200 rc = qdio_check_ccq(q, ccq); 196 rc = qdio_check_ccq(q, ccq);
201 if (!rc) { 197 if (!rc) {
202 WARN_ON(tmp_count); 198 WARN_ON_ONCE(tmp_count);
203 return count - tmp_count; 199 return count - tmp_count;
204 } 200 }
205 201
@@ -224,9 +220,6 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
224 unsigned char __state = 0; 220 unsigned char __state = 0;
225 int i; 221 int i;
226 222
227 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
228 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
229
230 if (is_qebsm(q)) 223 if (is_qebsm(q))
231 return qdio_do_eqbs(q, state, bufnr, count, auto_ack); 224 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
232 225
@@ -258,9 +251,6 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
258{ 251{
259 int i; 252 int i;
260 253
261 BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
262 BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
263
264 if (is_qebsm(q)) 254 if (is_qebsm(q))
265 return qdio_do_sqbs(q, state, bufnr, count); 255 return qdio_do_sqbs(q, state, bufnr, count);
266 256
@@ -345,7 +335,6 @@ again:
345 335
346 /* hipersocket busy condition */ 336 /* hipersocket busy condition */
347 if (unlikely(*busy_bit)) { 337 if (unlikely(*busy_bit)) {
348 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
349 retries++; 338 retries++;
350 339
351 if (!start_time) { 340 if (!start_time) {
@@ -559,7 +548,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
559 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); 548 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
560 break; 549 break;
561 default: 550 default:
562 BUG(); 551 WARN_ON_ONCE(1);
563 } 552 }
564out: 553out:
565 return q->first_to_check; 554 return q->first_to_check;
@@ -678,12 +667,10 @@ static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
678 if (aob == NULL) 667 if (aob == NULL)
679 continue; 668 continue;
680 669
681 BUG_ON(q->u.out.sbal_state == NULL);
682 q->u.out.sbal_state[b].flags |= 670 q->u.out.sbal_state[b].flags |=
683 QDIO_OUTBUF_STATE_FLAG_PENDING; 671 QDIO_OUTBUF_STATE_FLAG_PENDING;
684 q->u.out.aobs[b] = NULL; 672 q->u.out.aobs[b] = NULL;
685 } else if (state == SLSB_P_OUTPUT_EMPTY) { 673 } else if (state == SLSB_P_OUTPUT_EMPTY) {
686 BUG_ON(q->u.out.sbal_state == NULL);
687 q->u.out.sbal_state[b].aob = NULL; 674 q->u.out.sbal_state[b].aob = NULL;
688 } 675 }
689 b = next_buf(b); 676 b = next_buf(b);
@@ -703,12 +690,11 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
703 q->aobs[bufnr] = aob; 690 q->aobs[bufnr] = aob;
704 } 691 }
705 if (q->aobs[bufnr]) { 692 if (q->aobs[bufnr]) {
706 BUG_ON(q->sbal_state == NULL);
707 q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE; 693 q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
708 q->sbal_state[bufnr].aob = q->aobs[bufnr]; 694 q->sbal_state[bufnr].aob = q->aobs[bufnr];
709 q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; 695 q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
710 phys_aob = virt_to_phys(q->aobs[bufnr]); 696 phys_aob = virt_to_phys(q->aobs[bufnr]);
711 BUG_ON(phys_aob & 0xFF); 697 WARN_ON_ONCE(phys_aob & 0xFF);
712 } 698 }
713 699
714out: 700out:
@@ -809,8 +795,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
809 goto out; 795 goto out;
810 796
811 switch (state) { 797 switch (state) {
812 case SLSB_P_OUTPUT_PENDING:
813 BUG();
814 case SLSB_P_OUTPUT_EMPTY: 798 case SLSB_P_OUTPUT_EMPTY:
815 /* the adapter got it */ 799 /* the adapter got it */
816 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, 800 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
@@ -840,7 +824,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
840 case SLSB_P_OUTPUT_HALTED: 824 case SLSB_P_OUTPUT_HALTED:
841 break; 825 break;
842 default: 826 default:
843 BUG(); 827 WARN_ON_ONCE(1);
844 } 828 }
845 829
846out: 830out:
@@ -912,7 +896,7 @@ retry:
912static void __qdio_outbound_processing(struct qdio_q *q) 896static void __qdio_outbound_processing(struct qdio_q *q)
913{ 897{
914 qperf_inc(q, tasklet_outbound); 898 qperf_inc(q, tasklet_outbound);
915 BUG_ON(atomic_read(&q->nr_buf_used) < 0); 899 WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
916 900
917 if (qdio_outbound_q_moved(q)) 901 if (qdio_outbound_q_moved(q))
918 qdio_kick_handler(q); 902 qdio_kick_handler(q);
@@ -1138,16 +1122,10 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1138 irq_ptr->perf_stat.qdio_int++; 1122 irq_ptr->perf_stat.qdio_int++;
1139 1123
1140 if (IS_ERR(irb)) { 1124 if (IS_ERR(irb)) {
1141 switch (PTR_ERR(irb)) { 1125 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
1142 case -EIO: 1126 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1143 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); 1127 wake_up(&cdev->private->wait_q);
1144 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 1128 return;
1145 wake_up(&cdev->private->wait_q);
1146 return;
1147 default:
1148 WARN_ON(1);
1149 return;
1150 }
1151 } 1129 }
1152 qdio_irq_check_sense(irq_ptr, irb); 1130 qdio_irq_check_sense(irq_ptr, irb);
1153 cstat = irb->scsw.cmd.cstat; 1131 cstat = irb->scsw.cmd.cstat;
@@ -1173,7 +1151,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1173 case QDIO_IRQ_STATE_STOPPED: 1151 case QDIO_IRQ_STATE_STOPPED:
1174 break; 1152 break;
1175 default: 1153 default:
1176 WARN_ON(1); 1154 WARN_ON_ONCE(1);
1177 } 1155 }
1178 wake_up(&cdev->private->wait_q); 1156 wake_up(&cdev->private->wait_q);
1179} 1157}
@@ -1227,7 +1205,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how)
1227 if (!irq_ptr) 1205 if (!irq_ptr)
1228 return -ENODEV; 1206 return -ENODEV;
1229 1207
1230 BUG_ON(irqs_disabled()); 1208 WARN_ON_ONCE(irqs_disabled());
1231 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); 1209 DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1232 1210
1233 mutex_lock(&irq_ptr->setup_mutex); 1211 mutex_lock(&irq_ptr->setup_mutex);
@@ -1358,7 +1336,6 @@ int qdio_allocate(struct qdio_initialize *init_data)
1358 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1336 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1359 if (!irq_ptr->qdr) 1337 if (!irq_ptr->qdr)
1360 goto out_rel; 1338 goto out_rel;
1361 WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1362 1339
1363 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, 1340 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1364 init_data->no_output_qs)) 1341 init_data->no_output_qs))
@@ -1597,9 +1574,7 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1597 1574
1598set: 1575set:
1599 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); 1576 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1600
1601 used = atomic_add_return(count, &q->nr_buf_used) - count; 1577 used = atomic_add_return(count, &q->nr_buf_used) - count;
1602 BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1603 1578
1604 if (need_siga_in(q)) 1579 if (need_siga_in(q))
1605 return qdio_siga_input(q); 1580 return qdio_siga_input(q);
@@ -1624,7 +1599,6 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1624 1599
1625 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1600 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1626 used = atomic_add_return(count, &q->nr_buf_used); 1601 used = atomic_add_return(count, &q->nr_buf_used);
1627 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1628 1602
1629 if (used == QDIO_MAX_BUFFERS_PER_Q) 1603 if (used == QDIO_MAX_BUFFERS_PER_Q)
1630 qperf_inc(q, outbound_queue_full); 1604 qperf_inc(q, outbound_queue_full);
@@ -1678,7 +1652,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1678{ 1652{
1679 struct qdio_irq *irq_ptr; 1653 struct qdio_irq *irq_ptr;
1680 1654
1681
1682 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) 1655 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1683 return -EINVAL; 1656 return -EINVAL;
1684 1657
@@ -1721,8 +1694,6 @@ int qdio_start_irq(struct ccw_device *cdev, int nr)
1721 return -ENODEV; 1694 return -ENODEV;
1722 q = irq_ptr->input_qs[nr]; 1695 q = irq_ptr->input_qs[nr];
1723 1696
1724 WARN_ON(queue_irqs_enabled(q));
1725
1726 clear_nonshared_ind(irq_ptr); 1697 clear_nonshared_ind(irq_ptr);
1727 qdio_stop_polling(q); 1698 qdio_stop_polling(q);
1728 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); 1699 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
@@ -1769,7 +1740,6 @@ int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1769 if (!irq_ptr) 1740 if (!irq_ptr)
1770 return -ENODEV; 1741 return -ENODEV;
1771 q = irq_ptr->input_qs[nr]; 1742 q = irq_ptr->input_qs[nr];
1772 WARN_ON(queue_irqs_enabled(q));
1773 1743
1774 /* 1744 /*
1775 * Cannot rely on automatic sync after interrupt since queues may 1745 * Cannot rely on automatic sync after interrupt since queues may
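The qdio changes replace BUG_ON() with WARN_ON_ONCE() for conditions that are unexpected but survivable. The general pattern, sketched with an invented helper:

	static void example_check_buffer_count(int used)
	{
		/* report the inconsistency once and keep the queue running
		 * instead of killing the machine with BUG_ON() */
		if (WARN_ON_ONCE(used < 0))
			return;
		/* ... normal processing ... */
	}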
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 6c973db14983..16ecd35b8e51 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,10 +140,8 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
140 q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); 140 q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
141 141
142 /* fill in sbal */ 142 /* fill in sbal */
143 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) { 143 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
144 q->sbal[j] = *sbals_array++; 144 q->sbal[j] = *sbals_array++;
145 BUG_ON((unsigned long)q->sbal[j] & 0xff);
146 }
147 145
148 /* fill in slib */ 146 /* fill in slib */
149 if (i > 0) { 147 if (i > 0) {
@@ -434,9 +432,8 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
434 irq_ptr->int_parm = init_data->int_parm; 432 irq_ptr->int_parm = init_data->int_parm;
435 irq_ptr->nr_input_qs = init_data->no_input_qs; 433 irq_ptr->nr_input_qs = init_data->no_input_qs;
436 irq_ptr->nr_output_qs = init_data->no_output_qs; 434 irq_ptr->nr_output_qs = init_data->no_output_qs;
437
438 irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
439 irq_ptr->cdev = init_data->cdev; 435 irq_ptr->cdev = init_data->cdev;
436 ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
440 setup_queues(irq_ptr, init_data); 437 setup_queues(irq_ptr, init_data);
441 438
442 setup_qib(irq_ptr, init_data); 439 setup_qib(irq_ptr, init_data);
@@ -483,7 +480,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
483 char s[80]; 480 char s[80];
484 481
485 snprintf(s, 80, "qdio: %s %s on SC %x using " 482 snprintf(s, 80, "qdio: %s %s on SC %x using "
486 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n", 483 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
487 dev_name(&cdev->dev), 484 dev_name(&cdev->dev),
488 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : 485 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
489 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), 486 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 2e060088fa87..bdb394b066fc 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -73,7 +73,6 @@ static void put_indicator(u32 *addr)
73void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) 73void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
74{ 74{
75 mutex_lock(&tiq_list_lock); 75 mutex_lock(&tiq_list_lock);
76 BUG_ON(irq_ptr->nr_input_qs < 1);
77 list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); 76 list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
78 mutex_unlock(&tiq_list_lock); 77 mutex_unlock(&tiq_list_lock);
79 xchg(irq_ptr->dsci, 1 << 7); 78 xchg(irq_ptr->dsci, 1 << 7);
@@ -83,7 +82,6 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
83{ 82{
84 struct qdio_q *q; 83 struct qdio_q *q;
85 84
86 BUG_ON(irq_ptr->nr_input_qs < 1);
87 q = irq_ptr->input_qs[0]; 85 q = irq_ptr->input_qs[0];
88 /* if establish triggered an error */ 86 /* if establish triggered an error */
89 if (!q || !q->entry.prev || !q->entry.next) 87 if (!q || !q->entry.prev || !q->entry.next)
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 035b6dc31b71..7c522f338bda 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -241,84 +241,70 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_device *zdev,
241 struct ap_message *ap_msg, 241 struct ap_message *ap_msg,
242 struct ica_rsa_modexpo_crt *crt) 242 struct ica_rsa_modexpo_crt *crt)
243{ 243{
244 int mod_len, short_len, long_len, long_offset, limit; 244 int mod_len, short_len;
245 unsigned char *p, *q, *dp, *dq, *u, *inp; 245 unsigned char *p, *q, *dp, *dq, *u, *inp;
246 246
247 mod_len = crt->inputdatalength; 247 mod_len = crt->inputdatalength;
248 short_len = mod_len / 2; 248 short_len = mod_len / 2;
249 long_len = mod_len / 2 + 8;
250 249
251 /* 250 /*
252 * CEX2A cannot handle p, dp, or U > 128 bytes. 251 * CEX2A and CEX3A w/o FW update can handle requests up to
253 * If we have one of these, we need to do extra checking. 252 * 256 byte modulus (2k keys).
254 * For CEX3A the limit is 256 bytes. 253 * CEX3A with FW update and CEX4A cards are able to handle
254 * 512 byte modulus (4k keys).
255 */ 255 */
256 if (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE) 256 if (mod_len <= 128) { /* up to 1024 bit key size */
257 limit = 256;
258 else
259 limit = 128;
260
261 if (long_len > limit) {
262 /*
263 * zcrypt_rsa_crt already checked for the leading
264 * zeroes of np_prime, bp_key and u_mult_inc.
265 */
266 long_offset = long_len - limit;
267 long_len = limit;
268 } else
269 long_offset = 0;
270
271 /*
272 * Instead of doing extra work for p, dp, U > 64 bytes, we'll just use
273 * the larger message structure.
274 */
275 if (long_len <= 64) {
276 struct type50_crb1_msg *crb1 = ap_msg->message; 257 struct type50_crb1_msg *crb1 = ap_msg->message;
277 memset(crb1, 0, sizeof(*crb1)); 258 memset(crb1, 0, sizeof(*crb1));
278 ap_msg->length = sizeof(*crb1); 259 ap_msg->length = sizeof(*crb1);
279 crb1->header.msg_type_code = TYPE50_TYPE_CODE; 260 crb1->header.msg_type_code = TYPE50_TYPE_CODE;
280 crb1->header.msg_len = sizeof(*crb1); 261 crb1->header.msg_len = sizeof(*crb1);
281 crb1->keyblock_type = TYPE50_CRB1_FMT; 262 crb1->keyblock_type = TYPE50_CRB1_FMT;
282 p = crb1->p + sizeof(crb1->p) - long_len; 263 p = crb1->p + sizeof(crb1->p) - short_len;
283 q = crb1->q + sizeof(crb1->q) - short_len; 264 q = crb1->q + sizeof(crb1->q) - short_len;
284 dp = crb1->dp + sizeof(crb1->dp) - long_len; 265 dp = crb1->dp + sizeof(crb1->dp) - short_len;
285 dq = crb1->dq + sizeof(crb1->dq) - short_len; 266 dq = crb1->dq + sizeof(crb1->dq) - short_len;
286 u = crb1->u + sizeof(crb1->u) - long_len; 267 u = crb1->u + sizeof(crb1->u) - short_len;
287 inp = crb1->message + sizeof(crb1->message) - mod_len; 268 inp = crb1->message + sizeof(crb1->message) - mod_len;
288 } else if (long_len <= 128) { 269 } else if (mod_len <= 256) { /* up to 2048 bit key size */
289 struct type50_crb2_msg *crb2 = ap_msg->message; 270 struct type50_crb2_msg *crb2 = ap_msg->message;
290 memset(crb2, 0, sizeof(*crb2)); 271 memset(crb2, 0, sizeof(*crb2));
291 ap_msg->length = sizeof(*crb2); 272 ap_msg->length = sizeof(*crb2);
292 crb2->header.msg_type_code = TYPE50_TYPE_CODE; 273 crb2->header.msg_type_code = TYPE50_TYPE_CODE;
293 crb2->header.msg_len = sizeof(*crb2); 274 crb2->header.msg_len = sizeof(*crb2);
294 crb2->keyblock_type = TYPE50_CRB2_FMT; 275 crb2->keyblock_type = TYPE50_CRB2_FMT;
295 p = crb2->p + sizeof(crb2->p) - long_len; 276 p = crb2->p + sizeof(crb2->p) - short_len;
296 q = crb2->q + sizeof(crb2->q) - short_len; 277 q = crb2->q + sizeof(crb2->q) - short_len;
297 dp = crb2->dp + sizeof(crb2->dp) - long_len; 278 dp = crb2->dp + sizeof(crb2->dp) - short_len;
298 dq = crb2->dq + sizeof(crb2->dq) - short_len; 279 dq = crb2->dq + sizeof(crb2->dq) - short_len;
299 u = crb2->u + sizeof(crb2->u) - long_len; 280 u = crb2->u + sizeof(crb2->u) - short_len;
300 inp = crb2->message + sizeof(crb2->message) - mod_len; 281 inp = crb2->message + sizeof(crb2->message) - mod_len;
301 } else { 282 } else if ((mod_len <= 512) && /* up to 4096 bit key size */
302 /* long_len >= 256 */ 283 (zdev->max_mod_size == CEX3A_MAX_MOD_SIZE)) { /* >= CEX3A */
303 struct type50_crb3_msg *crb3 = ap_msg->message; 284 struct type50_crb3_msg *crb3 = ap_msg->message;
304 memset(crb3, 0, sizeof(*crb3)); 285 memset(crb3, 0, sizeof(*crb3));
305 ap_msg->length = sizeof(*crb3); 286 ap_msg->length = sizeof(*crb3);
306 crb3->header.msg_type_code = TYPE50_TYPE_CODE; 287 crb3->header.msg_type_code = TYPE50_TYPE_CODE;
307 crb3->header.msg_len = sizeof(*crb3); 288 crb3->header.msg_len = sizeof(*crb3);
308 crb3->keyblock_type = TYPE50_CRB3_FMT; 289 crb3->keyblock_type = TYPE50_CRB3_FMT;
309 p = crb3->p + sizeof(crb3->p) - long_len; 290 p = crb3->p + sizeof(crb3->p) - short_len;
310 q = crb3->q + sizeof(crb3->q) - short_len; 291 q = crb3->q + sizeof(crb3->q) - short_len;
311 dp = crb3->dp + sizeof(crb3->dp) - long_len; 292 dp = crb3->dp + sizeof(crb3->dp) - short_len;
312 dq = crb3->dq + sizeof(crb3->dq) - short_len; 293 dq = crb3->dq + sizeof(crb3->dq) - short_len;
313 u = crb3->u + sizeof(crb3->u) - long_len; 294 u = crb3->u + sizeof(crb3->u) - short_len;
314 inp = crb3->message + sizeof(crb3->message) - mod_len; 295 inp = crb3->message + sizeof(crb3->message) - mod_len;
315 } 296 } else
297 return -EINVAL;
316 298
317 if (copy_from_user(p, crt->np_prime + long_offset, long_len) || 299 /*
 300 	 * correct the offset of p, bp and mult_inv according to zcrypt.h
301 * block size right aligned (skip the first byte)
302 */
303 if (copy_from_user(p, crt->np_prime + MSGTYPE_ADJUSTMENT, short_len) ||
318 copy_from_user(q, crt->nq_prime, short_len) || 304 copy_from_user(q, crt->nq_prime, short_len) ||
319 copy_from_user(dp, crt->bp_key + long_offset, long_len) || 305 copy_from_user(dp, crt->bp_key + MSGTYPE_ADJUSTMENT, short_len) ||
320 copy_from_user(dq, crt->bq_key, short_len) || 306 copy_from_user(dq, crt->bq_key, short_len) ||
321 copy_from_user(u, crt->u_mult_inv + long_offset, long_len) || 307 copy_from_user(u, crt->u_mult_inv + MSGTYPE_ADJUSTMENT, short_len) ||
322 copy_from_user(inp, crt->inputdata, mod_len)) 308 copy_from_user(inp, crt->inputdata, mod_len))
323 return -EFAULT; 309 return -EFAULT;
324 310
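The CRT path now picks the type50 key block purely by modulus length, with the 512-byte (4096-bit) CRB3 format limited to adapters reporting CEX3A_MAX_MOD_SIZE. The selection logic restated as a stand-alone sketch (the helper is illustrative only):

	static int example_crt_keyblock_fmt(int mod_len, int max_mod_size)
	{
		if (mod_len <= 128)		/* up to 1024-bit keys */
			return TYPE50_CRB1_FMT;
		if (mod_len <= 256)		/* up to 2048-bit keys */
			return TYPE50_CRB2_FMT;
		if (mod_len <= 512 && max_mod_size == CEX3A_MAX_MOD_SIZE)
			return TYPE50_CRB3_FMT;	/* up to 4096-bit keys */
		return -EINVAL;
	}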
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index e56dc72c7733..0a66e4aeeb50 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -33,6 +33,8 @@
33#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/ 33#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /*sizeof(struct type50_crb2_msg)*/
34#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/ 34#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /*sizeof(struct type50_crb3_msg)*/
35 35
36#define MSGTYPE_ADJUSTMENT 0x08 /*type04 extension (not needed in type50)*/
37
36int zcrypt_msgtype50_init(void); 38int zcrypt_msgtype50_init(void);
37void zcrypt_msgtype50_exit(void); 39void zcrypt_msgtype50_exit(void);
38 40
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 448303bdb85f..9e0ebe051243 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -83,19 +83,25 @@ static inline void __raw_writel(u32 b, volatile void __iomem *addr)
83#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) 83#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
84 84
85#ifdef CONFIG_64BIT 85#ifdef CONFIG_64BIT
86#ifndef __raw_readq
86static inline u64 __raw_readq(const volatile void __iomem *addr) 87static inline u64 __raw_readq(const volatile void __iomem *addr)
87{ 88{
88 return *(const volatile u64 __force *) addr; 89 return *(const volatile u64 __force *) addr;
89} 90}
91#endif
92
90#define readq(addr) __le64_to_cpu(__raw_readq(addr)) 93#define readq(addr) __le64_to_cpu(__raw_readq(addr))
91 94
95#ifndef __raw_writeq
92static inline void __raw_writeq(u64 b, volatile void __iomem *addr) 96static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
93{ 97{
94 *(volatile u64 __force *) addr = b; 98 *(volatile u64 __force *) addr = b;
95} 99}
96#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
97#endif 100#endif
98 101
102#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
103#endif /* CONFIG_64BIT */
104
99#ifndef PCI_IOBASE 105#ifndef PCI_IOBASE
100#define PCI_IOBASE ((void __iomem *) 0) 106#define PCI_IOBASE ((void __iomem *) 0)
101#endif 107#endif
@@ -286,15 +292,20 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
286 292
287#ifndef CONFIG_GENERIC_IOMAP 293#ifndef CONFIG_GENERIC_IOMAP
288struct pci_dev; 294struct pci_dev;
295extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
296
297#ifndef pci_iounmap
289static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) 298static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
290{ 299{
291} 300}
301#endif
292#endif /* CONFIG_GENERIC_IOMAP */ 302#endif /* CONFIG_GENERIC_IOMAP */
293 303
294/* 304/*
295 * Change virtual addresses to physical addresses and vv. 305 * Change virtual addresses to physical addresses and vv.
296 * These are pretty trivial 306 * These are pretty trivial
297 */ 307 */
308#ifndef virt_to_phys
298static inline unsigned long virt_to_phys(volatile void *address) 309static inline unsigned long virt_to_phys(volatile void *address)
299{ 310{
300 return __pa((unsigned long)address); 311 return __pa((unsigned long)address);
@@ -304,6 +315,7 @@ static inline void *phys_to_virt(unsigned long address)
304{ 315{
305 return __va(address); 316 return __va(address);
306} 317}
318#endif
307 319
308/* 320/*
309 * Change "struct page" to physical address. 321 * Change "struct page" to physical address.
@@ -363,9 +375,16 @@ static inline void *bus_to_virt(unsigned long address)
363} 375}
364#endif 376#endif
365 377
378#ifndef memset_io
366#define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) 379#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
380#endif
381
382#ifndef memcpy_fromio
367#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c)) 383#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
384#endif
385#ifndef memcpy_toio
368#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c)) 386#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
387#endif
369 388
370#endif /* __KERNEL__ */ 389#endif /* __KERNEL__ */
371 390
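The new #ifndef guards let an architecture supply its own MMIO accessors (as the s390 PCI code does) while still including asm-generic/io.h for the rest. The override pattern, sketched with hypothetical arch names:

	/* arch/example/include/asm/io.h */
	static inline u64 __raw_readq(const volatile void __iomem *addr)
	{
		return example_arch_mmio_read64(addr);	/* hypothetical hook */
	}
	#define __raw_readq __raw_readq

	static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
	{
		example_arch_mmio_write64(val, addr);	/* hypothetical hook */
	}
	#define __raw_writeq __raw_writeq

	#include <asm-generic/io.h>	/* generic fallbacks are now skipped */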
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 526f10a637c1..fdf2c4a238cc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -10,9 +10,6 @@
10 */ 10 */
11 11
12#include <linux/smp.h> 12#include <linux/smp.h>
13
14#ifndef CONFIG_S390
15
16#include <linux/linkage.h> 13#include <linux/linkage.h>
17#include <linux/cache.h> 14#include <linux/cache.h>
18#include <linux/spinlock.h> 15#include <linux/spinlock.h>
@@ -746,8 +743,11 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
746static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } 743static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
747#endif 744#endif
748 745
749#endif /* CONFIG_GENERIC_HARDIRQS */ 746#else /* !CONFIG_GENERIC_HARDIRQS */
750 747
751#endif /* !CONFIG_S390 */ 748extern struct msi_desc *irq_get_msi_desc(unsigned int irq);
749extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
750
751#endif /* CONFIG_GENERIC_HARDIRQS */
752 752
753#endif /* _LINUX_IRQ_H */ 753#endif /* _LINUX_IRQ_H */
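irq_get_msi_desc() and irq_set_msi_desc() become visible without CONFIG_GENERIC_HARDIRQS so that the s390 PCI MSI code can associate descriptors with irq numbers. A hedged caller sketch (the surrounding function is an assumption):

	static int example_bind_msi(unsigned int irq, struct msi_desc *msi)
	{
		int rc;

		rc = irq_set_msi_desc(irq, msi);
		if (rc)
			return rc;
		WARN_ON(irq_get_msi_desc(irq) != msi);
		return 0;
	}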