author		Linus Torvalds <torvalds@linux-foundation.org>	2018-04-13 12:43:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-13 12:43:20 -0400
commit		6c21e4334adaf1ea0f74349be01adddf40e36a27 (patch)
tree		2fe5b781780664caaf44d1e4893662d91e725837 /drivers
parent		16e205cf42da1f497b10a4a24f563e6c0d574eec (diff)
parent		6a3d1e81a434fc311f224b8be77258bafc18ccc6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Martin Schwidefsky:
 "Three notable larger changes next to the usual bug fixing:

   - update the email addresses in MAINTAINERS for the s390 folks to
     use the simpler linux.ibm.com domain instead of the old
     linux.vnet.ibm.com

   - an update for the zcrypt device driver that removes some old and
     obsolete interfaces and adds support for up to 256 crypto adapters

   - a rework of the IPL aka boot code"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
  s390: correct nospec auto detection init order
  s390/zcrypt: Support up to 256 crypto adapters.
  s390/zcrypt: Remove deprecated zcrypt proc interface.
  s390/zcrypt: Remove deprecated ioctls.
  s390/zcrypt: Make ap init functions static.
  MAINTAINERS: update s390 maintainers email addresses
  s390/ipl: remove reipl_method and dump_method
  s390/ipl: correct kdump reipl block checksum calculation
  s390/ipl: remove non-existing functions declaration
  s390: assume diag308 set always works
  s390/ipl: avoid adding scpdata to cmdline during ftp/dvd boot
  s390/ipl: correct ipl parmblock valid checks
  s390/ipl: rely on diag308 store to get ipl info
  s390/ipl: move ipl_flags to ipl.c
  s390/ipl: get rid of ipl_ssid and ipl_devno
  s390/ipl: unite diag308 and scsi boot ipl blocks
  s390/ipl: ensure loadparm valid flag is set
  s390/qdio: lock device while installing IRQ handler
  s390/qdio: clear intparm during shutdown
  s390/ccwgroup: require at least one ccw device
  ...
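
As a rough, hypothetical illustration only (not part of this merge): the zcrypt update replaces the old 64-adapter status ioctls with extended variants, and a userspace tool could query the new extended device-status matrix through the ZCRYPT_DEVICE_STATUS ioctl roughly as sketched below. The /dev/z90crypt misc device node and the struct zcrypt_device_status_ext / MAX_ZDEV_ENTRIES_EXT definitions exported via <asm/zcrypt.h> are assumed to be available.

/* sketch only: enumerate online crypto queues via the extended status ioctl */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/zcrypt.h>

int main(void)
{
	struct zcrypt_device_status_ext *stat;
	int fd, i;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0) {
		perror("open /dev/z90crypt");
		return 1;
	}
	/* one entry per (adapter, domain) pair; now covers 256 adapters */
	stat = calloc(MAX_ZDEV_ENTRIES_EXT, sizeof(*stat));
	if (!stat)
		return 1;
	if (ioctl(fd, ZCRYPT_DEVICE_STATUS, stat) != 0) {
		perror("ZCRYPT_DEVICE_STATUS");
		free(stat);
		close(fd);
		return 1;
	}
	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++)
		if (stat[i].online)
			printf("qid 0x%04x hwtype %u functions 0x%02x\n",
			       (unsigned int) stat[i].qid,
			       (unsigned int) stat[i].hwtype,
			       (unsigned int) stat[i].functions);
	free(stat);
	close(fd);
	return 0;
}

The deprecated ZDEVICESTATUS/Z90STAT_* calls remain available for existing binaries but only report the first 64 adapters, as the compat cases in the zcrypt_api.c diff below show.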
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/s390/cio/ccwgroup.c		|   5
-rw-r--r--	drivers/s390/cio/cio.c			| 257
-rw-r--r--	drivers/s390/cio/ioasm.c		|  24
-rw-r--r--	drivers/s390/cio/ioasm.h		|   1
-rw-r--r--	drivers/s390/cio/qdio_main.c		|   4
-rw-r--r--	drivers/s390/cio/qdio_setup.c		|   2
-rw-r--r--	drivers/s390/crypto/ap_bus.c		|  32
-rw-r--r--	drivers/s390/crypto/ap_bus.h		|   5
-rw-r--r--	drivers/s390/crypto/ap_debug.h		|   3
-rw-r--r--	drivers/s390/crypto/pkey_api.c		|  41
-rw-r--r--	drivers/s390/crypto/zcrypt_api.c	| 471
-rw-r--r--	drivers/s390/crypto/zcrypt_api.h	|  26
12 files changed, 166 insertions, 705 deletions
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index bfec1485ca23..5535312602af 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -323,6 +323,9 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
 	struct ccw_dev_id dev_id;
 	int rc, i;
 
+	if (num_devices < 1)
+		return -EINVAL;
+
 	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
 		       GFP_KERNEL);
 	if (!gdev)
@@ -375,7 +378,7 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
 		goto error;
 	}
 	/* Check if the devices are bound to the required ccw driver. */
-	if (gdev->count && gdrv && gdrv->ccw_driver &&
+	if (gdrv && gdrv->ccw_driver &&
 	    gdev->cdev[0]->drv != gdrv->ccw_driver) {
 		rc = -EINVAL;
 		goto error;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 6886b3d34cf8..5130d7c67239 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -25,7 +25,6 @@
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/setup.h>
-#include <asm/reset.h>
 #include <asm/ipl.h>
 #include <asm/chpid.h>
 #include <asm/airq.h>
@@ -767,262 +766,6 @@ void cio_register_early_subchannels(void)
 }
 #endif /* CONFIG_CCW_CONSOLE */
 
-static int
-__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
-{
-	int retry, cc;
-
-	cc = 0;
-	for (retry=0;retry<3;retry++) {
-		schib->pmcw.ena = 0;
-		cc = msch(schid, schib);
-		if (cc)
-			return (cc==3?-ENODEV:-EBUSY);
-		if (stsch(schid, schib) || !css_sch_is_valid(schib))
-			return -ENODEV;
-		if (!schib->pmcw.ena)
-			return 0;
-	}
-	return -EBUSY; /* uhm... */
-}
-
-static int
-__clear_io_subchannel_easy(struct subchannel_id schid)
-{
-	int retry;
-
-	if (csch(schid))
-		return -ENODEV;
-	for (retry=0;retry<20;retry++) {
-		struct tpi_info ti;
-
-		if (tpi(&ti)) {
-			tsch(ti.schid, this_cpu_ptr(&cio_irb));
-			if (schid_equal(&ti.schid, &schid))
-				return 0;
-		}
-		udelay_simple(100);
-	}
-	return -EBUSY;
-}
-
-static void __clear_chsc_subchannel_easy(void)
-{
-	/* It seems we can only wait for a bit here :/ */
-	udelay_simple(100);
-}
-
-static int pgm_check_occured;
-
-static void cio_reset_pgm_check_handler(void)
-{
-	pgm_check_occured = 1;
-}
-
-static int stsch_reset(struct subchannel_id schid, struct schib *addr)
-{
-	int rc;
-
-	pgm_check_occured = 0;
-	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
-	rc = stsch(schid, addr);
-	s390_base_pgm_handler_fn = NULL;
-
-	/* The program check handler could have changed pgm_check_occured. */
-	barrier();
-
-	if (pgm_check_occured)
-		return -EIO;
-	else
-		return rc;
-}
-
-static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
-{
-	struct schib schib;
-
-	if (stsch_reset(schid, &schib))
-		return -ENXIO;
-	if (!schib.pmcw.ena)
-		return 0;
-	switch(__disable_subchannel_easy(schid, &schib)) {
-	case 0:
-	case -ENODEV:
-		break;
-	default: /* -EBUSY */
-		switch (schib.pmcw.st) {
-		case SUBCHANNEL_TYPE_IO:
-			if (__clear_io_subchannel_easy(schid))
-				goto out; /* give up... */
-			break;
-		case SUBCHANNEL_TYPE_CHSC:
-			__clear_chsc_subchannel_easy();
-			break;
-		default:
-			/* No default clear strategy */
-			break;
-		}
-		stsch(schid, &schib);
-		__disable_subchannel_easy(schid, &schib);
-	}
-out:
-	return 0;
-}
-
-static atomic_t chpid_reset_count;
-
-static void s390_reset_chpids_mcck_handler(void)
-{
-	struct crw crw;
-	union mci mci;
-
-	/* Check for pending channel report word. */
-	mci.val = S390_lowcore.mcck_interruption_code;
-	if (!mci.cp)
-		return;
-	/* Process channel report words. */
-	while (stcrw(&crw) == 0) {
-		/* Check for responses to RCHP. */
-		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
-			atomic_dec(&chpid_reset_count);
-	}
-}
-
-#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
-static void css_reset(void)
-{
-	int i, ret;
-	unsigned long long timeout;
-	struct chp_id chpid;
-
-	/* Reset subchannels. */
-	for_each_subchannel(__shutdown_subchannel_easy, NULL);
-	/* Reset channel paths. */
-	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
-	/* Enable channel report machine checks. */
-	__ctl_set_bit(14, 28);
-	/* Temporarily reenable machine checks. */
-	local_mcck_enable();
-	chp_id_init(&chpid);
-	for (i = 0; i <= __MAX_CHPID; i++) {
-		chpid.id = i;
-		ret = rchp(chpid);
-		if ((ret == 0) || (ret == 2))
-			/*
-			 * rchp either succeeded, or another rchp is already
-			 * in progress. In either case, we'll get a crw.
-			 */
-			atomic_inc(&chpid_reset_count);
-	}
-	/* Wait for machine check for all channel paths. */
-	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
-	while (atomic_read(&chpid_reset_count) != 0) {
-		if (get_tod_clock_fast() > timeout)
-			break;
-		cpu_relax();
-	}
-	/* Disable machine checks again. */
-	local_mcck_disable();
-	/* Disable channel report machine checks. */
-	__ctl_clear_bit(14, 28);
-	s390_base_mcck_handler_fn = NULL;
-}
-
-static struct reset_call css_reset_call = {
-	.fn = css_reset,
-};
-
-static int __init init_css_reset_call(void)
-{
-	atomic_set(&chpid_reset_count, 0);
-	register_reset_call(&css_reset_call);
-	return 0;
-}
-
-arch_initcall(init_css_reset_call);
-
-struct sch_match_id {
-	struct subchannel_id schid;
-	struct ccw_dev_id devid;
-	int rc;
-};
-
-static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
-{
-	struct schib schib;
-	struct sch_match_id *match_id = data;
-
-	if (stsch_reset(schid, &schib))
-		return -ENXIO;
-	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
-	    (schib.pmcw.dev == match_id->devid.devno) &&
-	    (schid.ssid == match_id->devid.ssid)) {
-		match_id->schid = schid;
-		match_id->rc = 0;
-		return 1;
-	}
-	return 0;
-}
-
-static int reipl_find_schid(struct ccw_dev_id *devid,
-			    struct subchannel_id *schid)
-{
-	struct sch_match_id match_id;
-
-	match_id.devid = *devid;
-	match_id.rc = -ENODEV;
-	for_each_subchannel(__reipl_subchannel_match, &match_id);
-	if (match_id.rc == 0)
-		*schid = match_id.schid;
-	return match_id.rc;
-}
-
-extern void do_reipl_asm(__u32 schid);
-
-/* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void reipl_ccw_dev(struct ccw_dev_id *devid)
-{
-	struct subchannel_id uninitialized_var(schid);
-
-	s390_reset_system();
-	if (reipl_find_schid(devid, &schid) != 0)
-		panic("IPL Device not found\n");
-	do_reipl_asm(*((__u32*)&schid));
-}
-
-int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
-{
-	static struct chsc_sda_area sda_area __initdata;
-	struct subchannel_id schid;
-	struct schib schib;
-
-	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
-	if (!schid.one)
-		return -ENODEV;
-
-	if (schid.ssid) {
-		/*
-		 * Firmware should have already enabled MSS but whoever started
-		 * the kernel might have initiated a channel subsystem reset.
-		 * Ensure that MSS is enabled.
-		 */
-		memset(&sda_area, 0, sizeof(sda_area));
-		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
-			return -ENODEV;
-	}
-	if (stsch(schid, &schib))
-		return -ENODEV;
-	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
-		return -ENODEV;
-	if (!schib.pmcw.dnv)
-		return -ENODEV;
-
-	iplinfo->ssid = schid.ssid;
-	iplinfo->devno = schib.pmcw.dev;
-	iplinfo->is_qdio = schib.pmcw.qf;
-	return 0;
-}
-
 /**
  * cio_tm_start_key - perform start function
  * @sch: subchannel on which to perform the start function
diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
index 4fa9ee1d09fa..14d328338ce2 100644
--- a/drivers/s390/cio/ioasm.c
+++ b/drivers/s390/cio/ioasm.c
@@ -183,30 +183,6 @@ int chsc(void *chsc_area)
 }
 EXPORT_SYMBOL(chsc);
 
-static inline int __rchp(struct chp_id chpid)
-{
-	register struct chp_id reg1 asm ("1") = chpid;
-	int ccode;
-
-	asm volatile(
-		"	lr	1,%1\n"
-		"	rchp\n"
-		"	ipm	%0\n"
-		"	srl	%0,28"
-		: "=d" (ccode) : "d" (reg1) : "cc");
-	return ccode;
-}
-
-int rchp(struct chp_id chpid)
-{
-	int ccode;
-
-	ccode = __rchp(chpid);
-	trace_s390_cio_rchp(chpid, ccode);
-
-	return ccode;
-}
-
 static inline int __rsch(struct subchannel_id schid)
 {
 	register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index 35ad4ddd61e0..4be539cb9adc 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -20,7 +20,6 @@ int ssch(struct subchannel_id schid, union orb *addr);
 int csch(struct subchannel_id schid);
 int tpi(struct tpi_info *addr);
 int chsc(void *chsc_area);
-int rchp(struct chp_id chpid);
 int rsch(struct subchannel_id schid);
 int hsch(struct subchannel_id schid);
 int xsch(struct subchannel_id schid);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index a337281337a7..f4ca72dd862f 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1207,8 +1207,10 @@ no_cleanup:
 	qdio_shutdown_thinint(irq_ptr);
 
 	/* restore interrupt handler */
-	if ((void *)cdev->handler == (void *)qdio_int_handler)
+	if ((void *)cdev->handler == (void *)qdio_int_handler) {
 		cdev->handler = irq_ptr->orig_handler;
+		cdev->private->intparm = 0;
+	}
 	spin_unlock_irq(get_ccwdev_lock(cdev));
 
 	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 98f3cfdc0d02..439991d71b14 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -507,8 +507,10 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 	irq_ptr->aqueue = *ciw;
 
 	/* set new interrupt handler */
+	spin_lock_irq(get_ccwdev_lock(irq_ptr->cdev));
 	irq_ptr->orig_handler = init_data->cdev->handler;
 	init_data->cdev->handler = qdio_int_handler;
+	spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
 	return 0;
 out_err:
 	qdio_release_memory(irq_ptr);
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48d55dc9e986..35a0c2b52f82 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -25,7 +25,6 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/suspend.h>
-#include <asm/reset.h>
 #include <asm/airq.h>
 #include <linux/atomic.h>
 #include <asm/isc.h>
@@ -1197,26 +1196,7 @@ static void ap_config_timeout(struct timer_list *unused)
 	queue_work(system_long_wq, &ap_scan_work);
 }
 
-static void ap_reset_all(void)
-{
-	int i, j;
-
-	for (i = 0; i < AP_DOMAINS; i++) {
-		if (!ap_test_config_domain(i))
-			continue;
-		for (j = 0; j < AP_DEVICES; j++) {
-			if (!ap_test_config_card_id(j))
-				continue;
-			ap_rapq(AP_MKQID(j, i));
-		}
-	}
-}
-
-static struct reset_call ap_reset_call = {
-	.fn = ap_reset_all,
-};
-
-int __init ap_debug_init(void)
+static int __init ap_debug_init(void)
 {
 	ap_dbf_info = debug_register("ap", 1, 1,
 			DBF_MAX_SPRINTF_ARGS * sizeof(long));
@@ -1226,17 +1206,12 @@ int __init ap_debug_init(void)
 	return 0;
 }
 
-void ap_debug_exit(void)
-{
-	debug_unregister(ap_dbf_info);
-}
-
 /**
  * ap_module_init(): The module initialization code.
  *
  * Initializes the module.
 */
-int __init ap_module_init(void)
+static int __init ap_module_init(void)
 {
 	int max_domain_id;
 	int rc, i;
@@ -1274,8 +1249,6 @@ int __init ap_module_init(void)
 		ap_airq_flag = (rc == 0);
 	}
 
-	register_reset_call(&ap_reset_call);
-
 	/* Create /sys/bus/ap. */
 	rc = bus_register(&ap_bus_type);
 	if (rc)
@@ -1331,7 +1304,6 @@ out_bus:
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
 	bus_unregister(&ap_bus_type);
 out:
-	unregister_reset_call(&ap_reset_call);
 	if (ap_using_interrupts())
 		unregister_adapter_interrupt(&ap_airq);
 	kfree(ap_configuration);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index e0827eaa42f1..02184cf35834 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -17,7 +17,7 @@
 #include <linux/types.h>
 #include <asm/ap.h>
 
-#define AP_DEVICES 64		/* Number of AP devices. */
+#define AP_DEVICES 256		/* Number of AP devices. */
 #define AP_DOMAINS 256		/* Number of AP domains. */
 #define AP_RESET_TIMEOUT (HZ*0.7)	/* Time in ticks for reset timeouts. */
 #define AP_CONFIG_TIME 30	/* Time in seconds between AP bus rescans. */
@@ -240,7 +240,4 @@ void ap_queue_resume(struct ap_device *ap_dev);
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
 			       int comp_device_type, unsigned int functions);
 
-int ap_module_init(void);
-void ap_module_exit(void);
-
 #endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index 6a9d77c75ec3..dc675eb5aef6 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -23,7 +23,4 @@
 
 extern debug_info_t *ap_dbf_info;
 
-int ap_debug_init(void);
-void ap_debug_exit(void);
-
 #endif /* AP_DEBUG_H */
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index e7c2e4f9529a..ed80d00cdb6f 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -889,7 +889,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
 		   u16 *pcardnr, u16 *pdomain, int verify)
 {
 	struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
-	struct zcrypt_device_matrix *device_matrix;
+	struct zcrypt_device_status_ext *device_status;
 	u16 card, dom;
 	u64 mkvp[2];
 	int i, rc, oi = -1;
@@ -899,18 +899,19 @@ int pkey_findcard(const struct pkey_seckey *seckey,
 		return -EINVAL;
 
 	/* fetch status of all crypto cards */
-	device_matrix = kmalloc(sizeof(struct zcrypt_device_matrix),
+	device_status = kmalloc(MAX_ZDEV_ENTRIES_EXT
+				* sizeof(struct zcrypt_device_status_ext),
 				GFP_KERNEL);
-	if (!device_matrix)
+	if (!device_status)
 		return -ENOMEM;
-	zcrypt_device_status_mask(device_matrix);
+	zcrypt_device_status_mask_ext(device_status);
 
 	/* walk through all crypto cards */
-	for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
-		card = AP_QID_CARD(device_matrix->device[i].qid);
-		dom = AP_QID_QUEUE(device_matrix->device[i].qid);
-		if (device_matrix->device[i].online &&
-		    device_matrix->device[i].functions & 0x04) {
+	for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+		card = AP_QID_CARD(device_status[i].qid);
+		dom = AP_QID_QUEUE(device_status[i].qid);
+		if (device_status[i].online &&
+		    device_status[i].functions & 0x04) {
 			/* an enabled CCA Coprocessor card */
 			/* try cached mkvp */
 			if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
@@ -930,14 +931,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
 			mkvp_cache_scrub(card, dom);
 		}
 	}
-	if (i >= MAX_ZDEV_ENTRIES) {
+	if (i >= MAX_ZDEV_ENTRIES_EXT) {
 		/* nothing found, so this time without cache */
-		for (i = 0; i < MAX_ZDEV_ENTRIES; i++) {
-			if (!(device_matrix->device[i].online &&
-			      device_matrix->device[i].functions & 0x04))
+		for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+			if (!(device_status[i].online &&
+			      device_status[i].functions & 0x04))
 				continue;
-			card = AP_QID_CARD(device_matrix->device[i].qid);
-			dom = AP_QID_QUEUE(device_matrix->device[i].qid);
+			card = AP_QID_CARD(device_status[i].qid);
+			dom = AP_QID_QUEUE(device_status[i].qid);
 			/* fresh fetch mkvp from adapter */
 			if (fetch_mkvp(card, dom, mkvp) == 0) {
 				mkvp_cache_update(card, dom, mkvp);
@@ -947,13 +948,13 @@ int pkey_findcard(const struct pkey_seckey *seckey,
 				oi = i;
 			}
 		}
-		if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+		if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
 			/* old mkvp matched, use this card then */
-			card = AP_QID_CARD(device_matrix->device[oi].qid);
-			dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+			card = AP_QID_CARD(device_status[oi].qid);
+			dom = AP_QID_QUEUE(device_status[oi].qid);
 		}
 	}
-	if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
+	if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
 		if (pcardnr)
 			*pcardnr = card;
 		if (pdomain)
@@ -962,7 +963,7 @@ int pkey_findcard(const struct pkey_seckey *seckey,
 	} else
 		rc = -ENODEV;
 
-	kfree(device_matrix);
+	kfree(device_status);
 	return rc;
 }
 EXPORT_SYMBOL(pkey_findcard);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index ce15f101ee28..5efd84862ccb 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -18,8 +18,6 @@
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
 #include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/compat.h>
 #include <linux/slab.h>
 #include <linux/atomic.h>
@@ -607,19 +605,24 @@ out:
 	return rc;
 }
 
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
+static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
 {
 	struct zcrypt_card *zc;
 	struct zcrypt_queue *zq;
 	struct zcrypt_device_status *stat;
+	int card, queue;
+
+	memset(devstatus, 0, MAX_ZDEV_ENTRIES
+	       * sizeof(struct zcrypt_device_status));
 
-	memset(matrix, 0, sizeof(*matrix));
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
 		for_each_zcrypt_queue(zq, zc) {
-			stat = matrix->device;
-			stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
-			stat += AP_QID_QUEUE(zq->queue->qid);
+			card = AP_QID_CARD(zq->queue->qid);
+			if (card >= MAX_ZDEV_CARDIDS)
+				continue;
+			queue = AP_QID_QUEUE(zq->queue->qid);
+			stat = &devstatus[card * AP_DOMAINS + queue];
 			stat->hwtype = zc->card->ap_dev.device_type;
 			stat->functions = zc->card->functions >> 26;
 			stat->qid = zq->queue->qid;
@@ -628,40 +631,70 @@ void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
 	}
 	spin_unlock(&zcrypt_list_lock);
 }
-EXPORT_SYMBOL(zcrypt_device_status_mask);
 
-static void zcrypt_status_mask(char status[AP_DEVICES])
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
 {
 	struct zcrypt_card *zc;
 	struct zcrypt_queue *zq;
+	struct zcrypt_device_status_ext *stat;
+	int card, queue;
+
+	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
+	       * sizeof(struct zcrypt_device_status_ext));
 
-	memset(status, 0, sizeof(char) * AP_DEVICES);
 	spin_lock(&zcrypt_list_lock);
 	for_each_zcrypt_card(zc) {
 		for_each_zcrypt_queue(zq, zc) {
-			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+			card = AP_QID_CARD(zq->queue->qid);
+			queue = AP_QID_QUEUE(zq->queue->qid);
+			stat = &devstatus[card * AP_DOMAINS + queue];
+			stat->hwtype = zc->card->ap_dev.device_type;
+			stat->functions = zc->card->functions >> 26;
+			stat->qid = zq->queue->qid;
+			stat->online = zq->online ? 0x01 : 0x00;
+		}
+	}
+	spin_unlock(&zcrypt_list_lock);
+}
+EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
+static void zcrypt_status_mask(char status[], size_t max_adapters)
+{
+	struct zcrypt_card *zc;
+	struct zcrypt_queue *zq;
+	int card;
+
+	memset(status, 0, max_adapters);
+	spin_lock(&zcrypt_list_lock);
+	for_each_zcrypt_card(zc) {
+		for_each_zcrypt_queue(zq, zc) {
+			card = AP_QID_CARD(zq->queue->qid);
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+			    || card >= max_adapters)
 				continue;
-			status[AP_QID_CARD(zq->queue->qid)] =
-				zc->online ? zc->user_space_type : 0x0d;
+			status[card] = zc->online ? zc->user_space_type : 0x0d;
 		}
 	}
 	spin_unlock(&zcrypt_list_lock);
 }
 
-static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
+static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
 {
 	struct zcrypt_card *zc;
 	struct zcrypt_queue *zq;
+	int card;
 
-	memset(qdepth, 0, sizeof(char) * AP_DEVICES);
+	memset(qdepth, 0, max_adapters);
 	spin_lock(&zcrypt_list_lock);
 	local_bh_disable();
 	for_each_zcrypt_card(zc) {
 		for_each_zcrypt_queue(zq, zc) {
-			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+			card = AP_QID_CARD(zq->queue->qid);
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+			    || card >= max_adapters)
 				continue;
 			spin_lock(&zq->queue->lock);
-			qdepth[AP_QID_CARD(zq->queue->qid)] =
+			qdepth[card] =
 				zq->queue->pendingq_count +
 				zq->queue->requestq_count;
 			spin_unlock(&zq->queue->lock);
@@ -671,21 +704,23 @@ static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
 	spin_unlock(&zcrypt_list_lock);
 }
 
-static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
+static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
 {
 	struct zcrypt_card *zc;
 	struct zcrypt_queue *zq;
+	int card;
 
-	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
+	memset(reqcnt, 0, sizeof(int) * max_adapters);
 	spin_lock(&zcrypt_list_lock);
 	local_bh_disable();
 	for_each_zcrypt_card(zc) {
 		for_each_zcrypt_queue(zq, zc) {
-			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
+			card = AP_QID_CARD(zq->queue->qid);
+			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
+			    || card >= max_adapters)
 				continue;
 			spin_lock(&zq->queue->lock);
-			reqcnt[AP_QID_CARD(zq->queue->qid)] =
-				zq->queue->total_request_count;
+			reqcnt[card] = zq->queue->total_request_count;
 			spin_unlock(&zq->queue->lock);
 		}
 	}
@@ -739,60 +774,10 @@ static int zcrypt_requestq_count(void)
 	return requestq_count;
 }
 
-static int zcrypt_count_type(int type)
-{
-	struct zcrypt_card *zc;
-	struct zcrypt_queue *zq;
-	int device_count;
-
-	device_count = 0;
-	spin_lock(&zcrypt_list_lock);
-	for_each_zcrypt_card(zc) {
-		if (zc->card->id != type)
-			continue;
-		for_each_zcrypt_queue(zq, zc) {
-			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
-				continue;
-			device_count++;
-		}
-	}
-	spin_unlock(&zcrypt_list_lock);
-	return device_count;
-}
-
-/**
- * zcrypt_ica_status(): Old, depracted combi status call.
- *
- * Old, deprecated combi status call.
- */
-static long zcrypt_ica_status(struct file *filp, unsigned long arg)
-{
-	struct ica_z90_status *pstat;
-	int ret;
-
-	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
-	if (!pstat)
-		return -ENOMEM;
-	pstat->totalcount = zcrypt_device_count;
-	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
-	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
-	pstat->requestqWaitCount = zcrypt_requestq_count();
-	pstat->pendingqWaitCount = zcrypt_pendingq_count();
-	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
-	pstat->cryptoDomain = ap_domain_index;
-	zcrypt_status_mask(pstat->status);
-	zcrypt_qdepth_mask(pstat->qdepth);
-	ret = 0;
-	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
-		ret = -EFAULT;
-	kfree(pstat);
-	return ret;
-}
-
 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 				  unsigned long arg)
 {
-	int rc;
+	int rc = 0;
 
 	switch (cmd) {
 	case ICARSAMODEXPO: {
@@ -871,48 +856,48 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			return -EFAULT;
 		return rc;
 	}
-	case ZDEVICESTATUS: {
-		struct zcrypt_device_matrix *device_status;
+	case ZCRYPT_DEVICE_STATUS: {
+		struct zcrypt_device_status_ext *device_status;
+		size_t total_size = MAX_ZDEV_ENTRIES_EXT
+			* sizeof(struct zcrypt_device_status_ext);
 
-		device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
-					GFP_KERNEL);
+		device_status = kzalloc(total_size, GFP_KERNEL);
 		if (!device_status)
 			return -ENOMEM;
-
-		zcrypt_device_status_mask(device_status);
-
+		zcrypt_device_status_mask_ext(device_status);
 		if (copy_to_user((char __user *) arg, device_status,
-				 sizeof(struct zcrypt_device_matrix))) {
-			kfree(device_status);
-			return -EFAULT;
-		}
-
+				 total_size))
+			rc = -EFAULT;
 		kfree(device_status);
-		return 0;
+		return rc;
 	}
-	case Z90STAT_STATUS_MASK: {
+	case ZCRYPT_STATUS_MASK: {
 		char status[AP_DEVICES];
-		zcrypt_status_mask(status);
-		if (copy_to_user((char __user *) arg, status,
-				 sizeof(char) * AP_DEVICES))
+
+		zcrypt_status_mask(status, AP_DEVICES);
+		if (copy_to_user((char __user *) arg, status, sizeof(status)))
 			return -EFAULT;
 		return 0;
 	}
-	case Z90STAT_QDEPTH_MASK: {
+	case ZCRYPT_QDEPTH_MASK: {
 		char qdepth[AP_DEVICES];
-		zcrypt_qdepth_mask(qdepth);
-		if (copy_to_user((char __user *) arg, qdepth,
-				 sizeof(char) * AP_DEVICES))
+
+		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
+		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
 			return -EFAULT;
 		return 0;
 	}
-	case Z90STAT_PERDEV_REQCNT: {
-		int reqcnt[AP_DEVICES];
-		zcrypt_perdev_reqcnt(reqcnt);
-		if (copy_to_user((int __user *) arg, reqcnt,
-				 sizeof(int) * AP_DEVICES))
-			return -EFAULT;
-		return 0;
+	case ZCRYPT_PERDEV_REQCNT: {
+		int *reqcnt;
+
+		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+		if (!reqcnt)
+			return -ENOMEM;
+		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
+		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+			rc = -EFAULT;
+		kfree(reqcnt);
+		return rc;
 	}
 	case Z90STAT_REQUESTQ_COUNT:
 		return put_user(zcrypt_requestq_count(), (int __user *) arg);
@@ -924,38 +909,54 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
 	case Z90STAT_DOMAIN_INDEX:
 		return put_user(ap_domain_index, (int __user *) arg);
 	/*
-	 * Deprecated ioctls. Don't add another device count ioctl,
-	 * you can count them yourself in the user space with the
-	 * output of the Z90STAT_STATUS_MASK ioctl.
+	 * Deprecated ioctls
 	 */
-	case ICAZ90STATUS:
-		return zcrypt_ica_status(filp, arg);
-	case Z90STAT_TOTALCOUNT:
-		return put_user(zcrypt_device_count, (int __user *) arg);
-	case Z90STAT_PCICACOUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
-				(int __user *) arg);
-	case Z90STAT_PCICCCOUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
-				(int __user *) arg);
-	case Z90STAT_PCIXCCMCL2COUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
-				(int __user *) arg);
-	case Z90STAT_PCIXCCMCL3COUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
-				(int __user *) arg);
-	case Z90STAT_PCIXCCCOUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
-				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
-				(int __user *) arg);
-	case Z90STAT_CEX2CCOUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
-				(int __user *) arg);
-	case Z90STAT_CEX2ACOUNT:
-		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
-				(int __user *) arg);
+	case ZDEVICESTATUS: {
+		/* the old ioctl supports only 64 adapters */
+		struct zcrypt_device_status *device_status;
+		size_t total_size = MAX_ZDEV_ENTRIES
+			* sizeof(struct zcrypt_device_status);
+
+		device_status = kzalloc(total_size, GFP_KERNEL);
+		if (!device_status)
+			return -ENOMEM;
+		zcrypt_device_status_mask(device_status);
+		if (copy_to_user((char __user *) arg, device_status,
+				 total_size))
+			rc = -EFAULT;
+		kfree(device_status);
+		return rc;
+	}
+	case Z90STAT_STATUS_MASK: {
+		/* the old ioctl supports only 64 adapters */
+		char status[MAX_ZDEV_CARDIDS];
+
+		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
+		if (copy_to_user((char __user *) arg, status, sizeof(status)))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_QDEPTH_MASK: {
+		/* the old ioctl supports only 64 adapters */
+		char qdepth[MAX_ZDEV_CARDIDS];
+
+		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
+		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+			return -EFAULT;
+		return 0;
+	}
+	case Z90STAT_PERDEV_REQCNT: {
+		/* the old ioctl supports only 64 adapters */
+		int reqcnt[MAX_ZDEV_CARDIDS];
+
+		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
+		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+			return -EFAULT;
+		return 0;
+	}
+	/* unknown ioctl number */
 	default:
-		/* unknown ioctl number */
+		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
 		return -ENOIOCTLCMD;
 	}
 }
@@ -1152,201 +1153,6 @@ static struct miscdevice zcrypt_misc_device = {
 	.fops = &zcrypt_fops,
 };
 
-/*
- * Deprecated /proc entry support.
- */
-static struct proc_dir_entry *zcrypt_entry;
-
-static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		seq_printf(m, "%01x", (unsigned int) addr[i]);
-	seq_putc(m, ' ');
-}
-
-static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
-{
-	int inl, c, cx;
-
-	seq_printf(m, " ");
-	inl = 0;
-	for (c = 0; c < (len / 16); c++) {
-		sprintcl(m, addr+inl, 16);
-		inl += 16;
-	}
-	cx = len%16;
-	if (cx) {
-		sprintcl(m, addr+inl, cx);
-		inl += cx;
-	}
-	seq_putc(m, '\n');
-}
-
-static void sprinthx(unsigned char *title, struct seq_file *m,
-		     unsigned char *addr, unsigned int len)
-{
-	int inl, r, rx;
-
-	seq_printf(m, "\n%s\n", title);
-	inl = 0;
-	for (r = 0; r < (len / 64); r++) {
-		sprintrw(m, addr+inl, 64);
-		inl += 64;
-	}
-	rx = len % 64;
-	if (rx) {
-		sprintrw(m, addr+inl, rx);
-		inl += rx;
-	}
-	seq_putc(m, '\n');
-}
-
-static void sprinthx4(unsigned char *title, struct seq_file *m,
-		      unsigned int *array, unsigned int len)
-{
-	seq_printf(m, "\n%s\n", title);
-	seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
-	seq_putc(m, '\n');
-}
-
-static int zcrypt_proc_show(struct seq_file *m, void *v)
-{
-	char workarea[sizeof(int) * AP_DEVICES];
-
-	seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
-		   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
-	seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
-	seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
-	seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
-	seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
-	seq_printf(m, "PCIXCC MCL2 count: %d\n",
-		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
-	seq_printf(m, "PCIXCC MCL3 count: %d\n",
-		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
-	seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
-	seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
-	seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
-	seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
-	seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
-	seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
-	seq_printf(m, "Total open handles: %d\n\n",
-		   atomic_read(&zcrypt_open_count));
-	zcrypt_status_mask(workarea);
-	sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
-		 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
-		 m, workarea, AP_DEVICES);
-	zcrypt_qdepth_mask(workarea);
-	sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
-	zcrypt_perdev_reqcnt((int *) workarea);
-	sprinthx4("Per-device successfully completed request counts",
-		  m, (unsigned int *) workarea, AP_DEVICES);
-	return 0;
-}
-
-static int zcrypt_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, zcrypt_proc_show, NULL);
-}
-
-static void zcrypt_disable_card(int index)
-{
-	struct zcrypt_card *zc;
-	struct zcrypt_queue *zq;
-
-	spin_lock(&zcrypt_list_lock);
-	for_each_zcrypt_card(zc) {
-		for_each_zcrypt_queue(zq, zc) {
-			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
-				continue;
-			zq->online = 0;
-			ap_flush_queue(zq->queue);
-		}
-	}
-	spin_unlock(&zcrypt_list_lock);
-}
-
-static void zcrypt_enable_card(int index)
-{
-	struct zcrypt_card *zc;
-	struct zcrypt_queue *zq;
-
-	spin_lock(&zcrypt_list_lock);
-	for_each_zcrypt_card(zc) {
-		for_each_zcrypt_queue(zq, zc) {
-			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
-				continue;
-			zq->online = 1;
-			ap_flush_queue(zq->queue);
-		}
-	}
-	spin_unlock(&zcrypt_list_lock);
-}
-
-static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
-				 size_t count, loff_t *pos)
-{
-	unsigned char *lbuf, *ptr;
-	size_t local_count;
-	int j;
-
-	if (count <= 0)
-		return 0;
-
-#define LBUFSIZE 1200UL
-	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
-	if (!lbuf)
-		return 0;
-
-	local_count = min(LBUFSIZE - 1, count);
-	if (copy_from_user(lbuf, buffer, local_count) != 0) {
-		kfree(lbuf);
-		return -EFAULT;
-	}
-	lbuf[local_count] = '\0';
-
-	ptr = strstr(lbuf, "Online devices");
-	if (!ptr)
-		goto out;
-	ptr = strstr(ptr, "\n");
-	if (!ptr)
-		goto out;
-	ptr++;
-
-	if (strstr(ptr, "Waiting work element counts") == NULL)
-		goto out;
-
-	for (j = 0; j < 64 && *ptr; ptr++) {
-		/*
-		 * '0' for no device, '1' for PCICA, '2' for PCICC,
-		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
-		 * '5' for CEX2C and '6' for CEX2A'
-		 * '7' for CEX3C and '8' for CEX3A
-		 */
-		if (*ptr >= '0' && *ptr <= '8')
-			j++;
-		else if (*ptr == 'd' || *ptr == 'D')
-			zcrypt_disable_card(j++);
-		else if (*ptr == 'e' || *ptr == 'E')
-			zcrypt_enable_card(j++);
-		else if (*ptr != ' ' && *ptr != '\t')
-			break;
-	}
-out:
-	kfree(lbuf);
-	return count;
-}
-
-static const struct file_operations zcrypt_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= zcrypt_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= zcrypt_proc_write,
-};
-
 static int zcrypt_rng_device_count;
 static u32 *zcrypt_rng_buffer;
 static int zcrypt_rng_buffer_index;
@@ -1448,27 +1254,15 @@ int __init zcrypt_api_init(void)
 	if (rc)
 		goto out;
 
-	atomic_set(&zcrypt_rescan_req, 0);
-
 	/* Register the request sprayer. */
 	rc = misc_register(&zcrypt_misc_device);
 	if (rc < 0)
 		goto out;
 
-	/* Set up the proc file system */
-	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
-				   &zcrypt_proc_fops);
-	if (!zcrypt_entry) {
-		rc = -ENOMEM;
-		goto out_misc;
-	}
-
 	zcrypt_msgtype6_init();
 	zcrypt_msgtype50_init();
 	return 0;
 
-out_misc:
-	misc_deregister(&zcrypt_misc_device);
out:
 	return rc;
 }
@@ -1480,7 +1274,6 @@ out:
 */
 void __exit zcrypt_api_exit(void)
 {
-	remove_proc_entry("driver/z90crypt", NULL);
 	misc_deregister(&zcrypt_misc_device);
 	zcrypt_msgtype6_exit();
 	zcrypt_msgtype50_exit();
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 9fff8912f6e3..f149a8fee60d 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -21,30 +21,6 @@
 #include <asm/zcrypt.h>
 #include "ap_bus.h"
 
-/* deprecated status calls */
-#define ICAZ90STATUS		_IOR(ZCRYPT_IOCTL_MAGIC, 0x10, struct ica_z90_status)
-#define Z90STAT_PCIXCCCOUNT	_IOR(ZCRYPT_IOCTL_MAGIC, 0x43, int)
-
-/**
- * This structure is deprecated and the corresponding ioctl() has been
- * replaced with individual ioctl()s for each piece of data!
- */
-struct ica_z90_status {
-	int totalcount;
-	int leedslitecount;	// PCICA
-	int leeds2count;	// PCICC
-	// int PCIXCCCount; is not in struct for backward compatibility
-	int requestqWaitCount;
-	int pendingqWaitCount;
-	int totalOpenCount;
-	int cryptoDomain;
-	// status: 0=not there, 1=PCICA, 2=PCICC, 3=PCIXCC_MCL2, 4=PCIXCC_MCL3,
-	//   5=CEX2C
-	unsigned char status[64];
-	// qdepth: # work elements waiting for each device
-	unsigned char qdepth[64];
-};
-
 /**
  * device type for an actual device is either PCICA, PCICC, PCIXCC_MCL2,
  * PCIXCC_MCL3, CEX2C, or CEX2A
@@ -179,6 +155,6 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
 int zcrypt_api_init(void);
 void zcrypt_api_exit(void);
 long zcrypt_send_cprb(struct ica_xcRB *xcRB);
-void zcrypt_device_status_mask(struct zcrypt_device_matrix *devstatus);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
 
 #endif /* _ZCRYPT_API_H_ */