author		Linus Torvalds <torvalds@linux-foundation.org>	2019-03-28 11:35:32 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-28 11:35:32 -0400
commit		bfed6d0ffc8dba002312c2641c00ecd3bf9f9cbf (patch)
tree		d61f5fcd8d11f03c89ed3be58eaf13f0eb06c75b
parent		97c41a6bdce506bad1cce623378656a5cb956a18 (diff)
parent		b6ffdf27f3d4f1e9af56effe6f86989170d71e95 (diff)
Merge tag 's390-5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 fixes from Martin Schwidefsky:
 "Improvements and bug fixes for 5.1-rc2:

   - Fix early free of the channel program in vfio

   - On AP device removal make sure that all messages are flushed with
     the driver still attached that queued the message

   - Limit brk randomization to 32MB to reduce the chance that the heap
     of ld.so is placed after the main stack

   - Add a rolling average for the steal time of a CPU, this will be
     needed for KVM to decide when to do busy waiting

   - Fix a warning in the CPU-MF code

   - Add a notification handler for AP configuration change to react
     faster to new AP devices"

* tag 's390-5.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/cpumf: Fix warning from check_processor_id
  zcrypt: handle AP Info notification from CHSC SEI command
  vfio: ccw: only free cp on final interrupt
  s390/vtime: steal time exponential moving average
  s390/zcrypt: revisit ap device remove procedure
  s390: limit brk randomization to 32MB
-rw-r--r--  arch/s390/include/asm/ap.h            |  11
-rw-r--r--  arch/s390/include/asm/elf.h           |  11
-rw-r--r--  arch/s390/include/asm/lowcore.h       |  61
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_diag.c  |  19
-rw-r--r--  arch/s390/kernel/smp.c                |   3
-rw-r--r--  arch/s390/kernel/vtime.c              |  19
-rw-r--r--  drivers/s390/cio/chsc.c               |  13
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c       |   8
-rw-r--r--  drivers/s390/crypto/ap_bus.c          |  19
-rw-r--r--  drivers/s390/crypto/ap_bus.h          |   2
-rw-r--r--  drivers/s390/crypto/ap_queue.c        |  26
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c      |  30
12 files changed, 154 insertions, 68 deletions
diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
index 1a6a7092d942..e94a0a28b5eb 100644
--- a/arch/s390/include/asm/ap.h
+++ b/arch/s390/include/asm/ap.h
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
 	return reg1;
 }
 
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void){};
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
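
The hunk above uses the usual conditional-stub idiom: a real prototype when zcrypt is built in or as a module, and an empty inline otherwise, so call sites need no #ifdef. A minimal sketch of the same idiom (CONFIG_MY_FEATURE and my_hook() are made-up names, not from this patch):

#if IS_ENABLED(CONFIG_MY_FEATURE)
void my_hook(void);
#else
static inline void my_hook(void) { }	/* compiles away when the feature is off */
#endif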
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 7d22a474a040..f74639a05f0f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -252,11 +252,14 @@ do { \
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK	(is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK	(is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK	MMAP_RND_MASK
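
For reference, these masks are applied to a page-granular random value (PAGE_SHIFT is 12 on s390), which is how the numbers in the commit message fall out. A standalone arithmetic check under that assumption (illustrative C, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long page = 1UL << 12;	/* 4KB pages */

	/* old 64-bit BRK_RND_MASK 0x3ffff -> 262144 pages -> 1024 MB (1GB) */
	printf("old 64-bit brk range: %lu MB\n", ((0x3ffffUL + 1) * page) >> 20);
	/* new 64-bit BRK_RND_MASK 0x1fff -> 8192 pages -> 32 MB */
	printf("new 64-bit brk range: %lu MB\n", ((0x1fffUL + 1) * page) >> 20);
	/* 31-bit BRK_RND_MASK 0x7ff -> 2048 pages -> 8 MB */
	printf("31-bit brk range:     %lu MB\n", ((0x7ffUL + 1) * page) >> 20);
	return 0;
}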
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index cc0947e08b6f..5b9f10b1e55d 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -91,52 +91,53 @@ struct lowcore {
 	__u64	hardirq_timer;			/* 0x02e8 */
 	__u64	softirq_timer;			/* 0x02f0 */
 	__u64	steal_timer;			/* 0x02f8 */
-	__u64	last_update_timer;		/* 0x0300 */
-	__u64	last_update_clock;		/* 0x0308 */
-	__u64	int_clock;			/* 0x0310 */
-	__u64	mcck_clock;			/* 0x0318 */
-	__u64	clock_comparator;		/* 0x0320 */
-	__u64	boot_clock[2];			/* 0x0328 */
+	__u64	avg_steal_timer;		/* 0x0300 */
+	__u64	last_update_timer;		/* 0x0308 */
+	__u64	last_update_clock;		/* 0x0310 */
+	__u64	int_clock;			/* 0x0318*/
+	__u64	mcck_clock;			/* 0x0320 */
+	__u64	clock_comparator;		/* 0x0328 */
+	__u64	boot_clock[2];			/* 0x0330 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x0338 */
-	__u64	kernel_stack;			/* 0x0340 */
+	__u64	current_task;			/* 0x0340 */
+	__u64	kernel_stack;			/* 0x0348 */
 
 	/* Interrupt, DAT-off and restart stack. */
-	__u64	async_stack;			/* 0x0348 */
-	__u64	nodat_stack;			/* 0x0350 */
-	__u64	restart_stack;			/* 0x0358 */
+	__u64	async_stack;			/* 0x0350 */
+	__u64	nodat_stack;			/* 0x0358 */
+	__u64	restart_stack;			/* 0x0360 */
 
 	/* Restart function and parameter. */
-	__u64	restart_fn;			/* 0x0360 */
-	__u64	restart_data;			/* 0x0368 */
-	__u64	restart_source;			/* 0x0370 */
+	__u64	restart_fn;			/* 0x0368 */
+	__u64	restart_data;			/* 0x0370 */
+	__u64	restart_source;			/* 0x0378 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0378 */
-	__u64	user_asce;			/* 0x0380 */
-	__u64	vdso_asce;			/* 0x0388 */
+	__u64	kernel_asce;			/* 0x0380 */
+	__u64	user_asce;			/* 0x0388 */
+	__u64	vdso_asce;			/* 0x0390 */
 
 	/*
 	 * The lpp and current_pid fields form a
 	 * 64-bit value that is set as program
 	 * parameter with the LPP instruction.
 	 */
-	__u32	lpp;				/* 0x0390 */
-	__u32	current_pid;			/* 0x0394 */
+	__u32	lpp;				/* 0x0398 */
+	__u32	current_pid;			/* 0x039c */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0398 */
-	__u32	softirq_pending;		/* 0x039c */
-	__u32	preempt_count;			/* 0x03a0 */
-	__u32	spinlock_lockval;		/* 0x03a4 */
-	__u32	spinlock_index;			/* 0x03a8 */
-	__u32	fpu_flags;			/* 0x03ac */
-	__u64	percpu_offset;			/* 0x03b0 */
-	__u64	vdso_per_cpu_data;		/* 0x03b8 */
-	__u64	machine_flags;			/* 0x03c0 */
-	__u64	gmap;				/* 0x03c8 */
-	__u8	pad_0x03d0[0x0400-0x03d0];	/* 0x03d0 */
+	__u32	cpu_nr;				/* 0x03a0 */
+	__u32	softirq_pending;		/* 0x03a4 */
+	__u32	preempt_count;			/* 0x03a8 */
+	__u32	spinlock_lockval;		/* 0x03ac */
+	__u32	spinlock_index;			/* 0x03b0 */
+	__u32	fpu_flags;			/* 0x03b4 */
+	__u64	percpu_offset;			/* 0x03b8 */
+	__u64	vdso_per_cpu_data;		/* 0x03c0 */
+	__u64	machine_flags;			/* 0x03c8 */
+	__u64	gmap;				/* 0x03d0 */
+	__u8	pad_0x03d8[0x0400-0x03d8];	/* 0x03d8 */
 
 	/* br %r1 trampoline */
 	__u16	br_r1_trampoline;		/* 0x0400 */
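
The only functional addition in this hunk is the 8-byte avg_steal_timer at 0x0300; every later member shifts by 8 and the pad shrinks from pad_0x03d0 to pad_0x03d8, so fixed offsets such as br_r1_trampoline at 0x0400 are unchanged. A build-time check in this style would catch a mismatch (hypothetical, not part of the patch):

BUILD_BUG_ON(offsetof(struct lowcore, avg_steal_timer) != 0x0300);
BUILD_BUG_ON(offsetof(struct lowcore, br_r1_trampoline) != 0x0400);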
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index c6fad208c2fa..b6854812d2ed 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
 	struct perf_event_attr *attr = &event->attr;
+	struct cpu_cf_events *cpuhw;
 	enum cpumf_ctr_set i;
 	int err = 0;
 
-	debug_sprintf_event(cf_diag_dbg, 5,
-			    "%s event %p cpu %d authorized %#x\n", __func__,
-			    event, event->cpu, cpuhw->info.auth_ctl);
+	debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+			    event, event->cpu);
 
 	event->hw.config = attr->config;
 	event->hw.config_base = 0;
-	local64_set(&event->count, 0);
 
-	/* Add all authorized counter sets to config_base */
+	/* Add all authorized counter sets to config_base. The
+	 * the hardware init function is either called per-cpu or just once
+	 * for all CPUS (event->cpu == -1). This depends on the whether
+	 * counting is started for all CPUs or on a per workload base where
+	 * the perf event moves from one CPU to another CPU.
+	 * Checking the authorization on any CPU is fine as the hardware
+	 * applies the same authorization settings to all CPUs.
+	 */
+	cpuhw = &get_cpu_var(cpu_cf_events);
 	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
 		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
 			event->hw.config_base |= cpumf_ctr_ctl[i];
+	put_cpu_var(cpu_cf_events);
 
 	/* No authorized counter sets, nothing to count/sample */
 	if (!event->hw.config_base) {
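
Replacing this_cpu_ptr() with the get_cpu_var()/put_cpu_var() pair bounds the preemption-disabled region to the read of the authorization bits, which is all the hunk needs since (per its own comment) every CPU carries the same authorization settings. A minimal sketch of that per-CPU access pattern (my_counter and example() are made-up names, not from this patch):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_counter);

static void example(void)
{
	int *p = &get_cpu_var(my_counter);	/* disables preemption */

	(*p)++;					/* safe: we stay on this CPU */
	put_cpu_var(my_counter);		/* re-enables preemption */
}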
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 3fe1c77c361b..bd197baf1dc3 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 	lc->percpu_offset = __per_cpu_offset[cpu];
 	lc->kernel_asce = S390_lowcore.kernel_asce;
 	lc->machine_flags = S390_lowcore.machine_flags;
-	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+	lc->user_timer = lc->system_timer =
+		lc->steal_timer = lc->avg_steal_timer = 0;
 	__ctl_store(lc->cregs_save_area, 0, 15);
 	save_access_regs((unsigned int *) lc->access_regs_save_area);
 	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 98f850e00008..a69a0911ed0e 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-	u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+	u64 timer, clock, user, guest, system, hardirq, softirq;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
 	if (softirq)
 		account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-	steal = S390_lowcore.steal_timer;
-	if ((s64) steal > 0) {
-		S390_lowcore.steal_timer = 0;
-		account_steal_time(cputime_to_nsecs(steal));
-	}
-
 	return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+	u64 steal, avg_steal;
+
 	if (do_account_vtime(tsk))
 		virt_timer_expire();
+
+	steal = S390_lowcore.steal_timer;
+	avg_steal = S390_lowcore.avg_steal_timer / 2;
+	if ((s64) steal > 0) {
+		S390_lowcore.steal_timer = 0;
+		account_steal_time(steal);
+		avg_steal += steal;
+	}
+	S390_lowcore.avg_steal_timer = avg_steal;
 }
 
 /*
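
The update above is the "steal time exponential moving average" from the commit message: each flush halves the previous average and adds the newly observed steal time, i.e. avg_new = avg_old/2 + steal (the decay happens even when no steal time accrued). A standalone sketch of the same arithmetic (illustrative, not part of the patch):

static unsigned long long avg_steal_timer;

static void update_avg_steal(unsigned long long steal)
{
	unsigned long long avg = avg_steal_timer / 2;	/* decay the old average */

	if ((long long) steal > 0)
		avg += steal;				/* fold in this interval */
	avg_steal_timer = avg;
}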
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4159c63a5fd2..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
 #include <asm/crw.h>
 #include <asm/isc.h>
 #include <asm/ebcdic.h>
+#include <asm/ap.h>
 
 #include "css.h"
 #include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
 			  " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+	CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+	if (sei_area->rs != 5)
+		return;
+
+	ap_bus_cfg_chg();
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
 	switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 	case 2: /* i/o resource accessibility */
 		chsc_process_sei_res_acc(sei_area);
 		break;
+	case 3: /* ap config changed */
+		chsc_process_sei_ap_cfg_chg(sei_area);
+		break;
 	case 7: /* channel-path-availability information */
 		chsc_process_sei_chp_avail(sei_area);
 		break;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
 	struct vfio_ccw_private *private;
 	struct irb *irb;
+	bool is_final;
 
 	private = container_of(work, struct vfio_ccw_private, io_work);
 	irb = &private->irb;
 
+	is_final = !(scsw_actl(&irb->scsw) &
+		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
 	if (scsw_is_solicited(&irb->scsw)) {
 		cp_update_scsw(&private->cp, &irb->scsw);
-		cp_free(&private->cp);
+		if (is_final)
+			cp_free(&private->cp);
 	}
 	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
 	if (private->io_trigger)
 		eventfd_signal(private->io_trigger, 1);
 
-	if (private->mdev)
+	if (private->mdev && is_final)
 		private->state = VFIO_CCW_STATE_IDLE;
 }
 
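
The "final" test keys off the subchannel status word's activity-control field: as long as either the device-active or the subchannel-active bit is still set, further interrupts are expected for this I/O, so the channel program must not be freed yet. Expressed as a small helper (hypothetical name, not part of the patch):

#include <linux/types.h>
#include <asm/scsw.h>

static bool irb_is_final(union scsw *scsw)
{
	/* no device or subchannel activity left -> last interrupt for this I/O */
	return !(scsw_actl(scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
}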
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
 	struct ap_device *ap_dev = to_ap_dev(dev);
 	struct ap_driver *ap_drv = ap_dev->drv;
 
+	/* prepare ap queue device removal */
 	if (is_queue_dev(dev))
-		ap_queue_remove(to_ap_queue(dev));
+		ap_queue_prepare_remove(to_ap_queue(dev));
+
+	/* driver's chance to clean up gracefully */
 	if (ap_drv->remove)
 		ap_drv->remove(ap_dev);
 
+	/* now do the ap queue device remove */
+	if (is_queue_dev(dev))
+		ap_queue_remove(to_ap_queue(dev));
+
 	/* Remove queue/card from list of active queues/cards */
 	spin_lock_bh(&ap_list_lock);
 	if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
 EXPORT_SYMBOL(ap_bus_force_rescan);
 
 /*
+ * A config change has happened, force an ap bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+	AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+	ap_bus_force_rescan();
+}
+
+/*
  * hex2bitmap() - parse hex mask string and set bitmap.
  * Valid strings are "0x012345678" with at least one valid hex number.
  * Rest of the bitmap to the right is padded with 0. No spaces allowed
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
 	AP_STATE_WORKING,
 	AP_STATE_QUEUE_FULL,
 	AP_STATE_SUSPEND_WAIT,
+	AP_STATE_REMOVE,	/* about to be removed from driver */
 	AP_STATE_UNBOUND,	/* momentary not bound to a driver */
 	AP_STATE_BORKED,	/* broken */
 	NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
 
 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..6a340f2c3556 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
 		[AP_EVENT_POLL] = ap_sm_suspend_read,
 		[AP_EVENT_TIMEOUT] = ap_sm_nop,
 	},
+	[AP_STATE_REMOVE] = {
+		[AP_EVENT_POLL] = ap_sm_nop,
+		[AP_EVENT_TIMEOUT] = ap_sm_nop,
+	},
 	[AP_STATE_UNBOUND] = {
 		[AP_EVENT_POLL] = ap_sm_nop,
 		[AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
 }
 EXPORT_SYMBOL(ap_flush_queue);
 
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
 {
-	ap_flush_queue(aq);
+	spin_lock_bh(&aq->lock);
+	/* flush queue */
+	__ap_flush_queue(aq);
+	/* set REMOVE state to prevent new messages are queued in */
+	aq->state = AP_STATE_REMOVE;
 	del_timer_sync(&aq->timeout);
+	spin_unlock_bh(&aq->lock);
+}
 
-	/* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+	/*
+	 * all messages have been flushed and the state is
+	 * AP_STATE_REMOVE. Now reset with zero which also
+	 * clears the irq registration and move the state
+	 * to AP_STATE_UNBOUND to signal that this queue
+	 * is not used by any driver currently.
+	 */
 	spin_lock_bh(&aq->lock);
 	ap_zapq(aq->qid);
 	aq->state = AP_STATE_UNBOUND;
 	spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_remove);
 
 void ap_queue_reinit_state(struct ap_queue *aq)
 {
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
 	ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
 	spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_reinit_state);
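
Together with the ap_bus.c hunk earlier in this series, the split gives a three-step removal: prepare (flush, enter AP_STATE_REMOVE so the new jumptable entries make poll/timeout no-ops and nothing new gets queued), then the driver's own remove callback, then the final reset to AP_STATE_UNBOUND. A condensed restatement of that ordering from ap_device_remove(), using the functions introduced above (the wrapper name is made up):

static void ap_remove_sequence(struct device *dev, struct ap_driver *ap_drv)
{
	if (is_queue_dev(dev))
		ap_queue_prepare_remove(to_ap_queue(dev));	/* flush, block new work */
	if (ap_drv->remove)
		ap_drv->remove(to_ap_dev(dev));			/* driver cleans up */
	if (is_queue_dev(dev))
		ap_queue_remove(to_ap_queue(dev));		/* zero/reset, unbind */
}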
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
 
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
 						     struct zcrypt_queue *zq,
+						     struct module **pmod,
 						     unsigned int weight)
 {
 	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
 	atomic_add(weight, &zc->load);
 	atomic_add(weight, &zq->load);
 	zq->request_count++;
+	*pmod = zq->queue->ap_dev.drv->driver.owner;
 	return zq;
 }
 
 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
 				     struct zcrypt_queue *zq,
+				     struct module *mod,
 				     unsigned int weight)
 {
-	struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
 	zq->request_count--;
 	atomic_sub(weight, &zc->load);
 	atomic_sub(weight, &zq->load);
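
Passing the module pointer out of zcrypt_pick_queue() means zcrypt_drop_queue() no longer re-derives it from zq->queue->ap_dev.drv at put time; the pointer is remembered from the moment try_module_get() succeeded (presumably so the put still works if the driver has been unbound in between, which is an inference, not stated in the hunk). The general pairing looks like this (take_ref/drop_ref are made-up names):

#include <linux/module.h>

static struct module *take_ref(struct module *owner)
{
	if (!try_module_get(owner))
		return NULL;	/* module is going away, don't use it */
	return owner;		/* caller keeps this for the matching put */
}

static void drop_ref(struct module *mod)
{
	if (mod)
		module_put(mod);
}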
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 	unsigned int weight, pref_weight;
 	unsigned int func_code;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;
 
 	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
 	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 	unsigned int weight, pref_weight;
 	unsigned int func_code;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;
 
 	trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
 	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 	unsigned int func_code;
 	unsigned short *domain;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;
 
 	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
 	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 	unsigned int func_code;
 	struct ap_message ap_msg;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;
 
 	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
 	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
 	struct ap_message ap_msg;
 	unsigned int domain;
 	int qid = 0, rc = -ENODEV;
+	struct module *mod;
 
 	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
 
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
 			pref_weight = weight;
 		}
 	}
-	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 	if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
 	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
 	spin_lock(&zcrypt_list_lock);
-	zcrypt_drop_queue(pref_zc, pref_zq, weight);
+	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
 	spin_unlock(&zcrypt_list_lock);
 
 out: