author     David Hildenbrand <dahi@linux.vnet.ibm.com>      2014-01-23 06:26:52 -0500
committer  Christian Borntraeger <borntraeger@de.ibm.com>   2014-04-22 07:24:51 -0400
commit     27291e2165b6de70c476b7b675308113edd69a60 (patch)
tree       1508e3bb47e7171c176d82ab4fa231947a267140 /arch/s390
parent     af1827e773c983f1d601d674447aea89efdb1acb (diff)
KVM: s390: hardware support for guest debugging
This patch adds support for debugging the guest using the PER facility on s390. Single-stepping, hardware breakpoints and hardware watchpoints are supported.

In order to use the PER facility of the guest without the guest noticing it, the control registers of the guest have to be patched and access to them has to be intercepted (stctl, stctg, lctl, lctlg).

All PER program interrupts have to be intercepted, and only the PER interrupts relevant to the guest have to be given back. Special care has to be taken about repeated exits on the same hardware breakpoint.

The intervention of the host in the guest's PER configuration is not fully transparent: PER instruction nullification cannot be used by the guest, and too many storage-alteration events may be reported to the guest (if it has activated them for special address ranges only) while the host is concurrently debugging it.

Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
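For illustration (not part of the patch): a minimal userspace sketch of how a VMM could drive this interface through the existing KVM_SET_GUEST_DEBUG ioctl. It assumes an s390 build where <linux/kvm.h> provides the s390 kvm_guest_debug_arch layout (nr_hw_bp plus a pointer to an array of struct kvm_hw_breakpoint); vcpu_fd and enable_guest_hw_bp() are placeholder names.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Hypothetical helper: vcpu_fd is an already created KVM vcpu fd. */
static int enable_guest_hw_bp(int vcpu_fd, uint64_t guest_addr)
{
        struct kvm_hw_breakpoint bp = {
                .addr      = guest_addr, /* breakpoint address as seen by the guest */
                .phys_addr = guest_addr, /* assumed identity-mapped here for simplicity */
                .len       = 1,
                .type      = KVM_HW_BP,
        };
        struct kvm_guest_debug dbg = {
                .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
        };

        dbg.arch.nr_hw_bp = 1;
        dbg.arch.hw_bp = &bp;

        /* KVM forces PER on, patches CR9-CR11 and intercepts lctl/stctl & co. */
        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

Single-stepping works the same way with control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP and no breakpoint array; when a PER event is meant for the debugger, KVM_RUN returns with exit_reason == KVM_EXIT_DEBUG and the details in kvm_run->debug.arch.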
Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/include/asm/kvm_host.h    53
-rw-r--r--   arch/s390/include/uapi/asm/kvm.h     1
-rw-r--r--   arch/s390/kvm/Makefile               2
-rw-r--r--   arch/s390/kvm/guestdbg.c           479
-rw-r--r--   arch/s390/kvm/intercept.c            9
-rw-r--r--   arch/s390/kvm/interrupt.c            8
-rw-r--r--   arch/s390/kvm/kvm-s390.c            53
-rw-r--r--   arch/s390/kvm/kvm-s390.h            10
8 files changed, 607 insertions(+), 8 deletions(-)
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 5d9648925a8e..0d45f6fe734f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -93,13 +93,18 @@ struct kvm_s390_sie_block {
 	__u8	reserved40[4];		/* 0x0040 */
 #define LCTL_CR0	0x8000
 #define LCTL_CR6	0x0200
+#define LCTL_CR9	0x0040
+#define LCTL_CR10	0x0020
+#define LCTL_CR11	0x0010
 #define LCTL_CR14	0x0002
 	__u16	lctl;			/* 0x0044 */
 	__s16	icpua;			/* 0x0046 */
+#define ICTL_PINT	0x20000000
 #define ICTL_LPSW	0x00400000
+#define ICTL_STCTL	0x00040000
 #define ICTL_ISKE	0x00004000
 #define ICTL_SSKE	0x00002000
 #define ICTL_RRBE	0x00001000
 	__u32	ictl;			/* 0x0048 */
 	__u32	eca;			/* 0x004c */
 #define ICPT_INST	0x04
@@ -306,6 +311,45 @@ struct kvm_s390_float_interrupt {
 	unsigned int irq_count;
 };
 
+struct kvm_hw_wp_info_arch {
+	unsigned long addr;
+	unsigned long phys_addr;
+	int len;
+	char *old_data;
+};
+
+struct kvm_hw_bp_info_arch {
+	unsigned long addr;
+	int len;
+};
+
+/*
+ * Only the upper 16 bits of kvm_guest_debug->control are arch specific.
+ * Further KVM_GUESTDBG flags which can be used from userspace can be found in
+ * arch/s390/include/uapi/asm/kvm.h
+ */
+#define KVM_GUESTDBG_EXIT_PENDING 0x10000000
+
+#define guestdbg_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)
+#define guestdbg_sstep_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+#define guestdbg_hw_bp_enabled(vcpu) \
+		(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
+		(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
+
+struct kvm_guestdbg_info_arch {
+	unsigned long cr0;
+	unsigned long cr9;
+	unsigned long cr10;
+	unsigned long cr11;
+	struct kvm_hw_bp_info_arch *hw_bp_info;
+	struct kvm_hw_wp_info_arch *hw_wp_info;
+	int nr_hw_bp;
+	int nr_hw_wp;
+	unsigned long last_bp;
+};
 
 struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
@@ -321,6 +365,7 @@ struct kvm_vcpu_arch {
 		u64 stidp_data;
 	};
 	struct gmap *gmap;
+	struct kvm_guestdbg_info_arch guestdbg;
 #define KVM_S390_PFAULT_TOKEN_INVALID	(-1UL)
 	unsigned long pfault_token;
 	unsigned long pfault_select;
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index b8c0f07a0e08..0fc26430a1e5 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #define __KVM_S390
+#define __KVM_HAVE_GUEST_DEBUG
 
 /* Device control API: s390-specific devices */
 #define KVM_DEV_FLIC_GET_ALL_IRQS	1
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 83a7a355befe..b3b553469650 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -12,6 +12,6 @@ common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqch
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
 kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o
+kvm-objs += diag.o gaccess.o guestdbg.o
 
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
new file mode 100644
index 000000000000..100e99d1030d
--- /dev/null
+++ b/arch/s390/kvm/guestdbg.c
@@ -0,0 +1,479 @@
+/*
+ * kvm guest debug support
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
+ */
+#include <linux/kvm_host.h>
+#include <linux/errno.h>
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+/*
+ * Extends the address range given by *start and *stop to include the address
+ * range starting with estart and the length len. Takes care of overflowing
+ * intervals and tries to minimize the overall interval size.
+ */
+static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
+{
+        u64 estop;
+
+        if (len > 0)
+                len--;
+        else
+                len = 0;
+
+        estop = estart + len;
+
+        /* 0-0 range represents "not set" */
+        if ((*start == 0) && (*stop == 0)) {
+                *start = estart;
+                *stop = estop;
+        } else if (*start <= *stop) {
+                /* increase the existing range */
+                if (estart < *start)
+                        *start = estart;
+                if (estop > *stop)
+                        *stop = estop;
+        } else {
+                /* "overflowing" interval, whereby *stop > *start */
+                if (estart <= *stop) {
+                        if (estop > *stop)
+                                *stop = estop;
+                } else if (estop > *start) {
+                        if (estart < *start)
+                                *start = estart;
+                }
+                /* minimize the range */
+                else if ((estop - *stop) < (*start - estart))
+                        *stop = estop;
+                else
+                        *start = estart;
+        }
+}
+
+#define MAX_INST_SIZE 6
+
+static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
+{
+        unsigned long start, len;
+        u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
+        u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
+        u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
+        int i;
+
+        if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
+            vcpu->arch.guestdbg.hw_bp_info == NULL)
+                return;
+
+        /*
+         * If the guest is not interested in branching events, we can safely
+         * limit them to the PER address range.
+         */
+        if (!(*cr9 & PER_EVENT_BRANCH))
+                *cr9 |= PER_CONTROL_BRANCH_ADDRESS;
+        *cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;
+
+        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
+                start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
+                len = vcpu->arch.guestdbg.hw_bp_info[i].len;
+
+                /*
+                 * The instruction in front of the desired bp has to
+                 * report instruction-fetching events
+                 */
+                if (start < MAX_INST_SIZE) {
+                        len += start;
+                        start = 0;
+                } else {
+                        start -= MAX_INST_SIZE;
+                        len += MAX_INST_SIZE;
+                }
+
+                extend_address_range(cr10, cr11, start, len);
+        }
+}
+
+static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
+{
+        unsigned long start, len;
+        u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
+        u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
+        u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
+        int i;
+
+        if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
+            vcpu->arch.guestdbg.hw_wp_info == NULL)
+                return;
+
+        /* if host uses storage alteration for special address
+         * spaces, enable all events and give all to the guest */
+        if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
+                *cr9 &= ~PER_CONTROL_ALTERATION;
+                *cr10 = 0;
+                *cr11 = PSW_ADDR_INSN;
+        } else {
+                *cr9 &= ~PER_CONTROL_ALTERATION;
+                *cr9 |= PER_EVENT_STORE;
+
+                for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
+                        start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
+                        len = vcpu->arch.guestdbg.hw_wp_info[i].len;
+
+                        extend_address_range(cr10, cr11, start, len);
+                }
+        }
+}
+
+void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
+        vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
+        vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
+        vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
+}
+
+void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
+{
+        vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
+        vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
+        vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
+        vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
+}
+
+void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
+{
+        /*
+         * TODO: if guest psw has per enabled, otherwise 0s!
+         * This reduces the amount of reported events.
+         * Need to intercept all psw changes!
+         */
+
+        if (guestdbg_sstep_enabled(vcpu)) {
+                vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
+                vcpu->arch.sie_block->gcr[10] = 0;
+                vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
+        }
+
+        if (guestdbg_hw_bp_enabled(vcpu)) {
+                enable_all_hw_bp(vcpu);
+                enable_all_hw_wp(vcpu);
+        }
+
+        /* TODO: Instruction-fetching-nullification not allowed for now */
+        if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
+                vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
+}
+
+#define MAX_WP_SIZE 100
+
+static int __import_wp_info(struct kvm_vcpu *vcpu,
+                            struct kvm_hw_breakpoint *bp_data,
+                            struct kvm_hw_wp_info_arch *wp_info)
+{
+        int ret = 0;
+        wp_info->len = bp_data->len;
+        wp_info->addr = bp_data->addr;
+        wp_info->phys_addr = bp_data->phys_addr;
+        wp_info->old_data = NULL;
+
+        if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
+                return -EINVAL;
+
+        wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL);
+        if (!wp_info->old_data)
+                return -ENOMEM;
+        /* try to backup the original value */
+        ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
+                         wp_info->len);
+        if (ret) {
+                kfree(wp_info->old_data);
+                wp_info->old_data = NULL;
+        }
+
+        return ret;
+}
+
+#define MAX_BP_COUNT 50
+
+int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
+                            struct kvm_guest_debug *dbg)
+{
+        int ret = 0, nr_wp = 0, nr_bp = 0, i, size;
+        struct kvm_hw_breakpoint *bp_data = NULL;
+        struct kvm_hw_wp_info_arch *wp_info = NULL;
+        struct kvm_hw_bp_info_arch *bp_info = NULL;
+
+        if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
+                return 0;
+        else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
+                return -EINVAL;
+
+        size = dbg->arch.nr_hw_bp * sizeof(struct kvm_hw_breakpoint);
+        bp_data = kmalloc(size, GFP_KERNEL);
+        if (!bp_data) {
+                ret = -ENOMEM;
+                goto error;
+        }
+
+        ret = copy_from_user(bp_data, dbg->arch.hw_bp, size);
+        if (ret)
+                goto error;
+
+        for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
+                switch (bp_data[i].type) {
+                case KVM_HW_WP_WRITE:
+                        nr_wp++;
+                        break;
+                case KVM_HW_BP:
+                        nr_bp++;
+                        break;
+                default:
+                        break;
+                }
+        }
+
+        size = nr_wp * sizeof(struct kvm_hw_wp_info_arch);
+        if (size > 0) {
+                wp_info = kmalloc(size, GFP_KERNEL);
+                if (!wp_info) {
+                        ret = -ENOMEM;
+                        goto error;
+                }
+        }
+        size = nr_bp * sizeof(struct kvm_hw_bp_info_arch);
+        if (size > 0) {
+                bp_info = kmalloc(size, GFP_KERNEL);
+                if (!bp_info) {
+                        ret = -ENOMEM;
+                        goto error;
+                }
+        }
+
+        for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
+                switch (bp_data[i].type) {
+                case KVM_HW_WP_WRITE:
+                        ret = __import_wp_info(vcpu, &bp_data[i],
+                                               &wp_info[nr_wp]);
+                        if (ret)
+                                goto error;
+                        nr_wp++;
+                        break;
+                case KVM_HW_BP:
+                        bp_info[nr_bp].len = bp_data[i].len;
+                        bp_info[nr_bp].addr = bp_data[i].addr;
+                        nr_bp++;
+                        break;
+                }
+        }
+
+        vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
+        vcpu->arch.guestdbg.hw_bp_info = bp_info;
+        vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
+        vcpu->arch.guestdbg.hw_wp_info = wp_info;
+        return 0;
+error:
+        kfree(bp_data);
+        kfree(wp_info);
+        kfree(bp_info);
+        return ret;
+}
+
+void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
+{
+        int i;
+        struct kvm_hw_wp_info_arch *hw_wp_info = NULL;
+
+        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
+                hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
+                kfree(hw_wp_info->old_data);
+                hw_wp_info->old_data = NULL;
+        }
+        kfree(vcpu->arch.guestdbg.hw_wp_info);
+        vcpu->arch.guestdbg.hw_wp_info = NULL;
+
+        kfree(vcpu->arch.guestdbg.hw_bp_info);
+        vcpu->arch.guestdbg.hw_bp_info = NULL;
+
+        vcpu->arch.guestdbg.nr_hw_wp = 0;
+        vcpu->arch.guestdbg.nr_hw_bp = 0;
+}
+
+static inline int in_addr_range(u64 addr, u64 a, u64 b)
+{
+        if (a <= b)
+                return (addr >= a) && (addr <= b);
+        else
+                /* "overflowing" interval */
+                return (addr <= a) && (addr >= b);
+}
+
+#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)
+
+static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
+                                              unsigned long addr)
+{
+        struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
+        int i;
+
+        if (vcpu->arch.guestdbg.nr_hw_bp == 0)
+                return NULL;
+
+        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
+                /* addr is directly the start or in the range of a bp */
+                if (addr == bp_info->addr)
+                        goto found;
+                if (bp_info->len > 0 &&
+                    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
+                        goto found;
+
+                bp_info++;
+        }
+
+        return NULL;
+found:
+        return bp_info;
+}
+
+static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
+{
+        int i;
+        struct kvm_hw_wp_info_arch *wp_info = NULL;
+        void *temp = NULL;
+
+        if (vcpu->arch.guestdbg.nr_hw_wp == 0)
+                return NULL;
+
+        for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
+                wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
+                if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
+                        continue;
+
+                temp = kmalloc(wp_info->len, GFP_KERNEL);
+                if (!temp)
+                        continue;
+
+                /* refetch the wp data and compare it to the old value */
+                if (!read_guest(vcpu, wp_info->phys_addr, temp,
+                                wp_info->len)) {
+                        if (memcmp(temp, wp_info->old_data, wp_info->len)) {
+                                kfree(temp);
+                                return wp_info;
+                        }
+                }
+                kfree(temp);
+                temp = NULL;
+        }
+
+        return NULL;
+}
+
+void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
+{
+        vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+        vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
+}
+
+#define per_bp_event(code) \
+                (code & (PER_EVENT_IFETCH | PER_EVENT_BRANCH))
+#define per_write_wp_event(code) \
+                (code & (PER_EVENT_STORE | PER_EVENT_STORE_REAL))
+
+static int debug_exit_required(struct kvm_vcpu *vcpu)
+{
+        u32 perc = (vcpu->arch.sie_block->perc << 24);
+        struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
+        struct kvm_hw_wp_info_arch *wp_info = NULL;
+        struct kvm_hw_bp_info_arch *bp_info = NULL;
+        unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
+        unsigned long peraddr = vcpu->arch.sie_block->peraddr;
+
+        if (guestdbg_hw_bp_enabled(vcpu)) {
+                if (per_write_wp_event(perc) &&
+                    vcpu->arch.guestdbg.nr_hw_wp > 0) {
+                        wp_info = any_wp_changed(vcpu);
+                        if (wp_info) {
+                                debug_exit->addr = wp_info->addr;
+                                debug_exit->type = KVM_HW_WP_WRITE;
+                                goto exit_required;
+                        }
+                }
+                if (per_bp_event(perc) &&
+                    vcpu->arch.guestdbg.nr_hw_bp > 0) {
+                        bp_info = find_hw_bp(vcpu, addr);
+                        /* remove duplicate events if PC==PER address */
+                        if (bp_info && (addr != peraddr)) {
+                                debug_exit->addr = addr;
+                                debug_exit->type = KVM_HW_BP;
+                                vcpu->arch.guestdbg.last_bp = addr;
+                                goto exit_required;
+                        }
+                        /* breakpoint missed */
+                        bp_info = find_hw_bp(vcpu, peraddr);
+                        if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
+                                debug_exit->addr = peraddr;
+                                debug_exit->type = KVM_HW_BP;
+                                goto exit_required;
+                        }
+                }
+        }
+        if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
+                debug_exit->addr = addr;
+                debug_exit->type = KVM_SINGLESTEP;
+                goto exit_required;
+        }
+
+        return 0;
+exit_required:
+        return 1;
+}
+
+#define guest_per_enabled(vcpu) \
+                (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
+
+static void filter_guest_per_event(struct kvm_vcpu *vcpu)
+{
+        u32 perc = vcpu->arch.sie_block->perc << 24;
+        u64 peraddr = vcpu->arch.sie_block->peraddr;
+        u64 addr = vcpu->arch.sie_block->gpsw.addr;
+        u64 cr9 = vcpu->arch.sie_block->gcr[9];
+        u64 cr10 = vcpu->arch.sie_block->gcr[10];
+        u64 cr11 = vcpu->arch.sie_block->gcr[11];
+        /* filter all events demanded by the guest */
+        u32 guest_perc = perc & cr9 & PER_EVENT_MASK;
+
+        if (!guest_per_enabled(vcpu))
+                guest_perc = 0;
+
+        /* filter "successful-branching" events */
+        if (guest_perc & PER_EVENT_BRANCH &&
+            cr9 & PER_CONTROL_BRANCH_ADDRESS &&
+            !in_addr_range(addr, cr10, cr11))
+                guest_perc &= ~PER_EVENT_BRANCH;
+
+        /* filter "instruction-fetching" events */
+        if (guest_perc & PER_EVENT_IFETCH &&
+            !in_addr_range(peraddr, cr10, cr11))
+                guest_perc &= ~PER_EVENT_IFETCH;
+
+        /* All other PER events will be given to the guest */
+        /* TODO: Check altered address/address space */
+
+        vcpu->arch.sie_block->perc = guest_perc >> 24;
+
+        if (!guest_perc)
+                vcpu->arch.sie_block->iprcc &= ~PGM_PER;
+}
+
+void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
+{
+        if (debug_exit_required(vcpu))
+                vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
+
+        filter_guest_per_event(vcpu);
+}
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index c0e6b49191ba..f61c800a2d2c 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -170,6 +170,8 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
 	}
 }
 
+#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
+
 static int handle_prog(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_pgm_info pgm_info;
@@ -178,6 +180,13 @@ static int handle_prog(struct kvm_vcpu *vcpu)
 
 	vcpu->stat.exit_program_interruption++;
 
+	if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
+		kvm_s390_handle_per_event(vcpu);
+		/* the interrupt might have been filtered out completely */
+		if (vcpu->arch.sie_block->iprcc == 0)
+			return 0;
+	}
+
 	/* Restore ITDB to Program-Interruption TDB in guest memory */
 	if (!IS_TE_ENABLED(vcpu) || !IS_ITDB_VALID(vcpu))
 		goto skip_itdb;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c49b4d4d310a..f331014dd766 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -131,7 +131,13 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 		       CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
 		       &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
-	vcpu->arch.sie_block->ictl &= ~ICTL_LPSW;
+	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
+
+	if (guestdbg_enabled(vcpu)) {
+		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
+					       LCTL_CR10 | LCTL_CR11);
+		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
+	}
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7ae8c26065fb..e6bbfe1a9474 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -934,10 +934,40 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	return -EINVAL; /* not implemented yet */
 }
 
+#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
+			      KVM_GUESTDBG_USE_HW_BP | \
+			      KVM_GUESTDBG_ENABLE)
+
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 					struct kvm_guest_debug *dbg)
 {
-	return -EINVAL; /* not implemented yet */
+	int rc = 0;
+
+	vcpu->guest_debug = 0;
+	kvm_s390_clear_bp_data(vcpu);
+
+	if (vcpu->guest_debug & ~VALID_GUESTDBG_FLAGS)
+		return -EINVAL;
+
+	if (dbg->control & KVM_GUESTDBG_ENABLE) {
+		vcpu->guest_debug = dbg->control;
+		/* enforce guest PER */
+		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+
+		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
+			rc = kvm_s390_import_bp_data(vcpu, dbg);
+	} else {
+		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		vcpu->arch.guestdbg.last_bp = 0;
+	}
+
+	if (rc) {
+		vcpu->guest_debug = 0;
+		kvm_s390_clear_bp_data(vcpu);
+		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+	}
+
+	return rc;
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
@@ -1095,6 +1125,11 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	if (rc)
 		return rc;
 
+	if (guestdbg_enabled(vcpu)) {
+		kvm_s390_backup_guest_per_regs(vcpu);
+		kvm_s390_patch_guest_per_regs(vcpu);
+	}
+
 	vcpu->arch.sie_block->icptcode = 0;
 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
 	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
@@ -1111,6 +1146,9 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 		   vcpu->arch.sie_block->icptcode);
 	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
 
+	if (guestdbg_enabled(vcpu))
+		kvm_s390_restore_guest_per_regs(vcpu);
+
 	if (exit_reason >= 0) {
 		rc = 0;
 	} else if (kvm_is_ucontrol(vcpu->kvm)) {
@@ -1176,7 +1214,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 		rc = vcpu_post_run(vcpu, exit_reason);
-	} while (!signal_pending(current) && !rc);
+	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	return rc;
@@ -1187,6 +1225,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int rc;
 	sigset_t sigsaved;
 
+	if (guestdbg_exit_pending(vcpu)) {
+		kvm_s390_prepare_debug_exit(vcpu);
+		return 0;
+	}
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -1199,6 +1242,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	case KVM_EXIT_S390_RESET:
 	case KVM_EXIT_S390_UCONTROL:
 	case KVM_EXIT_S390_TSCH:
+	case KVM_EXIT_DEBUG:
 		break;
 	default:
 		BUG();
@@ -1224,6 +1268,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		rc = -EINTR;
 	}
 
+	if (guestdbg_exit_pending(vcpu) && !rc) {
+		kvm_s390_prepare_debug_exit(vcpu);
+		rc = 0;
+	}
+
 	if (rc == -EOPNOTSUPP) {
 		/* intercept cannot be handled in-kernel, prepare kvm-run */
 		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 5f00fc1e9785..be8ae0d68ab6 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -211,4 +211,14 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
 
+/* implemented in guestdbg.c */
+void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
+void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
+void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
+int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
+			    struct kvm_guest_debug *dbg);
+void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
+void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
+void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);
+
 #endif