author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 12:23:31 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-20 12:23:31 -0500
commit		f479c01c8e81096e01eb20cec67dbaebae669aee (patch)
tree		951890b22992aa6366b55308cdc05b1568cb8622 /arch
parent		d8ec26d7f8287f5788a494f56e8814210f0e64be (diff)
parent		f85168e4d96b31b09ecf09a679820b031224e69e (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The bulk of the s390 updates for v3.14. New features are the perf
  support for the CPU-Measurement Sample Facility and the EP11 support
  for the crypto cards. And the normal cleanups and bug-fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (44 commits)
  s390/cpum_sf: fix printk format warnings
  s390: Fix misspellings using 'codespell' tool
  s390/qdio: bridgeport support - CHSC part
  s390: delete new instances of __cpuinit usage
  s390/compat: fix PSW32_USER_BITS definition
  s390/zcrypt: add support for EP11 coprocessor cards
  s390/mm: optimize randomize_et_dyn for !PF_RANDOMIZE
  s390: use IS_ENABLED to check if a CONFIG is set to y or m
  s390/cio: use device_lock to synchronize calls to the ccwgroup driver
  s390/cio: use device_lock to synchronize calls to the ccw driver
  s390/cio: fix unlocked access of online member
  s390/cpum_sf: Add flag to process full SDBs only
  s390/cpum_sf: Add raw data sampling to support the diagnostic-sampling function
  s390/cpum_sf: Filter perf events based event->attr.exclude_* settings
  s390/cpum_sf: Detect KVM guest samples
  s390/cpum_sf: Add helper to read TOD from trailer entries
  s390/cpum_sf: Atomically reset trailer entry fields of sample-data-blocks
  s390/cpum_sf: Dynamically extend the sampling buffer if overflows occur
  s390/pci: reenable per default
  s390/pci/dma: fix accounting of allocated_pages
  ...
Diffstat (limited to 'arch')
-rw-r--r--  arch/s390/include/asm/compat.h          |    3
-rw-r--r--  arch/s390/include/asm/cpu_mf.h          |  181
-rw-r--r--  arch/s390/include/asm/css_chars.h       |    2
-rw-r--r--  arch/s390/include/asm/pci.h             |    1
-rw-r--r--  arch/s390/include/asm/perf_event.h      |   80
-rw-r--r--  arch/s390/include/asm/qdio.h            |   35
-rw-r--r--  arch/s390/include/asm/sclp.h            |    4
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h     |   65
-rw-r--r--  arch/s390/kernel/Makefile               |    3
-rw-r--r--  arch/s390/kernel/compat_signal.c        |    5
-rw-r--r--  arch/s390/kernel/entry64.S              |    8
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c         |    1
-rw-r--r--  arch/s390/kernel/perf_cpum_cf_events.c  |  322
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c         | 1641
-rw-r--r--  arch/s390/kernel/perf_event.c           |  174
-rw-r--r--  arch/s390/kernel/process.c              |   14
-rw-r--r--  arch/s390/kernel/ptrace.c               |   27
-rw-r--r--  arch/s390/kernel/s390_ksyms.c           |    2
-rw-r--r--  arch/s390/kernel/setup.c                |    2
-rw-r--r--  arch/s390/kernel/smp.c                  |   19
-rw-r--r--  arch/s390/kvm/priv.c                    |    2
-rw-r--r--  arch/s390/lib/uaccess_pt.c              |    4
-rw-r--r--  arch/s390/mm/pgtable.c                  |    4
-rw-r--r--  arch/s390/oprofile/hwsampler.c          |   78
-rw-r--r--  arch/s390/oprofile/hwsampler.h          |   52
-rw-r--r--  arch/s390/oprofile/init.c               |   23
-rw-r--r--  arch/s390/pci/pci.c                     |   16
-rw-r--r--  arch/s390/pci/pci_dma.c                 |   13
-rw-r--r--  arch/s390/pci/pci_event.c               |   26
29 files changed, 2608 insertions, 199 deletions
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 4bf9da03591e..5d7e8cf83bd6 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -38,7 +38,8 @@
 
 #define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
                         PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
-                        PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
+                        PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | \
+                        PSW32_ASC_PRIMARY)
 
 #define COMPAT_USER_HZ          100
 #define COMPAT_UTS_MACHINE      "s390\0\0\0\0"
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index c879fad404c8..cb700d54bd83 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -56,6 +56,96 @@ struct cpumf_ctr_info {
         u32 reserved2[12];
 } __packed;
 
+/* QUERY SAMPLING INFORMATION block */
+struct hws_qsi_info_block {         /* Bit(s) */
+        unsigned int b0_13:14;      /* 0-13: zeros */
+        unsigned int as:1;          /* 14: basic-sampling authorization */
+        unsigned int ad:1;          /* 15: diag-sampling authorization */
+        unsigned int b16_21:6;      /* 16-21: zeros */
+        unsigned int es:1;          /* 22: basic-sampling enable control */
+        unsigned int ed:1;          /* 23: diag-sampling enable control */
+        unsigned int b24_29:6;      /* 24-29: zeros */
+        unsigned int cs:1;          /* 30: basic-sampling activation control */
+        unsigned int cd:1;          /* 31: diag-sampling activation control */
+        unsigned int bsdes:16;      /* 4-5: size of basic sampling entry */
+        unsigned int dsdes:16;      /* 6-7: size of diagnostic sampling entry */
+        unsigned long min_sampl_rate; /* 8-15: minimum sampling interval */
+        unsigned long max_sampl_rate; /* 16-23: maximum sampling interval */
+        unsigned long tear;         /* 24-31: TEAR contents */
+        unsigned long dear;         /* 32-39: DEAR contents */
+        unsigned int rsvrd0;        /* 40-43: reserved */
+        unsigned int cpu_speed;     /* 44-47: CPU speed */
+        unsigned long long rsvrd1;  /* 48-55: reserved */
+        unsigned long long rsvrd2;  /* 56-63: reserved */
+} __packed;
+
+/* SET SAMPLING CONTROLS request block */
+struct hws_lsctl_request_block {
+        unsigned int s:1;           /* 0: maximum buffer indicator */
+        unsigned int h:1;           /* 1: part. level reserved for VM use */
+        unsigned long long b2_53:52;/* 2-53: zeros */
+        unsigned int es:1;          /* 54: basic-sampling enable control */
+        unsigned int ed:1;          /* 55: diag-sampling enable control */
+        unsigned int b56_61:6;      /* 56-61: - zeros */
+        unsigned int cs:1;          /* 62: basic-sampling activation control */
+        unsigned int cd:1;          /* 63: diag-sampling activation control */
+        unsigned long interval;     /* 8-15: sampling interval */
+        unsigned long tear;         /* 16-23: TEAR contents */
+        unsigned long dear;         /* 24-31: DEAR contents */
+        /* 32-63: */
+        unsigned long rsvrd1;       /* reserved */
+        unsigned long rsvrd2;       /* reserved */
+        unsigned long rsvrd3;       /* reserved */
+        unsigned long rsvrd4;       /* reserved */
+} __packed;
+
+struct hws_basic_entry {
+        unsigned int def:16;        /* 0-15  Data Entry Format */
+        unsigned int R:4;           /* 16-19 reserved */
+        unsigned int U:4;           /* 20-23 Number of unique instruct. */
+        unsigned int z:2;           /* zeros */
+        unsigned int T:1;           /* 26 PSW DAT mode */
+        unsigned int W:1;           /* 27 PSW wait state */
+        unsigned int P:1;           /* 28 PSW Problem state */
+        unsigned int AS:2;          /* 29-30 PSW address-space control */
+        unsigned int I:1;           /* 31 entry valid or invalid */
+        unsigned int:16;
+        unsigned int prim_asn:16;   /* primary ASN */
+        unsigned long long ia;      /* Instruction Address */
+        unsigned long long gpp;     /* Guest Program Parameter */
+        unsigned long long hpp;     /* Host Program Parameter */
+} __packed;
+
+struct hws_diag_entry {
+        unsigned int def:16;        /* 0-15  Data Entry Format */
+        unsigned int R:14;          /* 16-19 and 20-30 reserved */
+        unsigned int I:1;           /* 31 entry valid or invalid */
+        u8           data[];        /* Machine-dependent sample data */
+} __packed;
+
+struct hws_combined_entry {
+        struct hws_basic_entry basic;   /* Basic-sampling data entry */
+        struct hws_diag_entry  diag;    /* Diagnostic-sampling data entry */
+} __packed;
+
+struct hws_trailer_entry {
+        union {
+                struct {
+                        unsigned int f:1;       /* 0 - Block Full Indicator */
+                        unsigned int a:1;       /* 1 - Alert request control */
+                        unsigned int t:1;       /* 2 - Timestamp format */
+                        unsigned long long:61;  /* 3 - 63: Reserved */
+                };
+                unsigned long long flags;       /* 0 - 63: All indicators */
+        };
+        unsigned long long overflow;     /* 64 - sample Overflow count */
+        unsigned char timestamp[16];     /* 16 - 31 timestamp */
+        unsigned long long reserved1;    /* 32 - Reserved */
+        unsigned long long reserved2;    /*      */
+        unsigned long long progusage1;   /* 48 - reserved for programming use */
+        unsigned long long progusage2;   /*      */
+} __packed;
+
 /* Query counter information */
 static inline int qctri(struct cpumf_ctr_info *info)
 {
@@ -99,4 +189,95 @@ static inline int ecctr(u64 ctr, u64 *val)
         return cc;
 }
 
+/* Query sampling information */
+static inline int qsi(struct hws_qsi_info_block *info)
+{
+        int cc;
+        cc = 1;
+
+        asm volatile(
+                "0:     .insn   s,0xb2860000,0(%1)\n"
+                "1:     lhi     %0,0\n"
+                "2:\n"
+                EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+                : "=d" (cc), "+a" (info)
+                : "m" (*info)
+                : "cc", "memory");
+
+        return cc ? -EINVAL : 0;
+}
+
+/* Load sampling controls */
+static inline int lsctl(struct hws_lsctl_request_block *req)
+{
+        int cc;
+
+        cc = 1;
+        asm volatile(
+                "0:     .insn   s,0xb2870000,0(%1)\n"
+                "1:     ipm     %0\n"
+                "       srl     %0,28\n"
+                "2:\n"
+                EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+                : "+d" (cc), "+a" (req)
+                : "m" (*req)
+                : "cc", "memory");
+
+        return cc ? -EINVAL : 0;
+}
+
+/* Sampling control helper functions */
+
+#include <linux/time.h>
+
+static inline unsigned long freq_to_sample_rate(struct hws_qsi_info_block *qsi,
+                                                unsigned long freq)
+{
+        return (USEC_PER_SEC / freq) * qsi->cpu_speed;
+}
+
+static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
+                                                unsigned long rate)
+{
+        return USEC_PER_SEC * qsi->cpu_speed / rate;
+}
+
+#define SDB_TE_ALERT_REQ_MASK   0x4000000000000000UL
+#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
+
+/* Return TOD timestamp contained in a trailer entry */
+static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
+{
+        /* TOD in STCKE format */
+        if (te->t)
+                return *((unsigned long long *) &te->timestamp[1]);
+
+        /* TOD in STCK format */
+        return *((unsigned long long *) &te->timestamp[0]);
+}
+
+/* Return pointer to trailer entry of a sample data block */
+static inline unsigned long *trailer_entry_ptr(unsigned long v)
+{
+        void *ret;
+
+        ret = (void *) v;
+        ret += PAGE_SIZE;
+        ret -= sizeof(struct hws_trailer_entry);
+
+        return (unsigned long *) ret;
+}
+
+/* Return if the entry in the sample data block table (sdbt)
+ * is a link to the next sdbt */
+static inline int is_link_entry(unsigned long *s)
+{
+        return *s & 0x1ul ? 1 : 0;
+}
+
+/* Return pointer to the linked sdbt */
+static inline unsigned long *get_next_sdbt(unsigned long *s)
+{
+        return (unsigned long *) (*s & ~0x1ul);
+}
+
 #endif /* _ASM_S390_CPU_MF_H */
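
As an illustration of how the new helpers combine, the sketch below walks a sampling buffer the way the sampler's teardown code does: each 4KB sample-data-block-table (SDBT) holds up to 511 SDB pointers, an entry with its low bit set is a table-link to the next SDBT, and the link in the last SDBT closes the ring back to the origin. This is a sketch only, with the helpers inlined for self-containment; walk_sampling_buffer() is a hypothetical name and not part of the patch.

static inline int is_link_entry(unsigned long *s)
{
        return *s & 0x1ul ? 1 : 0;
}

static inline unsigned long *get_next_sdbt(unsigned long *s)
{
        return (unsigned long *) (*s & ~0x1ul);
}

/* Visit every sample-data-block reachable from the SDBT at 'sdbt'. */
static void walk_sampling_buffer(unsigned long *sdbt,
                                 void (*visit)(unsigned long sdb))
{
        unsigned long *origin = sdbt;

        while (*sdbt) {
                if (is_link_entry(sdbt)) {
                        sdbt = get_next_sdbt(sdbt);     /* follow table-link */
                        if (sdbt == origin)
                                break;                  /* ring closed: done */
                } else {
                        visit(*sdbt);   /* one 4KB sample-data-block */
                        sdbt++;
                }
        }
}
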
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 7e1c917bbba2..09d1dd46bd57 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -29,6 +29,8 @@ struct css_general_char {
         u32 fcx : 1;     /* bit 88 */
         u32 : 19;
         u32 alt_ssi : 1; /* bit 108 */
+        u32:1;
+        u32 narf:1;      /* bit 110 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c129ab2ac731..2583466f576b 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -144,6 +144,7 @@ int clp_disable_fh(struct zpci_dev *);
 void zpci_event_error(void *);
 void zpci_event_availability(void *);
 void zpci_rescan(void);
+bool zpci_is_enabled(void);
 #else /* CONFIG_PCI */
 static inline void zpci_event_error(void *e) {}
 static inline void zpci_event_availability(void *e) {}
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 1141fb3e7b21..159a8ec6da9a 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -1,21 +1,40 @@
 /*
  * Performance event support - s390 specific definitions.
  *
- * Copyright IBM Corp. 2009, 2012
+ * Copyright IBM Corp. 2009, 2013
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *            Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
  */
 
-#include <asm/cpu_mf.h>
+#ifndef _ASM_S390_PERF_EVENT_H
+#define _ASM_S390_PERF_EVENT_H
 
-/* CPU-measurement counter facility */
-#define PERF_CPUM_CF_MAX_CTR            256
+#ifdef CONFIG_64BIT
+
+#include <linux/perf_event.h>
+#include <linux/device.h>
+#include <asm/cpu_mf.h>
 
 /* Per-CPU flags for PMU states */
 #define PMU_F_RESERVED                  0x1000
 #define PMU_F_ENABLED                   0x2000
+#define PMU_F_IN_USE                    0x4000
+#define PMU_F_ERR_IBE                   0x0100
+#define PMU_F_ERR_LSDA                  0x0200
+#define PMU_F_ERR_MASK                  (PMU_F_ERR_IBE|PMU_F_ERR_LSDA)
+
+/* Perf definitions for PMU event attributes in sysfs */
+extern __init const struct attribute_group **cpumf_cf_event_group(void);
+extern ssize_t cpumf_events_sysfs_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *page);
+#define EVENT_VAR(_cat, _name) event_attr_##_cat##_##_name
+#define EVENT_PTR(_cat, _name) (&EVENT_VAR(_cat, _name).attr.attr)
+
+#define CPUMF_EVENT_ATTR(cat, name, id)                 \
+        PMU_EVENT_ATTR(name, EVENT_VAR(cat, name), id, cpumf_events_sysfs_show)
+#define CPUMF_EVENT_PTR(cat, name)      EVENT_PTR(cat, name)
 
-#ifdef CONFIG_64BIT
 
 /* Perf callbacks */
 struct pt_regs;
@@ -23,4 +42,55 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 #define perf_misc_flags(regs) perf_misc_flags(regs)
 
+/* Perf pt_regs extension for sample-data-entry indicators */
+struct perf_sf_sde_regs {
+        unsigned char in_guest:1;         /* guest sample */
+        unsigned long reserved:63;        /* reserved */
+};
+
+/* Perf PMU definitions for the counter facility */
+#define PERF_CPUM_CF_MAX_CTR            256
+
+/* Perf PMU definitions for the sampling facility */
+#define PERF_CPUM_SF_MAX_CTR            2
+#define PERF_EVENT_CPUM_SF              0xB0000UL /* Event: Basic-sampling */
+#define PERF_EVENT_CPUM_SF_DIAG         0xBD000UL /* Event: Combined-sampling */
+#define PERF_CPUM_SF_BASIC_MODE         0x0001    /* Basic-sampling flag */
+#define PERF_CPUM_SF_DIAG_MODE          0x0002    /* Diagnostic-sampling flag */
+#define PERF_CPUM_SF_MODE_MASK          (PERF_CPUM_SF_BASIC_MODE| \
+                                         PERF_CPUM_SF_DIAG_MODE)
+#define PERF_CPUM_SF_FULL_BLOCKS        0x0004    /* Process full SDBs only */
+
+#define REG_NONE                0
+#define REG_OVERFLOW            1
+#define OVERFLOW_REG(hwc)       ((hwc)->extra_reg.config)
+#define SFB_ALLOC_REG(hwc)      ((hwc)->extra_reg.alloc)
+#define RAWSAMPLE_REG(hwc)      ((hwc)->config)
+#define TEAR_REG(hwc)           ((hwc)->last_tag)
+#define SAMPL_RATE(hwc)         ((hwc)->event_base)
+#define SAMPL_FLAGS(hwc)        ((hwc)->config_base)
+#define SAMPL_DIAG_MODE(hwc)    (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE)
+#define SDB_FULL_BLOCKS(hwc)    (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS)
+
+/* Structure for sampling data entries to be passed as perf raw sample data
+ * to user space.  Note that raw sample data must be aligned and, thus, might
+ * be padded with zeros.
+ */
+struct sf_raw_sample {
+#define SF_RAW_SAMPLE_BASIC     PERF_CPUM_SF_BASIC_MODE
+#define SF_RAW_SAMPLE_DIAG      PERF_CPUM_SF_DIAG_MODE
+        u64                     format;
+        u32                     size;     /* Size of sf_raw_sample */
+        u16                     bsdes;    /* Basic-sampling data entry size */
+        u16                     dsdes;    /* Diagnostic-sampling data entry size */
+        struct hws_basic_entry  basic;    /* Basic-sampling data entry */
+        struct hws_diag_entry   diag;     /* Diagnostic-sampling data entry */
+        u8                      padding[]; /* Padding to next multiple of 8 */
+} __packed;
+
+/* Perf hardware reserve and release functions */
+int perf_reserve_sampling(void);
+void perf_release_sampling(void);
+
 #endif /* CONFIG_64BIT */
+#endif /* _ASM_S390_PERF_EVENT_H */
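
The sf_raw_sample layout above is what reaches user space as PERF_SAMPLE_RAW data. The following is a hedged sketch of a user-space consumer inspecting the leading fields, assuming the pointer refers to the start of the sf_raw_sample payload; the struct copy and inspect_raw_sample() are illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define SF_RAW_SAMPLE_BASIC 0x0001
#define SF_RAW_SAMPLE_DIAG  0x0002

struct sf_raw_header {          /* leading fields of struct sf_raw_sample */
        uint64_t format;        /* SF_RAW_SAMPLE_* flags */
        uint32_t size;          /* total size incl. alignment padding */
        uint16_t bsdes;         /* basic-sampling data entry size */
        uint16_t dsdes;         /* diagnostic-sampling data entry size */
} __attribute__((packed));

static void inspect_raw_sample(const void *raw)
{
        const struct sf_raw_header *hdr = raw;

        printf("format=%#llx size=%u bsdes=%u",
               (unsigned long long) hdr->format, hdr->size, hdr->bsdes);
        if (hdr->format & SF_RAW_SAMPLE_DIAG)
                printf(" dsdes=%u (diagnostic entry present)", hdr->dsdes);
        printf("\n");
}
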
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 57d0d7e794b1..d786c634e052 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
 #define QDIO_FLAG_CLEANUP_USING_HALT            0x02
 
 /**
- * struct qdio_initialize - qdio initalization data
+ * struct qdio_initialize - qdio initialization data
  * @cdev: associated ccw device
  * @q_format: queue format
  * @adapter_name: name for the adapter
@@ -378,6 +378,34 @@ struct qdio_initialize {
         struct qdio_outbuf_state *output_sbal_state_array;
 };
 
+/**
+ * enum qdio_brinfo_entry_type - type of address entry for qdio_brinfo_desc()
+ * @l3_ipv6_addr: entry contains IPv6 address
+ * @l3_ipv4_addr: entry contains IPv4 address
+ * @l2_addr_lnid: entry contains MAC address and VLAN ID
+ */
+enum qdio_brinfo_entry_type {l3_ipv6_addr, l3_ipv4_addr, l2_addr_lnid};
+
+/**
+ * struct qdio_brinfo_entry_XXX - Address entry for qdio_brinfo_desc()
+ * @nit:  Network interface token
+ * @addr: Address of one of the three types
+ *
+ * The struct is passed to the callback function by qdio_brinfo_desc()
+ */
+struct qdio_brinfo_entry_l3_ipv6 {
+        u64 nit;
+        struct { unsigned char _s6_addr[16]; } addr;
+} __packed;
+struct qdio_brinfo_entry_l3_ipv4 {
+        u64 nit;
+        struct { uint32_t _s_addr; } addr;
+} __packed;
+struct qdio_brinfo_entry_l2 {
+        u64 nit;
+        struct { u8 mac[6]; u16 lnid; } addr_lnid;
+} __packed;
+
 #define QDIO_STATE_INACTIVE             0x00000002 /* after qdio_cleanup */
 #define QDIO_STATE_ESTABLISHED          0x00000004 /* after qdio_establish */
 #define QDIO_STATE_ACTIVE               0x00000008 /* after qdio_activate */
@@ -399,5 +427,10 @@ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
 extern int qdio_shutdown(struct ccw_device *, int);
 extern int qdio_free(struct ccw_device *);
 extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
+extern int qdio_pnso_brinfo(struct subchannel_id schid,
+                int cnc, u16 *response,
+                void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
+                                void *entry),
+                void *priv);
 
 #endif /* __QDIO_H__ */
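
To show how a driver might consume qdio_pnso_brinfo(): the callback is invoked once per address entry, and the entry type selects which qdio_brinfo_entry_* layout applies. This is a sketch only; brinfo_cb() is a hypothetical consumer, not part of the patch.

#include <linux/kernel.h>
#include <asm/qdio.h>

static void brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
                      void *entry)
{
        switch (type) {
        case l2_addr_lnid: {
                struct qdio_brinfo_entry_l2 *e = entry;

                pr_info("bridgeport: nit=%016llx vlan=%u\n",
                        (unsigned long long) e->nit, e->addr_lnid.lnid);
                break;
        }
        case l3_ipv4_addr:
        case l3_ipv6_addr:
                /* decode qdio_brinfo_entry_l3_ipv4/_l3_ipv6 accordingly */
                break;
        }
}

/* Hypothetical call site, for a subchannel id 'schid':
 *
 *      u16 response;
 *      rc = qdio_pnso_brinfo(schid, 1, &response, brinfo_cb, NULL);
 */
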
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 2f390956c7c1..220e171413f8 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -52,8 +52,8 @@ int sclp_chp_configure(struct chp_id chpid);
 int sclp_chp_deconfigure(struct chp_id chpid);
 int sclp_chp_read_info(struct sclp_chp_info *info);
 void sclp_get_ipl_info(struct sclp_ipl_info *info);
-bool sclp_has_linemode(void);
-bool sclp_has_vt220(void);
+bool __init sclp_has_linemode(void);
+bool __init sclp_has_vt220(void);
 int sclp_pci_configure(u32 fid);
 int sclp_pci_deconfigure(u32 fid);
 int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index e83fc116f5bf..f2b18eacaca8 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -154,6 +154,67 @@ struct ica_xcRB {
         unsigned short  priority_window;
         unsigned int    status;
 } __attribute__((packed));
+
+/**
+ * struct ep11_cprb - EP11 connectivity programming request block
+ * @cprb_len:           CPRB header length [0x0020]
+ * @cprb_ver_id:        CPRB version id. [0x04]
+ * @pad_000:            Alignment pad bytes
+ * @flags:              Admin cmd [0x80] or functional cmd [0x00]
+ * @func_id:            Function id / subtype [0x5434]
+ * @source_id:          Source id [originator id]
+ * @target_id:          Target id [usage/ctrl domain id]
+ * @ret_code:           Return code
+ * @reserved1:          Reserved
+ * @reserved2:          Reserved
+ * @payload_len:        Payload length
+ */
+struct ep11_cprb {
+        uint16_t        cprb_len;
+        unsigned char   cprb_ver_id;
+        unsigned char   pad_000[2];
+        unsigned char   flags;
+        unsigned char   func_id[2];
+        uint32_t        source_id;
+        uint32_t        target_id;
+        uint32_t        ret_code;
+        uint32_t        reserved1;
+        uint32_t        reserved2;
+        uint32_t        payload_len;
+} __attribute__((packed));
+
+/**
+ * struct ep11_target_dev - EP11 target device list
+ * @ap_id:      AP device id
+ * @dom_id:     Usage domain id
+ */
+struct ep11_target_dev {
+        uint16_t ap_id;
+        uint16_t dom_id;
+};
+
+/**
+ * struct ep11_urb - EP11 user request block
+ * @targets_num:        Number of target adapters
+ * @targets:            Addr to target adapter list
+ * @weight:             Level of request priority
+ * @req_no:             Request id/number
+ * @req_len:            Request length
+ * @req:                Addr to request block
+ * @resp_len:           Response length
+ * @resp:               Addr to response block
+ */
+struct ep11_urb {
+        uint16_t        targets_num;
+        uint64_t        targets;
+        uint64_t        weight;
+        uint64_t        req_no;
+        uint64_t        req_len;
+        uint64_t        req;
+        uint64_t        resp_len;
+        uint64_t        resp;
+} __attribute__((packed));
+
 #define AUTOSELECT ((unsigned int)0xFFFFFFFF)
 
 #define ZCRYPT_IOCTL_MAGIC 'z'
@@ -183,6 +244,9 @@ struct ica_xcRB {
  *   ZSECSENDCPRB
  *     Send an arbitrary CPRB to a crypto card.
  *
+ *   ZSENDEP11CPRB
+ *     Send an arbitrary EP11 CPRB to an EP11 coprocessor crypto card.
+ *
  *   Z90STAT_STATUS_MASK
  *     Return an 64 element array of unsigned chars for the status of
  *     all devices.
@@ -256,6 +320,7 @@ struct ica_xcRB {
 #define ICARSAMODEXPO   _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0)
 #define ICARSACRT       _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x06, 0)
 #define ZSECSENDCPRB    _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x81, 0)
+#define ZSENDEP11CPRB   _IOC(_IOC_READ|_IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x04, 0)
 
 /* New status calls */
 #define Z90STAT_TOTALCOUNT _IOR(ZCRYPT_IOCTL_MAGIC, 0x40, int)
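
For orientation, here is a user-space sketch of driving the new ZSENDEP11CPRB ioctl with the structures defined above. The /dev/z90crypt node and the minimal error handling are assumptions; a real caller must build proper EP11 CPRBs for the request and response buffers per the coprocessor documentation.

#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

static int send_ep11_cprb(void *req, size_t req_len,
                          void *resp, size_t resp_len,
                          uint16_t ap, uint16_t dom)
{
        struct ep11_target_dev target = { .ap_id = ap, .dom_id = dom };
        struct ep11_urb urb = {
                .targets_num = 1,
                .targets  = (uint64_t)(unsigned long) &target,
                .req_len  = req_len,
                .req      = (uint64_t)(unsigned long) req,
                .resp_len = resp_len,
                .resp     = (uint64_t)(unsigned long) resp,
        };
        int fd, rc;

        fd = open("/dev/z90crypt", O_RDWR);     /* device node: assumption */
        if (fd < 0)
                return -1;
        rc = ioctl(fd, ZSENDEP11CPRB, &urb);    /* new ioctl added above */
        close(fd);
        return rc;
}
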
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 2403303cfed7..1b3ac09c11b6 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -60,7 +60,8 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
 obj-$(CONFIG_CRASH_DUMP)        += crash_dump.o
 
 ifdef CONFIG_64BIT
-obj-$(CONFIG_PERF_EVENTS)       += perf_event.o perf_cpum_cf.o
+obj-$(CONFIG_PERF_EVENTS)       += perf_event.o perf_cpum_cf.o perf_cpum_sf.o \
+                                   perf_cpum_cf_events.o
 obj-y                           += runtime_instr.o cache.o
 endif
 
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 95e7ba0fbb7e..8b84bc373e94 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -412,8 +412,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
                 regs->gprs[14] = (__u64 __force) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
         } else {
                 regs->gprs[14] = (__u64 __force) frame->retcode | PSW32_ADDR_AMODE;
-                err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
-                                  (u16 __force __user *)(frame->retcode));
+                if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
+                               (u16 __force __user *)(frame->retcode)))
+                        goto give_sigsegv;
         }
 
         /* Set up backchain. */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e5b43c97a834..384e609b4711 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -74,7 +74,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
         .endm
 
         .macro  LPP newpp
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
         tm      __LC_MACHINE_FLAGS+6,0x20       # MACHINE_FLAG_LPP
         jz      .+8
         .insn   s,0xb2800000,\newpp
@@ -82,7 +82,7 @@ _TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
         .endm
 
         .macro  HANDLE_SIE_INTERCEPT scratch,reason
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
         tmhh    %r8,0x0001              # interrupting from user ?
         jnz     .+62
         lgr     \scratch,%r9
@@ -946,7 +946,7 @@ cleanup_idle_insn:
         .quad   __critical_end - __critical_start
 
 
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+#if IS_ENABLED(CONFIG_KVM)
 /*
  * sie64a calling convention:
  * %r2 pointer to sie control block
@@ -975,7 +975,7 @@ sie_done:
         lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
 # some program checks are suppressing. C code (e.g. do_protection_exception)
 # will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions beween sie64a and sie_done should not cause program
+# instructions between sie64a and sie_done should not cause program
 # interrupts. So lets use a nop (47 00 00 00) as a landing pad.
 # See also HANDLE_SIE_INTERCEPT
 rewind_pad:
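
Background on the conversion used in the hunks above: IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h> evaluates to 1 when CONFIG_FOO is built-in (=y) or modular (=m), which is exactly what the open-coded defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) test expressed. A minimal sketch of the idiom in C:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_KVM)
/* compiled when CONFIG_KVM=y (built-in) or CONFIG_KVM=m (module) */
#endif
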
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 1105502bf6e9..f51214c04858 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -680,6 +680,7 @@ static int __init cpumf_pmu_init(void)
                 goto out;
         }
 
+        cpumf_pmu.attr_groups = cpumf_cf_event_group();
         rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
         if (rc) {
                 pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
new file mode 100644
index 000000000000..4554a4bae39e
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -0,0 +1,322 @@
+/*
+ * Perf PMU sysfs events attributes for available CPU-measurement counters
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+
+
+/* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
+
+CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000);
+CPUMF_EVENT_ATTR(cf, INSTRUCTIONS, 0x0001);
+CPUMF_EVENT_ATTR(cf, L1I_DIR_WRITES, 0x0002);
+CPUMF_EVENT_ATTR(cf, L1I_PENALTY_CYCLES, 0x0003);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_CPU_CYCLES, 0x0020);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_DIR_WRITES, 0x0022);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES, 0x0023);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_DIR_WRITES, 0x0024);
+CPUMF_EVENT_ATTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES, 0x0025);
+CPUMF_EVENT_ATTR(cf, L1D_DIR_WRITES, 0x0004);
+CPUMF_EVENT_ATTR(cf, L1D_PENALTY_CYCLES, 0x0005);
+CPUMF_EVENT_ATTR(cf, PRNG_FUNCTIONS, 0x0040);
+CPUMF_EVENT_ATTR(cf, PRNG_CYCLES, 0x0041);
+CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+CPUMF_EVENT_ATTR(cf, PRNG_BLOCKED_CYCLES, 0x0043);
+CPUMF_EVENT_ATTR(cf, SHA_FUNCTIONS, 0x0044);
+CPUMF_EVENT_ATTR(cf, SHA_CYCLES, 0x0045);
+CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_FUNCTIONS, 0x0046);
+CPUMF_EVENT_ATTR(cf, SHA_BLOCKED_CYCLES, 0x0047);
+CPUMF_EVENT_ATTR(cf, DEA_FUNCTIONS, 0x0048);
+CPUMF_EVENT_ATTR(cf, DEA_CYCLES, 0x0049);
+CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_FUNCTIONS, 0x004a);
+CPUMF_EVENT_ATTR(cf, DEA_BLOCKED_CYCLES, 0x004b);
+CPUMF_EVENT_ATTR(cf, AES_FUNCTIONS, 0x004c);
+CPUMF_EVENT_ATTR(cf, AES_CYCLES, 0x004d);
+CPUMF_EVENT_ATTR(cf, AES_BLOCKED_FUNCTIONS, 0x004e);
+CPUMF_EVENT_ATTR(cf, AES_BLOCKED_CYCLES, 0x004f);
+CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z10, L1D_L3_LOCAL_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z10, L1I_L3_REMOTE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z10, L1D_L3_REMOTE_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_z10, L1D_LMEM_SOURCED_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z10, L1I_LMEM_SOURCED_WRITES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z10, L1D_RO_EXCL_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z10, L1I_CACHELINE_INVALIDATES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z10, ITLB1_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z10, DTLB1_WRITES, 0x008b);
+CPUMF_EVENT_ATTR(cf_z10, TLB2_PTE_WRITES, 0x008c);
+CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_WRITES, 0x008d);
+CPUMF_EVENT_ATTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES, 0x008e);
+CPUMF_EVENT_ATTR(cf_z10, ITLB1_MISSES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z10, DTLB1_MISSES, 0x0092);
+CPUMF_EVENT_ATTR(cf_z10, L2C_STORES_SENT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z196, L1D_L2_SOURCED_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z196, L1I_L2_SOURCED_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z196, DTLB1_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z196, ITLB1_MISSES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z196, L2C_STORES_SENT, 0x0085);
+CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES, 0x0088);
+CPUMF_EVENT_ATTR(cf_z196, L1D_RO_EXCL_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x008b);
+CPUMF_EVENT_ATTR(cf_z196, DTLB1_HPAGE_WRITES, 0x008c);
+CPUMF_EVENT_ATTR(cf_z196, L1D_LMEM_SOURCED_WRITES, 0x008d);
+CPUMF_EVENT_ATTR(cf_z196, L1I_LMEM_SOURCED_WRITES, 0x008e);
+CPUMF_EVENT_ATTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z196, DTLB1_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_z196, ITLB1_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_z196, TLB2_PTE_WRITES, 0x0092);
+CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_z196, TLB2_CRSTE_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0096);
+CPUMF_EVENT_ATTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0098);
+CPUMF_EVENT_ATTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009b);
+CPUMF_EVENT_ATTR(cf_zec12, DTLB1_MISSES, 0x0080);
+CPUMF_EVENT_ATTR(cf_zec12, ITLB1_MISSES, 0x0081);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_L2I_SOURCED_WRITES, 0x0082);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_L2I_SOURCED_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_L2D_SOURCED_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_zec12, DTLB1_WRITES, 0x0085);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_LMEM_SOURCED_WRITES, 0x0087);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_LMEM_SOURCED_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_RO_EXCL_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_zec12, DTLB1_HPAGE_WRITES, 0x008b);
+CPUMF_EVENT_ATTR(cf_zec12, ITLB1_WRITES, 0x008c);
+CPUMF_EVENT_ATTR(cf_zec12, TLB2_PTE_WRITES, 0x008d);
+CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES, 0x008e);
+CPUMF_EVENT_ATTR(cf_zec12, TLB2_CRSTE_WRITES, 0x008f);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES, 0x0090);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES, 0x0091);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES, 0x0092);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES, 0x0093);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES, 0x0094);
+CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TEND, 0x0095);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV, 0x0097);
+CPUMF_EVENT_ATTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV, 0x0098);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES, 0x0099);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES, 0x009a);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES, 0x009b);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES, 0x009c);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES, 0x009d);
+CPUMF_EVENT_ATTR(cf_zec12, TX_C_TEND, 0x009e);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV, 0x009f);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
+CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
+CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
+CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
+
+static struct attribute *cpumcf_pmu_event_attr[] = {
+        CPUMF_EVENT_PTR(cf, CPU_CYCLES),
+        CPUMF_EVENT_PTR(cf, INSTRUCTIONS),
+        CPUMF_EVENT_PTR(cf, L1I_DIR_WRITES),
+        CPUMF_EVENT_PTR(cf, L1I_PENALTY_CYCLES),
+        CPUMF_EVENT_PTR(cf, PROBLEM_STATE_CPU_CYCLES),
+        CPUMF_EVENT_PTR(cf, PROBLEM_STATE_INSTRUCTIONS),
+        CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_DIR_WRITES),
+        CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1I_PENALTY_CYCLES),
+        CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_DIR_WRITES),
+        CPUMF_EVENT_PTR(cf, PROBLEM_STATE_L1D_PENALTY_CYCLES),
+        CPUMF_EVENT_PTR(cf, L1D_DIR_WRITES),
+        CPUMF_EVENT_PTR(cf, L1D_PENALTY_CYCLES),
+        CPUMF_EVENT_PTR(cf, PRNG_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, PRNG_CYCLES),
+        CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, PRNG_BLOCKED_CYCLES),
+        CPUMF_EVENT_PTR(cf, SHA_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, SHA_CYCLES),
+        CPUMF_EVENT_PTR(cf, SHA_BLOCKED_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, SHA_BLOCKED_CYCLES),
+        CPUMF_EVENT_PTR(cf, DEA_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, DEA_CYCLES),
+        CPUMF_EVENT_PTR(cf, DEA_BLOCKED_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, DEA_BLOCKED_CYCLES),
+        CPUMF_EVENT_PTR(cf, AES_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, AES_CYCLES),
+        CPUMF_EVENT_PTR(cf, AES_BLOCKED_FUNCTIONS),
+        CPUMF_EVENT_PTR(cf, AES_BLOCKED_CYCLES),
+        NULL,
+};
+
+static struct attribute *cpumcf_z10_pmu_event_attr[] __initdata = {
+        CPUMF_EVENT_PTR(cf_z10, L1I_L2_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1D_L2_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1I_L3_LOCAL_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1D_L3_LOCAL_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1I_L3_REMOTE_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1D_L3_REMOTE_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1D_LMEM_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1I_LMEM_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1D_RO_EXCL_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, L1I_CACHELINE_INVALIDATES),
+        CPUMF_EVENT_PTR(cf_z10, ITLB1_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, DTLB1_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, TLB2_PTE_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, TLB2_CRSTE_HPAGE_WRITES),
+        CPUMF_EVENT_PTR(cf_z10, ITLB1_MISSES),
+        CPUMF_EVENT_PTR(cf_z10, DTLB1_MISSES),
+        CPUMF_EVENT_PTR(cf_z10, L2C_STORES_SENT),
+        NULL,
+};
+
+static struct attribute *cpumcf_z196_pmu_event_attr[] __initdata = {
+        CPUMF_EVENT_PTR(cf_z196, L1D_L2_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_L2_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, DTLB1_MISSES),
+        CPUMF_EVENT_PTR(cf_z196, ITLB1_MISSES),
+        CPUMF_EVENT_PTR(cf_z196, L2C_STORES_SENT),
+        CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1D_ONBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_ONBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1D_RO_EXCL_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1D_OFFBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, DTLB1_HPAGE_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1D_LMEM_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_LMEM_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_OFFBOOK_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, DTLB1_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, ITLB1_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, TLB2_PTE_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_HPAGE_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, TLB2_CRSTE_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1D_ONCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1D_OFFCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_ONCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_z196, L1I_OFFCHIP_L3_SOURCED_WRITES),
+        NULL,
+};
+
+static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
+        CPUMF_EVENT_PTR(cf_zec12, DTLB1_MISSES),
+        CPUMF_EVENT_PTR(cf_zec12, ITLB1_MISSES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_L2I_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_L2I_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_L2D_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, DTLB1_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_LMEM_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_LMEM_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_RO_EXCL_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, DTLB1_HPAGE_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, ITLB1_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, TLB2_PTE_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_HPAGE_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, TLB2_CRSTE_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_ONBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, TX_NC_TEND),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_ONCHIP_L3_SOURCED_WRITES_IV),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_OFFCHIP_L3_SOURCED_WRITES_IV),
+        CPUMF_EVENT_PTR(cf_zec12, L1D_OFFBOOK_L3_SOURCED_WRITES_IV),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_ONBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L4_SOURCED_WRITES),
+        CPUMF_EVENT_PTR(cf_zec12, TX_C_TEND),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_ONCHIP_L3_SOURCED_WRITES_IV),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_OFFCHIP_L3_SOURCED_WRITES_IV),
+        CPUMF_EVENT_PTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV),
+        CPUMF_EVENT_PTR(cf_zec12, TX_NC_TABORT),
+        CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_NO_SPECIAL),
+        CPUMF_EVENT_PTR(cf_zec12, TX_C_TABORT_SPECIAL),
+        NULL,
+};
+
+/* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
+
+static struct attribute_group cpumsf_pmu_events_group = {
+        .name = "events",
+        .attrs = cpumcf_pmu_event_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+
+static struct attribute *cpumsf_pmu_format_attr[] = {
+        &format_attr_event.attr,
+        NULL,
+};
+
+static struct attribute_group cpumsf_pmu_format_group = {
+        .name = "format",
+        .attrs = cpumsf_pmu_format_attr,
+};
+
+static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
+        &cpumsf_pmu_events_group,
+        &cpumsf_pmu_format_group,
+        NULL,
+};
+
+
+static __init struct attribute **merge_attr(struct attribute **a,
+                                            struct attribute **b)
+{
+        struct attribute **new;
+        int j, i;
+
+        for (j = 0; a[j]; j++)
+                ;
+        for (i = 0; b[i]; i++)
+                j++;
+        j++;
+
+        new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
+        if (!new)
+                return NULL;
+        j = 0;
+        for (i = 0; a[i]; i++)
+                new[j++] = a[i];
+        for (i = 0; b[i]; i++)
+                new[j++] = b[i];
+        new[j] = NULL;
+
+        return new;
+}
+
+__init const struct attribute_group **cpumf_cf_event_group(void)
+{
+        struct attribute **combined, **model;
+        struct cpuid cpu_id;
+
+        get_cpu_id(&cpu_id);
+        switch (cpu_id.machine) {
+        case 0x2097:
+        case 0x2098:
+                model = cpumcf_z10_pmu_event_attr;
+                break;
+        case 0x2817:
+        case 0x2818:
+                model = cpumcf_z196_pmu_event_attr;
+                break;
+        case 0x2827:
+        case 0x2828:
+                model = cpumcf_zec12_pmu_event_attr;
+                break;
+        default:
+                model = NULL;
+                break;
+        };
+
+        if (!model)
+                goto out;
+
+        combined = merge_attr(cpumcf_pmu_event_attr, model);
+        if (combined)
+                cpumsf_pmu_events_group.attrs = combined;
+out:
+        return cpumsf_pmu_attr_groups;
+}
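
For reference, one of the attribute definitions above expands, through perf's PMU_EVENT_ATTR() helper, to roughly the following (the expansion is paraphrased, not verbatim):

CPUMF_EVENT_ATTR(cf, CPU_CYCLES, 0x0000);

/* ... becomes, roughly: */
static struct perf_pmu_events_attr event_attr_cf_CPU_CYCLES = {
        .attr = __ATTR(CPU_CYCLES, 0444, cpumf_events_sysfs_show, NULL),
        .id   = 0x0000,
};

Each such attribute then appears as a file under /sys/bus/event_source/devices/cpum_cf/events/ once the PMU is registered, so a counter can be requested by name, for example with: perf stat -e cpum_cf/CPU_CYCLES/.
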
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
new file mode 100644
index 000000000000..6c0d29827cb6
--- /dev/null
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -0,0 +1,1641 @@
1/*
2 * Performance event support for the System z CPU-measurement Sampling Facility
3 *
4 * Copyright IBM Corp. 2013
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License (version 2 only)
9 * as published by the Free Software Foundation.
10 */
11#define KMSG_COMPONENT "cpum_sf"
12#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/kernel_stat.h>
16#include <linux/perf_event.h>
17#include <linux/percpu.h>
18#include <linux/notifier.h>
19#include <linux/export.h>
20#include <linux/slab.h>
21#include <linux/mm.h>
22#include <linux/moduleparam.h>
23#include <asm/cpu_mf.h>
24#include <asm/irq.h>
25#include <asm/debug.h>
26#include <asm/timex.h>
27
28/* Minimum number of sample-data-block-tables:
29 * At least one table is required for the sampling buffer structure.
30 * A single table contains up to 511 pointers to sample-data-blocks.
31 */
32#define CPUM_SF_MIN_SDBT 1
33
34/* Number of sample-data-blocks per sample-data-block-table (SDBT):
35 * A table contains SDB pointers (8 bytes) and one table-link entry
36 * that points to the origin of the next SDBT.
37 */
38#define CPUM_SF_SDB_PER_TABLE ((PAGE_SIZE - 8) / 8)
39
40/* Maximum page offset for an SDBT table-link entry:
41 * If this page offset is reached, a table-link entry to the next SDBT
42 * must be added.
43 */
44#define CPUM_SF_SDBT_TL_OFFSET (CPUM_SF_SDB_PER_TABLE * 8)
45static inline int require_table_link(const void *sdbt)
46{
47 return ((unsigned long) sdbt & ~PAGE_MASK) == CPUM_SF_SDBT_TL_OFFSET;
48}
49
50/* Minimum and maximum sampling buffer sizes:
51 *
52 * This number represents the maximum size of the sampling buffer taking
53 * the number of sample-data-block-tables into account. Note that these
54 * numbers apply to the basic-sampling function only.
55 * The maximum number of SDBs is increased by CPUM_SF_SDB_DIAG_FACTOR if
56 * the diagnostic-sampling function is active.
57 *
58 * Sampling buffer size Buffer characteristics
59 * ---------------------------------------------------
60 * 64KB == 16 pages (4KB per page)
61 * 1 page for SDB-tables
62 * 15 pages for SDBs
63 *
64 * 32MB == 8192 pages (4KB per page)
65 * 16 pages for SDB-tables
66 * 8176 pages for SDBs
67 */
68static unsigned long __read_mostly CPUM_SF_MIN_SDB = 15;
69static unsigned long __read_mostly CPUM_SF_MAX_SDB = 8176;
70static unsigned long __read_mostly CPUM_SF_SDB_DIAG_FACTOR = 1;
71
72struct sf_buffer {
73 unsigned long *sdbt; /* Sample-data-block-table origin */
74 /* buffer characteristics (required for buffer increments) */
75 unsigned long num_sdb; /* Number of sample-data-blocks */
76 unsigned long num_sdbt; /* Number of sample-data-block-tables */
77 unsigned long *tail; /* last sample-data-block-table */
78};
79
80struct cpu_hw_sf {
81 /* CPU-measurement sampling information block */
82 struct hws_qsi_info_block qsi;
83 /* CPU-measurement sampling control block */
84 struct hws_lsctl_request_block lsctl;
85 struct sf_buffer sfb; /* Sampling buffer */
86 unsigned int flags; /* Status flags */
87 struct perf_event *event; /* Scheduled perf event */
88};
89static DEFINE_PER_CPU(struct cpu_hw_sf, cpu_hw_sf);
90
91/* Debug feature */
92static debug_info_t *sfdbg;
93
94/*
95 * sf_disable() - Switch off sampling facility
96 */
97static int sf_disable(void)
98{
99 struct hws_lsctl_request_block sreq;
100
101 memset(&sreq, 0, sizeof(sreq));
102 return lsctl(&sreq);
103}
104
105/*
106 * sf_buffer_available() - Check for an allocated sampling buffer
107 */
108static int sf_buffer_available(struct cpu_hw_sf *cpuhw)
109{
110 return !!cpuhw->sfb.sdbt;
111}
112
113/*
114 * deallocate sampling facility buffer
115 */
116static void free_sampling_buffer(struct sf_buffer *sfb)
117{
118 unsigned long *sdbt, *curr;
119
120 if (!sfb->sdbt)
121 return;
122
123 sdbt = sfb->sdbt;
124 curr = sdbt;
125
126 /* Free the SDBT after all SDBs are processed... */
127 while (1) {
128 if (!*curr || !sdbt)
129 break;
130
131 /* Process table-link entries */
132 if (is_link_entry(curr)) {
133 curr = get_next_sdbt(curr);
134 if (sdbt)
135 free_page((unsigned long) sdbt);
136
137 /* If the origin is reached, sampling buffer is freed */
138 if (curr == sfb->sdbt)
139 break;
140 else
141 sdbt = curr;
142 } else {
143 /* Process SDB pointer */
144 if (*curr) {
145 free_page(*curr);
146 curr++;
147 }
148 }
149 }
150
151 debug_sprintf_event(sfdbg, 5,
152 "free_sampling_buffer: freed sdbt=%p\n", sfb->sdbt);
153 memset(sfb, 0, sizeof(*sfb));
154}
155
156static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
157{
158 unsigned long sdb, *trailer;
159
160 /* Allocate and initialize sample-data-block */
161 sdb = get_zeroed_page(gfp_flags);
162 if (!sdb)
163 return -ENOMEM;
164 trailer = trailer_entry_ptr(sdb);
165 *trailer = SDB_TE_ALERT_REQ_MASK;
166
167 /* Link SDB into the sample-data-block-table */
168 *sdbt = sdb;
169
170 return 0;
171}
172
173/*
174 * realloc_sampling_buffer() - extend sampler memory
175 *
176 * Allocates new sample-data-blocks and adds them to the specified sampling
177 * buffer memory.
178 *
179 * Important: This modifies the sampling buffer and must be called when the
180 * sampling facility is disabled.
181 *
182 * Returns zero on success, non-zero otherwise.
183 */
184static int realloc_sampling_buffer(struct sf_buffer *sfb,
185 unsigned long num_sdb, gfp_t gfp_flags)
186{
187 int i, rc;
188 unsigned long *new, *tail;
189
190 if (!sfb->sdbt || !sfb->tail)
191 return -EINVAL;
192
193 if (!is_link_entry(sfb->tail))
194 return -EINVAL;
195
196 /* Append to the existing sampling buffer, overwriting the table-link
197 * register.
198 * The tail variables always points to the "tail" (last and table-link)
199 * entry in an SDB-table.
200 */
201 tail = sfb->tail;
202
203 /* Do a sanity check whether the table-link entry points to
204 * the sampling buffer origin.
205 */
206 if (sfb->sdbt != get_next_sdbt(tail)) {
207 debug_sprintf_event(sfdbg, 3, "realloc_sampling_buffer: "
208 "sampling buffer is not linked: origin=%p"
209 "tail=%p\n",
210 (void *) sfb->sdbt, (void *) tail);
211 return -EINVAL;
212 }
213
214 /* Allocate remaining SDBs */
215 rc = 0;
216 for (i = 0; i < num_sdb; i++) {
217 /* Allocate a new SDB-table if it is full. */
218 if (require_table_link(tail)) {
219 new = (unsigned long *) get_zeroed_page(gfp_flags);
220 if (!new) {
221 rc = -ENOMEM;
222 break;
223 }
224 sfb->num_sdbt++;
225 /* Link current page to tail of chain */
226 *tail = (unsigned long)(void *) new + 1;
227 tail = new;
228 }
229
230 /* Allocate a new sample-data-block.
231 * If there is not enough memory, stop the realloc process
232 * and simply use what was allocated. If this is a temporary
233 * issue, a new realloc call (if required) might succeed.
234 */
235 rc = alloc_sample_data_block(tail, gfp_flags);
236 if (rc)
237 break;
238 sfb->num_sdb++;
239 tail++;
240 }
241
242 /* Link sampling buffer to its origin */
243 *tail = (unsigned long) sfb->sdbt + 1;
244 sfb->tail = tail;
245
246 debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer"
247 " settings: sdbt=%lu sdb=%lu\n",
248 sfb->num_sdbt, sfb->num_sdb);
249 return rc;
250}
251
252/*
253 * allocate_sampling_buffer() - allocate sampler memory
254 *
255 * Allocates and initializes a sampling buffer structure using the
256 * specified number of sample-data-blocks (SDB). For each allocation,
257 * a 4K page is used. The number of sample-data-block-tables (SDBT)
258 * are calculated from SDBs.
259 * Also set the ALERT_REQ mask in each SDBs trailer.
260 *
261 * Returns zero on success, non-zero otherwise.
262 */
263static int alloc_sampling_buffer(struct sf_buffer *sfb, unsigned long num_sdb)
264{
265 int rc;
266
267 if (sfb->sdbt)
268 return -EINVAL;
269
270 /* Allocate the sample-data-block-table origin */
271 sfb->sdbt = (unsigned long *) get_zeroed_page(GFP_KERNEL);
272 if (!sfb->sdbt)
273 return -ENOMEM;
274 sfb->num_sdb = 0;
275 sfb->num_sdbt = 1;
276
277 /* Link the table origin to point to itself to prepare for
278 * realloc_sampling_buffer() invocation.
279 */
280 sfb->tail = sfb->sdbt;
281 *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1;
282
283 /* Allocate requested number of sample-data-blocks */
284 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL);
285 if (rc) {
286 free_sampling_buffer(sfb);
287 debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: "
288 "realloc_sampling_buffer failed with rc=%i\n", rc);
289 } else
290 debug_sprintf_event(sfdbg, 4,
291 "alloc_sampling_buffer: tear=%p dear=%p\n",
292 sfb->sdbt, (void *) *sfb->sdbt);
293 return rc;
294}
295
296static void sfb_set_limits(unsigned long min, unsigned long max)
297{
298 struct hws_qsi_info_block si;
299
300 CPUM_SF_MIN_SDB = min;
301 CPUM_SF_MAX_SDB = max;
302
303 memset(&si, 0, sizeof(si));
304 if (!qsi(&si))
305 CPUM_SF_SDB_DIAG_FACTOR = DIV_ROUND_UP(si.dsdes, si.bsdes);
306}
307
308static unsigned long sfb_max_limit(struct hw_perf_event *hwc)
309{
310 return SAMPL_DIAG_MODE(hwc) ? CPUM_SF_MAX_SDB * CPUM_SF_SDB_DIAG_FACTOR
311 : CPUM_SF_MAX_SDB;
312}
313
314static unsigned long sfb_pending_allocs(struct sf_buffer *sfb,
315 struct hw_perf_event *hwc)
316{
317 if (!sfb->sdbt)
318 return SFB_ALLOC_REG(hwc);
319 if (SFB_ALLOC_REG(hwc) > sfb->num_sdb)
320 return SFB_ALLOC_REG(hwc) - sfb->num_sdb;
321 return 0;
322}
323
324static int sfb_has_pending_allocs(struct sf_buffer *sfb,
325 struct hw_perf_event *hwc)
326{
327 return sfb_pending_allocs(sfb, hwc) > 0;
328}
329
330static void sfb_account_allocs(unsigned long num, struct hw_perf_event *hwc)
331{
332 /* Limit the number of SDBs to not exceed the maximum */
333 num = min_t(unsigned long, num, sfb_max_limit(hwc) - SFB_ALLOC_REG(hwc));
334 if (num)
335 SFB_ALLOC_REG(hwc) += num;
336}
337
338static void sfb_init_allocs(unsigned long num, struct hw_perf_event *hwc)
339{
340 SFB_ALLOC_REG(hwc) = 0;
341 sfb_account_allocs(num, hwc);
342}
343
344static size_t event_sample_size(struct hw_perf_event *hwc)
345{
346 struct sf_raw_sample *sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
347 size_t sample_size;
348
349 /* The sample size depends on the sampling function: The basic-sampling
350 * function must be always enabled, diagnostic-sampling function is
351 * optional.
352 */
353 sample_size = sfr->bsdes;
354 if (SAMPL_DIAG_MODE(hwc))
355 sample_size += sfr->dsdes;
356
357 return sample_size;
358}
359
360static void deallocate_buffers(struct cpu_hw_sf *cpuhw)
361{
362 if (cpuhw->sfb.sdbt)
363 free_sampling_buffer(&cpuhw->sfb);
364}
365
366static int allocate_buffers(struct cpu_hw_sf *cpuhw, struct hw_perf_event *hwc)
367{
368 unsigned long n_sdb, freq, factor;
369 size_t sfr_size, sample_size;
370 struct sf_raw_sample *sfr;
371
372 /* Allocate raw sample buffer
373 *
374 * The raw sample buffer is used to temporarily store sampling data
375 * entries for perf raw sample processing. The buffer size mainly
376 * depends on the size of diagnostic-sampling data entries which is
377 * machine-specific. The exact size calculation includes:
378 * 1. The first 4 bytes of diagnostic-sampling data entries are
379 * already reflected in the sf_raw_sample structure. Subtract
380 * these bytes.
381 * 2. The perf raw sample data must be 8-byte aligned (u64) and
382 * perf's internal data size must be considered too. So add
383 * an additional u32 for correct alignment and subtract before
384 * allocating the buffer.
385 * 3. Store the raw sample buffer pointer in the perf event
386 * hardware structure.
387 */
388 sfr_size = ALIGN((sizeof(*sfr) - sizeof(sfr->diag) + cpuhw->qsi.dsdes) +
389 sizeof(u32), sizeof(u64));
390 sfr_size -= sizeof(u32);
391 sfr = kzalloc(sfr_size, GFP_KERNEL);
392 if (!sfr)
393 return -ENOMEM;
394 sfr->size = sfr_size;
395 sfr->bsdes = cpuhw->qsi.bsdes;
396 sfr->dsdes = cpuhw->qsi.dsdes;
397 RAWSAMPLE_REG(hwc) = (unsigned long) sfr;
398
399 /* Calculate sampling buffers using 4K pages
400 *
401 * 1. Determine the sample data size which depends on the used
402 * sampling functions, for example, basic-sampling or
403 * basic-sampling with diagnostic-sampling.
404 *
405 * 2. Use the sampling frequency as input. The sampling buffer is
406 * designed for almost one second. This can be adjusted through
407 * the "factor" variable.
408 * In any case, alloc_sampling_buffer() sets the Alert Request
409 * Control indicator to trigger a measurement-alert to harvest
410 * sample-data-blocks (sdb).
411 *
412 * 3. Compute the number of sample-data-blocks and ensure a minimum
413 * of CPUM_SF_MIN_SDB. Also ensure the upper limit does not
414 * exceed a "calculated" maximum. The symbolic maximum is
415 * designed for basic-sampling only and needs to be increased if
416 * diagnostic-sampling is active.
417 * See also the remarks for these symbolic constants.
418 *
419 * 4. Compute the number of sample-data-block-tables (SDBT) and
420 * ensure a minimum of CPUM_SF_MIN_SDBT (one table can manage up
421 * to 511 SDBs).
422 */
423 sample_size = event_sample_size(hwc);
424 freq = sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc));
425 factor = 1;
426 n_sdb = DIV_ROUND_UP(freq, factor * ((PAGE_SIZE-64) / sample_size));
427 if (n_sdb < CPUM_SF_MIN_SDB)
428 n_sdb = CPUM_SF_MIN_SDB;
429
430 /* If there is already a sampling buffer allocated, it is very likely
431 * that the sampling facility is enabled too. If the event to be
432 * initialized requires a greater sampling buffer, the allocation must
433 * be postponed. Changing the sampling buffer requires the sampling
434 * facility to be in the disabled state. So, account the number of
435 * required SDBs and let cpumsf_pmu_enable() resize the buffer just
436 * before the event is started.
437 */
438 sfb_init_allocs(n_sdb, hwc);
439 if (sf_buffer_available(cpuhw))
440 return 0;
441
442 debug_sprintf_event(sfdbg, 3,
443 "allocate_buffers: rate=%lu f=%lu sdb=%lu/%lu"
444 " sample_size=%lu cpuhw=%p\n",
445 SAMPL_RATE(hwc), freq, n_sdb, sfb_max_limit(hwc),
446 sample_size, cpuhw);
447
448 return alloc_sampling_buffer(&cpuhw->sfb,
449 sfb_pending_allocs(&cpuhw->sfb, hwc));
450}
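/* A standalone userspace sketch of the two size computations above, using
 * assumed example values (bsdes, dsdes, the sf_raw_sample header size and
 * the sampling frequency are machine- and build-specific in reality; the
 * 64 bytes subtracted from the page size leave room for the trailer entry):
 */
#include <stdio.h>

#define ALIGN(x, a)		(((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long page_size = 4096, hdr = 16;	/* assumed header size */
	unsigned long bsdes = 32, dsdes = 96;		/* assumed entry sizes */
	unsigned long freq = 10000, factor = 1;
	unsigned long sfr_size, sample_size, n_sdb;

	/* Raw sample buffer: pad by a u32, align to u64, drop the pad again */
	sfr_size = ALIGN(hdr + dsdes + sizeof(unsigned int),
			 sizeof(unsigned long long));
	sfr_size -= sizeof(unsigned int);

	/* Number of SDBs needed for roughly one second of samples */
	sample_size = bsdes + dsdes;
	n_sdb = DIV_ROUND_UP(freq, factor * ((page_size - 64) / sample_size));

	printf("sfr_size=%lu sample_size=%lu n_sdb=%lu\n",
	       sfr_size, sample_size, n_sdb);
	return 0;
}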
451
452static unsigned long min_percent(unsigned int percent, unsigned long base,
453 unsigned long min)
454{
455 return min_t(unsigned long, min, DIV_ROUND_UP(percent * base, 100));
456}
457
458static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
459{
460 /* Use a percentage-based approach to extend the sampling facility
461 * buffer. Accept up to 5% sample data loss.
462 * Vary the extents between 1% to 5% of the current number of
463 * sample-data-blocks.
464 */
465 if (ratio <= 5)
466 return 0;
467 if (ratio <= 25)
468 return min_percent(1, base, 1);
469 if (ratio <= 50)
470 return min_percent(1, base, 1);
471 if (ratio <= 75)
472 return min_percent(2, base, 2);
473 if (ratio <= 100)
474 return min_percent(3, base, 3);
475 if (ratio <= 250)
476 return min_percent(4, base, 4);
477
478 return min_percent(5, base, 8);
479}
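/* A userspace demo of the loss-ratio buckets above, reusing the same logic
 * (the 25% and 50% buckets return the same extent upstream, so they are
 * folded into one branch here):
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long min_percent(unsigned int percent, unsigned long base,
				 unsigned long min)
{
	unsigned long pct = DIV_ROUND_UP(percent * base, 100);

	return min < pct ? min : pct;	/* min_t(unsigned long, min, pct) */
}

static unsigned long compute_sfb_extent(unsigned long ratio, unsigned long base)
{
	if (ratio <= 5)
		return 0;
	if (ratio <= 50)
		return min_percent(1, base, 1);
	if (ratio <= 75)
		return min_percent(2, base, 2);
	if (ratio <= 100)
		return min_percent(3, base, 3);
	if (ratio <= 250)
		return min_percent(4, base, 4);
	return min_percent(5, base, 8);
}

int main(void)
{
	unsigned long ratios[] = { 3, 20, 60, 90, 200, 400 };
	unsigned int i;

	for (i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("ratio=%lu%% -> extent=%lu SDBs\n", ratios[i],
		       compute_sfb_extent(ratios[i], 128));
	return 0;
}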
480
481static void sfb_account_overflows(struct cpu_hw_sf *cpuhw,
482 struct hw_perf_event *hwc)
483{
484 unsigned long ratio, num;
485
486 if (!OVERFLOW_REG(hwc))
487 return;
488
489 /* The sample_overflow contains the average number of sample data
490 * entries that have been lost because sample-data-blocks were full.
491 *
492 * Calculate the total number of sample data entries that have been
493 * discarded. Then calculate the ratio of lost samples to total samples
494 * per second in percent.
495 */
496 ratio = DIV_ROUND_UP(100 * OVERFLOW_REG(hwc) * cpuhw->sfb.num_sdb,
497 sample_rate_to_freq(&cpuhw->qsi, SAMPL_RATE(hwc)));
498
499 /* Compute number of sample-data-blocks */
500 num = compute_sfb_extent(ratio, cpuhw->sfb.num_sdb);
501 if (num)
502 sfb_account_allocs(num, hwc);
503
504 debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow=%llu ratio=%lu"
505 " num=%lu\n", OVERFLOW_REG(hwc), ratio, num);
506 OVERFLOW_REG(hwc) = 0;
507}
508
509/* extend_sampling_buffer() - Extend sampling buffer
510 * @sfb: Sampling buffer structure (for local CPU)
511 * @hwc: Perf event hardware structure
512 *
513 * Use this function to extend the sampling buffer based on the overflow counter
514 * and postponed allocation extents stored in the specified Perf event hardware.
515 *
516 * Important: This function disables the sampling facility in order to safely
517 * change the sampling buffer structure. Do not call this function
518 * when the PMU is active.
519 */
520static void extend_sampling_buffer(struct sf_buffer *sfb,
521 struct hw_perf_event *hwc)
522{
523 unsigned long num, num_old;
524 int rc;
525
526 num = sfb_pending_allocs(sfb, hwc);
527 if (!num)
528 return;
529 num_old = sfb->num_sdb;
530
531 /* Disable the sampling facility to reset any states and also
532 * clear pending measurement alerts.
533 */
534 sf_disable();
535
536 /* Extend the sampling buffer.
537 * This memory allocation typically happens in an atomic context when
538 * called by perf. Because this is a reallocation, it is fine if the
539 * new SDB-request cannot be satisfied immediately.
540 */
541 rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC);
542 if (rc)
543 debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc "
544 "failed with rc=%i\n", rc);
545
546 if (sfb_has_pending_allocs(sfb, hwc))
547 debug_sprintf_event(sfdbg, 5, "sfb: extend: "
548 "req=%lu alloc=%lu remaining=%lu\n",
549 num, sfb->num_sdb - num_old,
550 sfb_pending_allocs(sfb, hwc));
551}
552
553
554/* Number of perf events counting hardware events */
555static atomic_t num_events;
556/* Used to avoid races in calling reserve/release_cpumf_hardware */
557static DEFINE_MUTEX(pmc_reserve_mutex);
558
559#define PMC_INIT 0
560#define PMC_RELEASE 1
561#define PMC_FAILURE 2
562static void setup_pmc_cpu(void *flags)
563{
564 int err;
565 struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
566
567 err = 0;
568 switch (*((int *) flags)) {
569 case PMC_INIT:
570 memset(cpusf, 0, sizeof(*cpusf));
571 err = qsi(&cpusf->qsi);
572 if (err)
573 break;
574 cpusf->flags |= PMU_F_RESERVED;
575 err = sf_disable();
576 if (err)
577 pr_err("Switching off the sampling facility failed "
578 "with rc=%i\n", err);
579 debug_sprintf_event(sfdbg, 5,
580 "setup_pmc_cpu: initialized: cpuhw=%p\n", cpusf);
581 break;
582 case PMC_RELEASE:
583 cpusf->flags &= ~PMU_F_RESERVED;
584 err = sf_disable();
585 if (err) {
586 pr_err("Switching off the sampling facility failed "
587 "with rc=%i\n", err);
588 } else
589 deallocate_buffers(cpusf);
590 debug_sprintf_event(sfdbg, 5,
591 "setup_pmc_cpu: released: cpuhw=%p\n", cpusf);
592 break;
593 }
594 if (err)
595 *((int *) flags) |= PMC_FAILURE;
596}
597
598static void release_pmc_hardware(void)
599{
600 int flags = PMC_RELEASE;
601
602 irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
603 on_each_cpu(setup_pmc_cpu, &flags, 1);
604 perf_release_sampling();
605}
606
607static int reserve_pmc_hardware(void)
608{
609 int flags = PMC_INIT;
610 int err;
611
612 err = perf_reserve_sampling();
613 if (err)
614 return err;
615 on_each_cpu(setup_pmc_cpu, &flags, 1);
616 if (flags & PMC_FAILURE) {
617 release_pmc_hardware();
618 return -ENODEV;
619 }
620 irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
621
622 return 0;
623}
624
625static void hw_perf_event_destroy(struct perf_event *event)
626{
627 /* Free raw sample buffer */
628 if (RAWSAMPLE_REG(&event->hw))
629 kfree((void *) RAWSAMPLE_REG(&event->hw));
630
631 /* Release PMC if this is the last perf event */
632 if (!atomic_add_unless(&num_events, -1, 1)) {
633 mutex_lock(&pmc_reserve_mutex);
634 if (atomic_dec_return(&num_events) == 0)
635 release_pmc_hardware();
636 mutex_unlock(&pmc_reserve_mutex);
637 }
638}
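/* A userspace sketch (C11 atomics + a pthread mutex) of the reservation
 * pattern used by hw_perf_event_destroy() above and __hw_perf_event_init()
 * below: the mutex is taken only for the 0 <-> 1 transitions, all other
 * increments and decrements stay lock-free:
 */
#include <pthread.h>
#include <stdatomic.h>

static atomic_int num_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/* atomic_inc_not_zero() analogue */
static int inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	return 0;
}

static int get_hw(int (*reserve)(void))
{
	int err = 0;

	if (inc_not_zero(&num_events))
		return 0;		/* fast path: already reserved */
	pthread_mutex_lock(&reserve_mutex);
	if (atomic_load(&num_events) == 0 && reserve())
		err = -1;		/* reservation failed */
	else
		atomic_fetch_add(&num_events, 1);
	pthread_mutex_unlock(&reserve_mutex);
	return err;
}

static void put_hw(void (*release)(void))
{
	int old = atomic_load(&num_events);

	/* atomic_add_unless(&num_events, -1, 1) analogue */
	while (old > 1)
		if (atomic_compare_exchange_weak(&num_events, &old, old - 1))
			return;		/* fast path: not the last user */
	pthread_mutex_lock(&reserve_mutex);
	if (atomic_fetch_sub(&num_events, 1) == 1)
		release();		/* last user releases the hardware */
	pthread_mutex_unlock(&reserve_mutex);
}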
639
640static void hw_init_period(struct hw_perf_event *hwc, u64 period)
641{
642 hwc->sample_period = period;
643 hwc->last_period = hwc->sample_period;
644 local64_set(&hwc->period_left, hwc->sample_period);
645}
646
647static void hw_reset_registers(struct hw_perf_event *hwc,
648 unsigned long *sdbt_origin)
649{
650 struct sf_raw_sample *sfr;
651
652 /* (Re)set to first sample-data-block-table */
653 TEAR_REG(hwc) = (unsigned long) sdbt_origin;
654
655 /* (Re)set raw sampling buffer register */
656 sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(hwc);
657 memset(&sfr->basic, 0, sizeof(sfr->basic));
658 memset(&sfr->diag, 0, sfr->dsdes);
659}
660
661static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si,
662 unsigned long rate)
663{
664 return clamp_t(unsigned long, rate,
665 si->min_sampl_rate, si->max_sampl_rate);
666}
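/* hw_limit_rate() written out without the clamp_t() helper; same behavior: */
unsigned long clamp_rate(unsigned long rate, unsigned long lo, unsigned long hi)
{
	if (rate < lo)
		return lo;
	return (rate > hi) ? hi : rate;
}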
667
668static int __hw_perf_event_init(struct perf_event *event)
669{
670 struct cpu_hw_sf *cpuhw;
671 struct hws_qsi_info_block si;
672 struct perf_event_attr *attr = &event->attr;
673 struct hw_perf_event *hwc = &event->hw;
674 unsigned long rate;
675 int cpu, err;
676
677 /* Reserve CPU-measurement sampling facility */
678 err = 0;
679 if (!atomic_inc_not_zero(&num_events)) {
680 mutex_lock(&pmc_reserve_mutex);
681 if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
682 err = -EBUSY;
683 else
684 atomic_inc(&num_events);
685 mutex_unlock(&pmc_reserve_mutex);
686 }
687 event->destroy = hw_perf_event_destroy;
688
689 if (err)
690 goto out;
691
692 /* Access per-CPU sampling information (query sampling info) */
693 /*
694 * The event->cpu value can be -1 to count on every CPU, for example,
695 * when attaching to a task. If this is specified, use the query
696 * sampling info from the current CPU, otherwise use event->cpu to
697 * retrieve the per-CPU information.
698 * Later, cpuhw indicates whether to allocate sampling buffers for a
699 * particular CPU (cpuhw!=NULL) or each online CPU (cpuhw==NULL).
700 */
701 memset(&si, 0, sizeof(si));
702 cpuhw = NULL;
703 if (event->cpu == -1)
704 qsi(&si);
705 else {
706 /* Event is pinned to a particular CPU, retrieve the per-CPU
707 * sampling structure for accessing the CPU-specific QSI.
708 */
709 cpuhw = &per_cpu(cpu_hw_sf, event->cpu);
710 si = cpuhw->qsi;
711 }
712
713 /* Check sampling facility authorization and, if not authorized,
714 * fall back to other PMUs. It is safe to check any CPU because
715 * the authorization is identical for all configured CPUs.
716 */
717 if (!si.as) {
718 err = -ENOENT;
719 goto out;
720 }
721
722 /* Always enable basic sampling */
723 SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
724
725 /* Check if diagnostic sampling is requested. Deny if the required
726 * sampling authorization is missing.
727 */
728 if (attr->config == PERF_EVENT_CPUM_SF_DIAG) {
729 if (!si.ad) {
730 err = -EPERM;
731 goto out;
732 }
733 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_DIAG_MODE;
734 }
735
736 /* Check and set other sampling flags */
737 if (attr->config1 & PERF_CPUM_SF_FULL_BLOCKS)
738 SAMPL_FLAGS(hwc) |= PERF_CPUM_SF_FULL_BLOCKS;
739
740 /* The sampling information (si) contains information about the
741 * min/max sampling intervals and the CPU speed. So calculate the
742 * correct sampling interval and avoid the whole period adjust
743 * feedback loop.
744 */
745 rate = 0;
746 if (attr->freq) {
747 rate = freq_to_sample_rate(&si, attr->sample_freq);
748 rate = hw_limit_rate(&si, rate);
749 attr->freq = 0;
750 attr->sample_period = rate;
751 } else {
752 /* The min/max sampling rates specify the valid range
753 * of sample periods. If the specified sample period is
754 * out of range, limit the period to the range boundary.
755 */
756 rate = hw_limit_rate(&si, hwc->sample_period);
757
758 /* The perf core maintains a maximum sample rate that is
759 * configurable through the sysctl interface. Ensure the
760 * sampling rate does not exceed this value. This also helps
761 * to avoid throttling when pushing samples with
762 * perf_event_overflow().
763 */
764 if (sample_rate_to_freq(&si, rate) >
765 sysctl_perf_event_sample_rate) {
766 err = -EINVAL;
767 debug_sprintf_event(sfdbg, 1, "Sampling rate exceeds maximum perf sample rate\n");
768 goto out;
769 }
770 }
771 SAMPL_RATE(hwc) = rate;
772 hw_init_period(hwc, SAMPL_RATE(hwc));
773
774 /* Initialize sample data overflow accounting */
775 hwc->extra_reg.reg = REG_OVERFLOW;
776 OVERFLOW_REG(hwc) = 0;
777
778 /* Allocate the per-CPU sampling buffer using the CPU information
779 * from the event. If the event is not pinned to a particular
780 * CPU (event->cpu == -1; or cpuhw == NULL), allocate sampling
781 * buffers for each online CPU.
782 */
783 if (cpuhw)
784 /* Event is pinned to a particular CPU */
785 err = allocate_buffers(cpuhw, hwc);
786 else {
787 /* Event is not pinned, allocate sampling buffer on
788 * each online CPU
789 */
790 for_each_online_cpu(cpu) {
791 cpuhw = &per_cpu(cpu_hw_sf, cpu);
792 err = allocate_buffers(cpuhw, hwc);
793 if (err)
794 break;
795 }
796 }
797out:
798 return err;
799}
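/* A sketch of the frequency-to-period conversion above, assuming cpu_speed
 * is given in cycles per microsecond (the real freq_to_sample_rate() and
 * sample_rate_to_freq() helpers live in cpu_mf.h; the formulas here are
 * illustrative assumptions that merely round-trip consistently):
 */
#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned long freq_to_rate(unsigned long cpu_speed, unsigned long freq)
{
	return (USEC_PER_SEC / freq) * cpu_speed;	/* cycles between samples */
}

static unsigned long rate_to_freq(unsigned long cpu_speed, unsigned long rate)
{
	return USEC_PER_SEC * cpu_speed / rate;		/* samples per second */
}

int main(void)
{
	unsigned long cpu_speed = 5000;	/* assumed: 5 GHz = 5000 cycles/us */
	unsigned long rate = freq_to_rate(cpu_speed, 4000);

	printf("period=%lu cycles -> freq=%lu Hz\n",
	       rate, rate_to_freq(cpu_speed, rate));
	return 0;
}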
800
801static int cpumsf_pmu_event_init(struct perf_event *event)
802{
803 int err;
804
805 /* No support for taken branch sampling */
806 if (has_branch_stack(event))
807 return -EOPNOTSUPP;
808
809 switch (event->attr.type) {
810 case PERF_TYPE_RAW:
811 if ((event->attr.config != PERF_EVENT_CPUM_SF) &&
812 (event->attr.config != PERF_EVENT_CPUM_SF_DIAG))
813 return -ENOENT;
814 break;
815 case PERF_TYPE_HARDWARE:
816 /* Support sampling of CPU cycles in addition to counting
817 * them with the counter facility. However, the counter
818 * facility is more precise; hence, restrict this PMU to
819 * sampling events only.
820 */
821 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES)
822 return -ENOENT;
823 if (!is_sampling_event(event))
824 return -ENOENT;
825 break;
826 default:
827 return -ENOENT;
828 }
829
830 /* Check online status of the CPU to which the event is pinned */
831 if (event->cpu >= nr_cpumask_bits ||
832 (event->cpu >= 0 && !cpu_online(event->cpu)))
833 return -ENODEV;
834
835 /* Force reset of idle/hv excludes regardless of what the
836 * user requested.
837 */
838 if (event->attr.exclude_hv)
839 event->attr.exclude_hv = 0;
840 if (event->attr.exclude_idle)
841 event->attr.exclude_idle = 0;
842
843 err = __hw_perf_event_init(event);
844 if (unlikely(err))
845 if (event->destroy)
846 event->destroy(event);
847 return err;
848}
849
850static void cpumsf_pmu_enable(struct pmu *pmu)
851{
852 struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
853 struct hw_perf_event *hwc;
854 int err;
855
856 if (cpuhw->flags & PMU_F_ENABLED)
857 return;
858
859 if (cpuhw->flags & PMU_F_ERR_MASK)
860 return;
861
862 /* Check whether to extend the sampling buffer.
863 *
864 * Two conditions trigger an increase of the sampling buffer for a
865 * perf event:
866 * 1. Postponed buffer allocations from the event initialization.
867 * 2. Sampling overflows that contribute to pending allocations.
868 *
869 * Note that the extend_sampling_buffer() function disables the sampling
870 * facility, but it can be fully re-enabled using sampling controls that
871 * have been saved in cpumsf_pmu_disable().
872 */
873 if (cpuhw->event) {
874 hwc = &cpuhw->event->hw;
875 /* Account number of overflow-designated buffer extents */
876 sfb_account_overflows(cpuhw, hwc);
877 if (sfb_has_pending_allocs(&cpuhw->sfb, hwc))
878 extend_sampling_buffer(&cpuhw->sfb, hwc);
879 }
880
881 /* (Re)enable the PMU and sampling facility */
882 cpuhw->flags |= PMU_F_ENABLED;
883 barrier();
884
885 err = lsctl(&cpuhw->lsctl);
886 if (err) {
887 cpuhw->flags &= ~PMU_F_ENABLED;
888 pr_err("Loading sampling controls failed: op=%i err=%i\n",
889 1, err);
890 return;
891 }
892
893 debug_sprintf_event(sfdbg, 6, "pmu_enable: es=%i cs=%i ed=%i cd=%i "
894 "tear=%p dear=%p\n", cpuhw->lsctl.es, cpuhw->lsctl.cs,
895 cpuhw->lsctl.ed, cpuhw->lsctl.cd,
896 (void *) cpuhw->lsctl.tear, (void *) cpuhw->lsctl.dear);
897}
898
899static void cpumsf_pmu_disable(struct pmu *pmu)
900{
901 struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
902 struct hws_lsctl_request_block inactive;
903 struct hws_qsi_info_block si;
904 int err;
905
906 if (!(cpuhw->flags & PMU_F_ENABLED))
907 return;
908
909 if (cpuhw->flags & PMU_F_ERR_MASK)
910 return;
911
912 /* Switch off sampling activation control */
913 inactive = cpuhw->lsctl;
914 inactive.cs = 0;
915 inactive.cd = 0;
916
917 err = lsctl(&inactive);
918 if (err) {
919 pr_err("Loading sampling controls failed: op=%i err=%i\n",
920 2, err);
921 return;
922 }
923
924 /* Save state of TEAR and DEAR register contents */
925 if (!qsi(&si)) {
926 /* TEAR/DEAR values are valid only if the sampling facility is
927 * enabled. Note that cpumsf_pmu_disable() might be called even
928 * for a disabled sampling facility because cpumsf_pmu_enable()
929 * controls the enable/disable state.
930 */
931 if (si.es) {
932 cpuhw->lsctl.tear = si.tear;
933 cpuhw->lsctl.dear = si.dear;
934 }
935 } else
936 debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: "
937 "qsi() failed with err=%i\n", err);
938
939 cpuhw->flags &= ~PMU_F_ENABLED;
940}
941
942/* perf_exclude_event() - Filter event
943 * @event: The perf event
944 * @regs: pt_regs structure
945 * @sde_regs: Sample-data-entry (sde) regs structure
946 *
947 * Filter perf events according to their exclude specification.
948 *
949 * Return non-zero if the event shall be excluded.
950 */
951static int perf_exclude_event(struct perf_event *event, struct pt_regs *regs,
952 struct perf_sf_sde_regs *sde_regs)
953{
954 if (event->attr.exclude_user && user_mode(regs))
955 return 1;
956 if (event->attr.exclude_kernel && !user_mode(regs))
957 return 1;
958 if (event->attr.exclude_guest && sde_regs->in_guest)
959 return 1;
960 if (event->attr.exclude_host && !sde_regs->in_guest)
961 return 1;
962 return 0;
963}
964
965/* perf_push_sample() - Push samples to perf
966 * @event: The perf event
967 * @sample: Hardware sample data
968 *
969 * Use the hardware sample data to create a perf event sample. The sample
970 * is then pushed to the event subsystem and the function checks for
971 * possible event overflows. If an event overflow occurs, the PMU is
972 * stopped.
973 *
974 * Return non-zero if an event overflow occurred.
975 */
976static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
977{
978 int overflow;
979 struct pt_regs regs;
980 struct perf_sf_sde_regs *sde_regs;
981 struct perf_sample_data data;
982 struct perf_raw_record raw;
983
984 /* Setup perf sample */
985 perf_sample_data_init(&data, 0, event->hw.last_period);
986 raw.size = sfr->size;
987 raw.data = sfr;
988 data.raw = &raw;
989
990 /* Setup pt_regs to look like a CPU-measurement external interrupt
991 * using the Program Request Alert code. The otherwise unused
992 * regs.int_parm_long field contains additional sample-data-entry
993 * related indicators.
994 */
995 memset(&regs, 0, sizeof(regs));
996 regs.int_code = 0x1407;
997 regs.int_parm = CPU_MF_INT_SF_PRA;
998 sde_regs = (struct perf_sf_sde_regs *) &regs.int_parm_long;
999
1000 regs.psw.addr = sfr->basic.ia;
1001 if (sfr->basic.T)
1002 regs.psw.mask |= PSW_MASK_DAT;
1003 if (sfr->basic.W)
1004 regs.psw.mask |= PSW_MASK_WAIT;
1005 if (sfr->basic.P)
1006 regs.psw.mask |= PSW_MASK_PSTATE;
1007 switch (sfr->basic.AS) {
1008 case 0x0:
1009 regs.psw.mask |= PSW_ASC_PRIMARY;
1010 break;
1011 case 0x1:
1012 regs.psw.mask |= PSW_ASC_ACCREG;
1013 break;
1014 case 0x2:
1015 regs.psw.mask |= PSW_ASC_SECONDARY;
1016 break;
1017 case 0x3:
1018 regs.psw.mask |= PSW_ASC_HOME;
1019 break;
1020 }
1021
1022 /* The host-program-parameter (hpp) contains the sie control
1023 * block that is set by sie64a() in entry64.S. Check if hpp
1024 * refers to a valid control block and set sde_regs flags
1025 * accordingly. This would allow using hpp values for other
1026 * purposes too.
1027 * For now, simply use a non-zero value as guest indicator.
1028 */
1029 if (sfr->basic.hpp)
1030 sde_regs->in_guest = 1;
1031
1032 overflow = 0;
1033 if (perf_exclude_event(event, &regs, sde_regs))
1034 goto out;
1035 if (perf_event_overflow(event, &data, &regs)) {
1036 overflow = 1;
1037 event->pmu->stop(event, 0);
1038 }
1039 perf_event_update_userpage(event);
1040out:
1041 return overflow;
1042}
1043
1044static void perf_event_count_update(struct perf_event *event, u64 count)
1045{
1046 local64_add(count, &event->count);
1047}
1048
1049static int sample_format_is_valid(struct hws_combined_entry *sample,
1050 unsigned int flags)
1051{
1052 if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
1053 /* Only basic-sampling data entries with data-entry-format
1054 * version of 0x0001 can be processed.
1055 */
1056 if (sample->basic.def != 0x0001)
1057 return 0;
1058 if (flags & PERF_CPUM_SF_DIAG_MODE)
1059 /* The data-entry-format number of diagnostic-sampling data
1060 * entries can vary. Because diagnostic data is just passed
1061 * through, do only a sanity check on the DEF.
1062 */
1063 if (sample->diag.def < 0x8001)
1064 return 0;
1065 return 1;
1066}
1067
1068static int sample_is_consistent(struct hws_combined_entry *sample,
1069 unsigned long flags)
1070{
1071 /* This check applies only to basic-sampling data entries of potentially
1072 * combined-sampling data entries. Invalid entries cannot be processed
1073 * by the PMU and, thus, do not deliver an associated
1074 * diagnostic-sampling data entry.
1075 */
1076 if (unlikely(!(flags & PERF_CPUM_SF_BASIC_MODE)))
1077 return 0;
1078 /*
1079 * Samples are skipped if they are invalid or if the instruction
1080 * address is not predictable, i.e., the wait-state bit is
1081 * set.
1082 */
1083 if (sample->basic.I || sample->basic.W)
1084 return 0;
1085 return 1;
1086}
1087
1088static void reset_sample_slot(struct hws_combined_entry *sample,
1089 unsigned long flags)
1090{
1091 if (likely(flags & PERF_CPUM_SF_BASIC_MODE))
1092 sample->basic.def = 0;
1093 if (flags & PERF_CPUM_SF_DIAG_MODE)
1094 sample->diag.def = 0;
1095}
1096
1097static void sfr_store_sample(struct sf_raw_sample *sfr,
1098 struct hws_combined_entry *sample)
1099{
1100 if (likely(sfr->format & PERF_CPUM_SF_BASIC_MODE))
1101 sfr->basic = sample->basic;
1102 if (sfr->format & PERF_CPUM_SF_DIAG_MODE)
1103 memcpy(&sfr->diag, &sample->diag, sfr->dsdes);
1104}
1105
1106static void debug_sample_entry(struct hws_combined_entry *sample,
1107 struct hws_trailer_entry *te,
1108 unsigned long flags)
1109{
1110 debug_sprintf_event(sfdbg, 4, "hw_collect_samples: Found unknown "
1111 "sampling data entry: te->f=%i basic.def=%04x (%p)"
1112 " diag.def=%04x (%p)\n", te->f,
1113 sample->basic.def, &sample->basic,
1114 (flags & PERF_CPUM_SF_DIAG_MODE)
1115 ? sample->diag.def : 0xFFFF,
1116 (flags & PERF_CPUM_SF_DIAG_MODE)
1117 ? &sample->diag : NULL);
1118}
1119
1120/* hw_collect_samples() - Walk through a sample-data-block and collect samples
1121 * @event: The perf event
1122 * @sdbt: Sample-data-block table
1123 * @overflow: Event overflow counter
1124 *
1125 * Walks through a sample-data-block and collects sampling data entries that are
1126 * then pushed to the perf event subsystem. Depending on the sampling function,
1127 * there can be either basic-sampling or combined-sampling data entries. A
1128 * combined-sampling data entry consists of a basic- and a diagnostic-sampling
1129 * data entry. The sampling function is determined by the flags in the perf
1130 * event hardware structure. The function always works with a combined-sampling
1131 * data entry but ignores the diagnostic portion if it is not available.
1132 *
1133 * Note that the implementation focuses on basic-sampling data entries and, if
1134 * such an entry is not valid, the entire combined-sampling data entry is
1135 * ignored.
1136 *
1137 * The overflow variable counts the number of samples that have been discarded
1138 * due to a perf event overflow.
1139 */
1140static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
1141 unsigned long long *overflow)
1142{
1143 unsigned long flags = SAMPL_FLAGS(&event->hw);
1144 struct hws_combined_entry *sample;
1145 struct hws_trailer_entry *te;
1146 struct sf_raw_sample *sfr;
1147 size_t sample_size;
1148
1149 /* Prepare and initialize raw sample data */
1150 sfr = (struct sf_raw_sample *) RAWSAMPLE_REG(&event->hw);
1151 sfr->format = flags & PERF_CPUM_SF_MODE_MASK;
1152
1153 sample_size = event_sample_size(&event->hw);
1154 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
1155 sample = (struct hws_combined_entry *) *sdbt;
1156 while ((unsigned long *) sample < (unsigned long *) te) {
1157 /* Check for an empty sample */
1158 if (!sample->basic.def)
1159 break;
1160
1161 /* Update perf event period */
1162 perf_event_count_update(event, SAMPL_RATE(&event->hw));
1163
1164 /* Check sampling data entry */
1165 if (sample_format_is_valid(sample, flags)) {
1166 /* If an event overflow occurred, the PMU is stopped to
1167 * throttle event delivery. Remaining sample data is
1168 * discarded.
1169 */
1170 if (!*overflow) {
1171 if (sample_is_consistent(sample, flags)) {
1172 /* Deliver sample data to perf */
1173 sfr_store_sample(sfr, sample);
1174 *overflow = perf_push_sample(event, sfr);
1175 }
1176 } else
1177 /* Count discarded samples */
1178 *overflow += 1;
1179 } else {
1180 debug_sample_entry(sample, te, flags);
1181 /* Sample slot is not yet written or other record.
1182 *
1183 * This condition can occur if the buffer was reused
1184 * from a combined basic- and diagnostic-sampling.
1185 * If only basic-sampling is then active, entries are
1186 * written into the larger diagnostic entries.
1187 * This is typically the case for sample-data-blocks
1188 * that are not full. Stop processing when the first
1189 * invalid format is detected.
1190 */
1191 if (!te->f)
1192 break;
1193 }
1194
1195 /* Reset sample slot and advance to next sample */
1196 reset_sample_slot(sample, flags);
1197 sample += sample_size;
1198 }
1199}
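/* Layout assumed by the walk above: each sample-data-block is one 4K page
 * holding sample entries from the page start, with a trailer entry at the
 * very end (assumed 64 bytes here). The trailer_entry_ptr() helper then
 * reduces to plain pointer arithmetic:
 */
#define PAGE_SIZE	4096UL
#define TRAILER_SIZE	64UL	/* assumed sizeof(struct hws_trailer_entry) */

static inline unsigned long *trailer_ptr(unsigned long sdb)
{
	return (unsigned long *)(sdb + PAGE_SIZE - TRAILER_SIZE);
}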
1200
1201/* hw_perf_event_update() - Process sampling buffer
1202 * @event: The perf event
1203 * @flush_all: Flag to also flush partially filled sample-data-blocks
1204 *
1205 * Processes the sampling buffer and creates perf event samples.
1206 * The sampling buffer position is retrieved and saved in the TEAR_REG
1207 * register of the specified perf event.
1208 *
1209 * Only full sample-data-blocks are processed. Specify the flush_all flag
1210 * to also walk through partially filled sample-data-blocks. It is ignored
1211 * if PERF_CPUM_SF_FULL_BLOCKS is set. The PERF_CPUM_SF_FULL_BLOCKS flag
1212 * enforces the processing of full sample-data-blocks only (trailer entries
1213 * with the block-full-indicator bit set).
1214 */
1215static void hw_perf_event_update(struct perf_event *event, int flush_all)
1216{
1217 struct hw_perf_event *hwc = &event->hw;
1218 struct hws_trailer_entry *te;
1219 unsigned long *sdbt;
1220 unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
1221 int done;
1222
1223 if (flush_all && SDB_FULL_BLOCKS(hwc))
1224 flush_all = 0;
1225
1226 sdbt = (unsigned long *) TEAR_REG(hwc);
1227 done = event_overflow = sampl_overflow = num_sdb = 0;
1228 while (!done) {
1229 /* Get the trailer entry of the sample-data-block */
1230 te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
1231
1232 /* Leave loop if no more work to do (block full indicator) */
1233 if (!te->f) {
1234 done = 1;
1235 if (!flush_all)
1236 break;
1237 }
1238
1239 /* Check the sample overflow count */
1240 if (te->overflow)
1241 /* Account sample overflows and, if a particular limit
1242 * is reached, extend the sampling buffer.
1243 * For details, see sfb_account_overflows().
1244 */
1245 sampl_overflow += te->overflow;
1246
1247 /* Timestamps are valid for full sample-data-blocks only */
1248 debug_sprintf_event(sfdbg, 6, "hw_perf_event_update: sdbt=%p "
1249 "overflow=%llu timestamp=0x%llx\n",
1250 sdbt, te->overflow,
1251 (te->f) ? trailer_timestamp(te) : 0ULL);
1252
1253 /* Collect all samples from a single sample-data-block and
1254 * flag if a (perf) event overflow happened. If so, the PMU
1255 * is stopped and remaining samples will be discarded.
1256 */
1257 hw_collect_samples(event, sdbt, &event_overflow);
1258 num_sdb++;
1259
1260 /* Reset trailer (using compare-double-and-swap) */
1261 do {
1262 te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
1263 te_flags |= SDB_TE_ALERT_REQ_MASK;
1264 } while (!cmpxchg_double(&te->flags, &te->overflow,
1265 te->flags, te->overflow,
1266 te_flags, 0ULL));
1267
1268 /* Advance to next sample-data-block */
1269 sdbt++;
1270 if (is_link_entry(sdbt))
1271 sdbt = get_next_sdbt(sdbt);
1272
1273 /* Update event hardware registers */
1274 TEAR_REG(hwc) = (unsigned long) sdbt;
1275
1276 /* Stop processing sample-data if all samples of the current
1277 * sample-data-block were flushed even if it was not full.
1278 */
1279 if (flush_all && done)
1280 break;
1281
1282 /* If an event overflow happened, discard samples by
1283 * processing any remaining sample-data-blocks.
1284 */
1285 if (event_overflow)
1286 flush_all = 1;
1287 }
1288
1289 /* Account sample overflows in the event hardware structure */
1290 if (sampl_overflow)
1291 OVERFLOW_REG(hwc) = DIV_ROUND_UP(OVERFLOW_REG(hwc) +
1292 sampl_overflow, 1 + num_sdb);
1293 if (sampl_overflow || event_overflow)
1294 debug_sprintf_event(sfdbg, 4, "hw_perf_event_update: "
1295 "overflow stats: sample=%llu event=%llu\n",
1296 sampl_overflow, event_overflow);
1297}
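/* A single-word userspace analogue of the trailer-reset retry loop above;
 * the kernel updates the flags/overflow pair atomically with
 * cmpxchg_double(), here only the flags word is shown:
 */
#include <stdatomic.h>
#include <stdint.h>

#define SDB_TE_BUFFER_FULL_MASK	0x8000000000000000ULL
#define SDB_TE_ALERT_REQ_MASK	0x4000000000000000ULL

static void reset_trailer_flags(_Atomic uint64_t *flags)
{
	uint64_t old = atomic_load(flags), val;

	do {
		/* clear the block-full indicator, re-arm the alert request */
		val = (old & ~SDB_TE_BUFFER_FULL_MASK) | SDB_TE_ALERT_REQ_MASK;
	} while (!atomic_compare_exchange_weak(flags, &old, val));
}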
1298
1299static void cpumsf_pmu_read(struct perf_event *event)
1300{
1301 /* Nothing to do ... updates are interrupt-driven */
1302}
1303
1304/* Activate sampling control.
1305 * Next call of pmu_enable() starts sampling.
1306 */
1307static void cpumsf_pmu_start(struct perf_event *event, int flags)
1308{
1309 struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
1310
1311 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1312 return;
1313
1314 if (flags & PERF_EF_RELOAD)
1315 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1316
1317 perf_pmu_disable(event->pmu);
1318 event->hw.state = 0;
1319 cpuhw->lsctl.cs = 1;
1320 if (SAMPL_DIAG_MODE(&event->hw))
1321 cpuhw->lsctl.cd = 1;
1322 perf_pmu_enable(event->pmu);
1323}
1324
1325/* Deactivate sampling control.
1326 * Next call of pmu_enable() stops sampling.
1327 */
1328static void cpumsf_pmu_stop(struct perf_event *event, int flags)
1329{
1330 struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
1331
1332 if (event->hw.state & PERF_HES_STOPPED)
1333 return;
1334
1335 perf_pmu_disable(event->pmu);
1336 cpuhw->lsctl.cs = 0;
1337 cpuhw->lsctl.cd = 0;
1338 event->hw.state |= PERF_HES_STOPPED;
1339
1340 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
1341 hw_perf_event_update(event, 1);
1342 event->hw.state |= PERF_HES_UPTODATE;
1343 }
1344 perf_pmu_enable(event->pmu);
1345}
1346
1347static int cpumsf_pmu_add(struct perf_event *event, int flags)
1348{
1349 struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
1350 int err;
1351
1352 if (cpuhw->flags & PMU_F_IN_USE)
1353 return -EAGAIN;
1354
1355 if (!cpuhw->sfb.sdbt)
1356 return -EINVAL;
1357
1358 err = 0;
1359 perf_pmu_disable(event->pmu);
1360
1361 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1362
1363 /* Set up sampling controls. Always program the sampling register
1364 * using the SDB-table start. Reset TEAR_REG event hardware register
1365 * that is used by hw_perf_event_update() to store the sampling buffer
1366 * position after samples have been flushed.
1367 */
1368 cpuhw->lsctl.s = 0;
1369 cpuhw->lsctl.h = 1;
1370 cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt;
1371 cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt;
1372 cpuhw->lsctl.interval = SAMPL_RATE(&event->hw);
1373 hw_reset_registers(&event->hw, cpuhw->sfb.sdbt);
1374
1375 /* Ensure sampling functions are in the disabled state. If disabled,
1376 * switch on sampling enable control. */
1377 if (WARN_ON_ONCE(cpuhw->lsctl.es == 1 || cpuhw->lsctl.ed == 1)) {
1378 err = -EAGAIN;
1379 goto out;
1380 }
1381 cpuhw->lsctl.es = 1;
1382 if (SAMPL_DIAG_MODE(&event->hw))
1383 cpuhw->lsctl.ed = 1;
1384
1385 /* Set in_use flag and store event */
1386 event->hw.idx = 0; /* only one sampling event per CPU supported */
1387 cpuhw->event = event;
1388 cpuhw->flags |= PMU_F_IN_USE;
1389
1390 if (flags & PERF_EF_START)
1391 cpumsf_pmu_start(event, PERF_EF_RELOAD);
1392out:
1393 perf_event_update_userpage(event);
1394 perf_pmu_enable(event->pmu);
1395 return err;
1396}
1397
1398static void cpumsf_pmu_del(struct perf_event *event, int flags)
1399{
1400 struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
1401
1402 perf_pmu_disable(event->pmu);
1403 cpumsf_pmu_stop(event, PERF_EF_UPDATE);
1404
1405 cpuhw->lsctl.es = 0;
1406 cpuhw->lsctl.ed = 0;
1407 cpuhw->flags &= ~PMU_F_IN_USE;
1408 cpuhw->event = NULL;
1409
1410 perf_event_update_userpage(event);
1411 perf_pmu_enable(event->pmu);
1412}
1413
1414static int cpumsf_pmu_event_idx(struct perf_event *event)
1415{
1416 return event->hw.idx;
1417}
1418
1419CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
1420CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1421
1422static struct attribute *cpumsf_pmu_events_attr[] = {
1423 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
1424 CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
1425 NULL,
1426};
1427
1428PMU_FORMAT_ATTR(event, "config:0-63");
1429
1430static struct attribute *cpumsf_pmu_format_attr[] = {
1431 &format_attr_event.attr,
1432 NULL,
1433};
1434
1435static struct attribute_group cpumsf_pmu_events_group = {
1436 .name = "events",
1437 .attrs = cpumsf_pmu_events_attr,
1438};
1439static struct attribute_group cpumsf_pmu_format_group = {
1440 .name = "format",
1441 .attrs = cpumsf_pmu_format_attr,
1442};
1443static const struct attribute_group *cpumsf_pmu_attr_groups[] = {
1444 &cpumsf_pmu_events_group,
1445 &cpumsf_pmu_format_group,
1446 NULL,
1447};
1448
1449static struct pmu cpumf_sampling = {
1450 .pmu_enable = cpumsf_pmu_enable,
1451 .pmu_disable = cpumsf_pmu_disable,
1452
1453 .event_init = cpumsf_pmu_event_init,
1454 .add = cpumsf_pmu_add,
1455 .del = cpumsf_pmu_del,
1456
1457 .start = cpumsf_pmu_start,
1458 .stop = cpumsf_pmu_stop,
1459 .read = cpumsf_pmu_read,
1460
1461 .event_idx = cpumsf_pmu_event_idx,
1462 .attr_groups = cpumsf_pmu_attr_groups,
1463};
1464
1465static void cpumf_measurement_alert(struct ext_code ext_code,
1466 unsigned int alert, unsigned long unused)
1467{
1468 struct cpu_hw_sf *cpuhw;
1469
1470 if (!(alert & CPU_MF_INT_SF_MASK))
1471 return;
1472 inc_irq_stat(IRQEXT_CMS);
1473 cpuhw = &__get_cpu_var(cpu_hw_sf);
1474
1475 /* Measurement alerts are shared and might happen when the PMU
1476 * is not reserved. Ignore these alerts in this case. */
1477 if (!(cpuhw->flags & PMU_F_RESERVED))
1478 return;
1479
1480 /* The processing below must take care of multiple alert events that
1481 * might be indicated concurrently. */
1482
1483 /* Program alert request */
1484 if (alert & CPU_MF_INT_SF_PRA) {
1485 if (cpuhw->flags & PMU_F_IN_USE)
1486 hw_perf_event_update(cpuhw->event, 0);
1487 else
1488 WARN_ON_ONCE(!(cpuhw->flags & PMU_F_IN_USE));
1489 }
1490
1491 /* Report measurement alerts only for non-PRA codes */
1492 if (alert != CPU_MF_INT_SF_PRA)
1493 debug_sprintf_event(sfdbg, 6, "measurement alert: 0x%x\n", alert);
1494
1495 /* Sampling authorization change request */
1496 if (alert & CPU_MF_INT_SF_SACA)
1497 qsi(&cpuhw->qsi);
1498
1499 /* Loss of sample data due to high-priority machine activities */
1500 if (alert & CPU_MF_INT_SF_LSDA) {
1501 pr_err("Sample data was lost\n");
1502 cpuhw->flags |= PMU_F_ERR_LSDA;
1503 sf_disable();
1504 }
1505
1506 /* Invalid sampling buffer entry */
1507 if (alert & (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE)) {
1508 pr_err("A sampling buffer entry is incorrect (alert=0x%x)\n",
1509 alert);
1510 cpuhw->flags |= PMU_F_ERR_IBE;
1511 sf_disable();
1512 }
1513}
1514
1515static int cpumf_pmu_notifier(struct notifier_block *self,
1516 unsigned long action, void *hcpu)
1517{
1518 unsigned int cpu = (long) hcpu;
1519 int flags;
1520
1521 /* Ignore the notification if no events are scheduled on the PMU.
1522 * This might be racy...
1523 */
1524 if (!atomic_read(&num_events))
1525 return NOTIFY_OK;
1526
1527 switch (action & ~CPU_TASKS_FROZEN) {
1528 case CPU_ONLINE:
1529 case CPU_ONLINE_FROZEN:
1530 flags = PMC_INIT;
1531 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
1532 break;
1533 case CPU_DOWN_PREPARE:
1534 flags = PMC_RELEASE;
1535 smp_call_function_single(cpu, setup_pmc_cpu, &flags, 1);
1536 break;
1537 default:
1538 break;
1539 }
1540
1541 return NOTIFY_OK;
1542}
1543
1544static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
1545{
1546 if (!cpum_sf_avail())
1547 return -ENODEV;
1548 return sprintf(buffer, "%lu,%lu", CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
1549}
1550
1551static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
1552{
1553 int rc;
1554 unsigned long min, max;
1555
1556 if (!cpum_sf_avail())
1557 return -ENODEV;
1558 if (!val || !strlen(val))
1559 return -EINVAL;
1560
1561 /* Valid parameter values: "min,max" or "max" */
1562 min = CPUM_SF_MIN_SDB;
1563 max = CPUM_SF_MAX_SDB;
1564 if (strchr(val, ','))
1565 rc = (sscanf(val, "%lu,%lu", &min, &max) == 2) ? 0 : -EINVAL;
1566 else
1567 rc = kstrtoul(val, 10, &max);
1568
1569 if (min < 2 || min >= max || max > get_num_physpages())
1570 rc = -EINVAL;
1571 if (rc)
1572 return rc;
1573
1574 sfb_set_limits(min, max);
1575 pr_info("The sampling buffer limits have changed to: "
1576 "min=%lu max=%lu (diag=x%lu)\n",
1577 CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB, CPUM_SF_SDB_DIAG_FACTOR);
1578 return 0;
1579}
1580
1581#define param_check_sfb_size(name, p) __param_check(name, p, void)
1582static struct kernel_param_ops param_ops_sfb_size = {
1583 .set = param_set_sfb_size,
1584 .get = param_get_sfb_size,
1585};
1586
1587#define RS_INIT_FAILURE_QSI 0x0001
1588#define RS_INIT_FAILURE_BSDES 0x0002
1589#define RS_INIT_FAILURE_ALRT 0x0003
1590#define RS_INIT_FAILURE_PERF 0x0004
1591static void __init pr_cpumsf_err(unsigned int reason)
1592{
1593 pr_err("Sampling facility support for perf is not available: "
1594 "reason=%04x\n", reason);
1595}
1596
1597static int __init init_cpum_sampling_pmu(void)
1598{
1599 struct hws_qsi_info_block si;
1600 int err;
1601
1602 if (!cpum_sf_avail())
1603 return -ENODEV;
1604
1605 memset(&si, 0, sizeof(si));
1606 if (qsi(&si)) {
1607 pr_cpumsf_err(RS_INIT_FAILURE_QSI);
1608 return -ENODEV;
1609 }
1610
1611 if (si.bsdes != sizeof(struct hws_basic_entry)) {
1612 pr_cpumsf_err(RS_INIT_FAILURE_BSDES);
1613 return -EINVAL;
1614 }
1615
1616 if (si.ad)
1617 sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
1618
1619 sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
1620 if (!sfdbg)
1621 pr_err("Registering for s390dbf failed\n");
1622 debug_register_view(sfdbg, &debug_sprintf_view);
1623
1624 err = register_external_interrupt(0x1407, cpumf_measurement_alert);
1625 if (err) {
1626 pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
1627 goto out;
1628 }
1629
1630 err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
1631 if (err) {
1632 pr_cpumsf_err(RS_INIT_FAILURE_PERF);
1633 unregister_external_interrupt(0x1407, cpumf_measurement_alert);
1634 goto out;
1635 }
1636 perf_cpu_notifier(cpumf_pmu_notifier);
1637out:
1638 return err;
1639}
1640arch_initcall(init_cpum_sampling_pmu);
1641core_param(cpum_sfb_size, CPUM_SF_MAX_SDB, sfb_size, 0640);
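/* Example usage of the parameter registered above, e.g. on the kernel
 * command line at boot (values are illustrative):
 *     cpum_sfb_size=64,1024    minimum=64, maximum=1024 SDBs
 *     cpum_sfb_size=1024       maximum only; the minimum keeps its default
 */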
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 2343c218b8f9..5d2dfa31c4ef 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Performance event support for s390x 2 * Performance event support for s390x
3 * 3 *
4 * Copyright IBM Corp. 2012 4 * Copyright IBM Corp. 2012, 2013
5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> 5 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -16,15 +16,19 @@
16#include <linux/kvm_host.h> 16#include <linux/kvm_host.h>
17#include <linux/percpu.h> 17#include <linux/percpu.h>
18#include <linux/export.h> 18#include <linux/export.h>
19#include <linux/seq_file.h>
20#include <linux/spinlock.h>
21#include <linux/sysfs.h>
19#include <asm/irq.h> 22#include <asm/irq.h>
20#include <asm/cpu_mf.h> 23#include <asm/cpu_mf.h>
21#include <asm/lowcore.h> 24#include <asm/lowcore.h>
22#include <asm/processor.h> 25#include <asm/processor.h>
26#include <asm/sysinfo.h>
23 27
24const char *perf_pmu_name(void) 28const char *perf_pmu_name(void)
25{ 29{
26 if (cpum_cf_avail() || cpum_sf_avail()) 30 if (cpum_cf_avail() || cpum_sf_avail())
27 return "CPU-measurement facilities (CPUMF)"; 31 return "CPU-Measurement Facilities (CPU-MF)";
28 return "pmu"; 32 return "pmu";
29} 33}
30EXPORT_SYMBOL(perf_pmu_name); 34EXPORT_SYMBOL(perf_pmu_name);
@@ -35,6 +39,8 @@ int perf_num_counters(void)
35 39
36 if (cpum_cf_avail()) 40 if (cpum_cf_avail())
37 num += PERF_CPUM_CF_MAX_CTR; 41 num += PERF_CPUM_CF_MAX_CTR;
42 if (cpum_sf_avail())
43 num += PERF_CPUM_SF_MAX_CTR;
38 44
39 return num; 45 return num;
40} 46}
@@ -54,7 +60,7 @@ static bool is_in_guest(struct pt_regs *regs)
54{ 60{
55 if (user_mode(regs)) 61 if (user_mode(regs))
56 return false; 62 return false;
57#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 63#if IS_ENABLED(CONFIG_KVM)
58 return instruction_pointer(regs) == (unsigned long) &sie_exit; 64 return instruction_pointer(regs) == (unsigned long) &sie_exit;
59#else 65#else
60 return false; 66 return false;
@@ -83,8 +89,31 @@ static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
83 : PERF_RECORD_MISC_GUEST_KERNEL; 89 : PERF_RECORD_MISC_GUEST_KERNEL;
84} 90}
85 91
92static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
93{
94 struct perf_sf_sde_regs *sde_regs;
95 unsigned long flags;
96
97 sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
98 if (sde_regs->in_guest)
99 flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
100 : PERF_RECORD_MISC_GUEST_KERNEL;
101 else
102 flags = user_mode(regs) ? PERF_RECORD_MISC_USER
103 : PERF_RECORD_MISC_KERNEL;
104 return flags;
105}
106
86unsigned long perf_misc_flags(struct pt_regs *regs) 107unsigned long perf_misc_flags(struct pt_regs *regs)
87{ 108{
109 /* Check if the cpum_sf PMU has created the pt_regs structure.
110 * In this case, perf misc flags can be easily extracted. Otherwise,
111 * do regular checks on the pt_regs content.
112 */
113 if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
114 if (!regs->gprs[15])
115 return perf_misc_flags_sf(regs);
116
88 if (is_in_guest(regs)) 117 if (is_in_guest(regs))
89 return perf_misc_guest_flags(regs); 118 return perf_misc_guest_flags(regs);
90 119
@@ -92,27 +121,107 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
92 : PERF_RECORD_MISC_KERNEL; 121 : PERF_RECORD_MISC_KERNEL;
93} 122}
94 123
95void perf_event_print_debug(void) 124void print_debug_cf(void)
96{ 125{
97 struct cpumf_ctr_info cf_info; 126 struct cpumf_ctr_info cf_info;
98 unsigned long flags; 127 int cpu = smp_processor_id();
99 int cpu;
100
101 if (!cpum_cf_avail())
102 return;
103
104 local_irq_save(flags);
105 128
106 cpu = smp_processor_id();
107 memset(&cf_info, 0, sizeof(cf_info)); 129 memset(&cf_info, 0, sizeof(cf_info));
108 if (!qctri(&cf_info)) 130 if (!qctri(&cf_info))
109 pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n", 131 pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
110 cpu, cf_info.cfvn, cf_info.csvn, 132 cpu, cf_info.cfvn, cf_info.csvn,
111 cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl); 133 cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
134}
135
136static void print_debug_sf(void)
137{
138 struct hws_qsi_info_block si;
139 int cpu = smp_processor_id();
112 140
141 memset(&si, 0, sizeof(si));
142 if (qsi(&si))
143 return;
144
145 pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
146 cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
147 si.cpu_speed);
148
149 if (si.as)
150 pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
151 " bsdes=%i tear=%016lx dear=%016lx\n", cpu,
152 si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
153 if (si.ad)
154 pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
155 " dsdes=%i tear=%016lx dear=%016lx\n", cpu,
156 si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
157}
158
159void perf_event_print_debug(void)
160{
161 unsigned long flags;
162
163 local_irq_save(flags);
164 if (cpum_cf_avail())
165 print_debug_cf();
166 if (cpum_sf_avail())
167 print_debug_sf();
113 local_irq_restore(flags); 168 local_irq_restore(flags);
114} 169}
115 170
171/* Service level infrastructure */
172static void sl_print_counter(struct seq_file *m)
173{
174 struct cpumf_ctr_info ci;
175
176 memset(&ci, 0, sizeof(ci));
177 if (qctri(&ci))
178 return;
179
180 seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
181 "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
182}
183
184static void sl_print_sampling(struct seq_file *m)
185{
186 struct hws_qsi_info_block si;
187
188 memset(&si, 0, sizeof(si));
189 if (qsi(&si))
190 return;
191
192 if (!si.as && !si.ad)
193 return;
194
195 seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
196 " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
197 si.cpu_speed);
198 if (si.as)
199 seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
200 " sample_size=%u\n", si.bsdes);
201 if (si.ad)
202 seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
203 " sample_size=%u\n", si.dsdes);
204}
205
206static void service_level_perf_print(struct seq_file *m,
207 struct service_level *sl)
208{
209 if (cpum_cf_avail())
210 sl_print_counter(m);
211 if (cpum_sf_avail())
212 sl_print_sampling(m);
213}
214
215static struct service_level service_level_perf = {
216 .seq_print = service_level_perf_print,
217};
218
219static int __init service_level_perf_register(void)
220{
221 return register_service_level(&service_level_perf);
222}
223arch_initcall(service_level_perf_register);
224
116/* See also arch/s390/kernel/traps.c */ 225/* See also arch/s390/kernel/traps.c */
117static unsigned long __store_trace(struct perf_callchain_entry *entry, 226static unsigned long __store_trace(struct perf_callchain_entry *entry,
118 unsigned long sp, 227 unsigned long sp,
@@ -172,3 +281,44 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
172 __store_trace(entry, head, S390_lowcore.thread_info, 281 __store_trace(entry, head, S390_lowcore.thread_info,
173 S390_lowcore.thread_info + THREAD_SIZE); 282 S390_lowcore.thread_info + THREAD_SIZE);
174} 283}
284
285/* Perf definitions for PMU event attributes in sysfs */
286ssize_t cpumf_events_sysfs_show(struct device *dev,
287 struct device_attribute *attr, char *page)
288{
289 struct perf_pmu_events_attr *pmu_attr;
290
291 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
292 return sprintf(page, "event=0x%04llx,name=%s\n",
293 pmu_attr->id, attr->attr.name);
294}
295
296/* Reserve/release functions for sharing perf hardware */
297static DEFINE_SPINLOCK(perf_hw_owner_lock);
298static void *perf_sampling_owner;
299
300int perf_reserve_sampling(void)
301{
302 int err;
303
304 err = 0;
305 spin_lock(&perf_hw_owner_lock);
306 if (perf_sampling_owner) {
307 pr_warn("The sampling facility is already reserved by %p\n",
308 perf_sampling_owner);
309 err = -EBUSY;
310 } else
311 perf_sampling_owner = __builtin_return_address(0);
312 spin_unlock(&perf_hw_owner_lock);
313 return err;
314}
315EXPORT_SYMBOL(perf_reserve_sampling);
316
317void perf_release_sampling(void)
318{
319 spin_lock(&perf_hw_owner_lock);
320 WARN_ON(!perf_sampling_owner);
321 perf_sampling_owner = NULL;
322 spin_unlock(&perf_hw_owner_lock);
323}
324EXPORT_SYMBOL(perf_release_sampling);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7ed0d4e2a435..dd145321d215 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -261,20 +261,18 @@ static inline unsigned long brk_rnd(void)
261 261
262unsigned long arch_randomize_brk(struct mm_struct *mm) 262unsigned long arch_randomize_brk(struct mm_struct *mm)
263{ 263{
264 unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd()); 264 unsigned long ret;
265 265
266 if (ret < mm->brk) 266 ret = PAGE_ALIGN(mm->brk + brk_rnd());
267 return mm->brk; 267 return (ret > mm->brk) ? ret : mm->brk;
268 return ret;
269} 268}
270 269
271unsigned long randomize_et_dyn(unsigned long base) 270unsigned long randomize_et_dyn(unsigned long base)
272{ 271{
273 unsigned long ret = PAGE_ALIGN(base + brk_rnd()); 272 unsigned long ret;
274 273
275 if (!(current->flags & PF_RANDOMIZE)) 274 if (!(current->flags & PF_RANDOMIZE))
276 return base; 275 return base;
277 if (ret < base) 276 ret = PAGE_ALIGN(base + brk_rnd());
278 return base; 277 return (ret > base) ? ret : base;
279 return ret;
280} 278}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index e65c91c591e8..f6be6087a0e9 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -56,25 +56,26 @@ void update_cr_regs(struct task_struct *task)
56#ifdef CONFIG_64BIT 56#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */ 57 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) { 58 if (MACHINE_HAS_TE) {
59 unsigned long cr[3], cr_new[3]; 59 unsigned long cr, cr_new;
60 60
61 __ctl_store(cr, 0, 2); 61 __ctl_store(cr, 0, 0);
62 cr_new[1] = cr[1];
63 /* Set or clear transaction execution TXC bit 8. */ 62 /* Set or clear transaction execution TXC bit 8. */
63 cr_new = cr | (1UL << 55);
64 if (task->thread.per_flags & PER_FLAG_NO_TE) 64 if (task->thread.per_flags & PER_FLAG_NO_TE)
65 cr_new[0] = cr[0] & ~(1UL << 55); 65 cr_new &= ~(1UL << 55);
66 else 66 if (cr_new != cr)
67 cr_new[0] = cr[0] | (1UL << 55); 67 __ctl_load(cr, 0, 0);
68 /* Set or clear transaction execution TDC bits 62 and 63. */ 68 /* Set or clear transaction execution TDC bits 62 and 63. */
69 cr_new[2] = cr[2] & ~3UL; 69 __ctl_store(cr, 2, 2);
70 cr_new = cr & ~3UL;
70 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { 71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND) 72 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
72 cr_new[2] |= 1UL; 73 cr_new |= 1UL;
73 else 74 else
74 cr_new[2] |= 2UL; 75 cr_new |= 2UL;
75 } 76 }
76 if (memcmp(&cr_new, &cr, sizeof(cr))) 77 if (cr_new != cr)
77 __ctl_load(cr_new, 0, 2); 78 __ctl_load(cr_new, 2, 2);
78 } 79 }
79#endif 80#endif
80 /* Copy user specified PER registers */ 81 /* Copy user specified PER registers */
@@ -107,15 +108,11 @@ void update_cr_regs(struct task_struct *task)
107void user_enable_single_step(struct task_struct *task) 108void user_enable_single_step(struct task_struct *task)
108{ 109{
109 set_tsk_thread_flag(task, TIF_SINGLE_STEP); 110 set_tsk_thread_flag(task, TIF_SINGLE_STEP);
110 if (task == current)
111 update_cr_regs(task);
112} 111}
113 112
114void user_disable_single_step(struct task_struct *task) 113void user_disable_single_step(struct task_struct *task)
115{ 114{
116 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 115 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
117 if (task == current)
118 update_cr_regs(task);
119} 116}
120 117
121/* 118/*
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 3bac589844a7..9f60467938d1 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -5,7 +5,7 @@
5#ifdef CONFIG_FUNCTION_TRACER 5#ifdef CONFIG_FUNCTION_TRACER
6EXPORT_SYMBOL(_mcount); 6EXPORT_SYMBOL(_mcount);
7#endif 7#endif
8#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) 8#if IS_ENABLED(CONFIG_KVM)
9EXPORT_SYMBOL(sie64a); 9EXPORT_SYMBOL(sie64a);
10EXPORT_SYMBOL(sie_exit); 10EXPORT_SYMBOL(sie_exit);
11#endif 11#endif
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0f3d44ecbfc6..09e2f468f48b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -373,7 +373,7 @@ static void __init setup_lowcore(void)
373 373
374 /* 374 /*
375 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant 375 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
376 * restart data to the absolute zero lowcore. This is necesary if 376 * restart data to the absolute zero lowcore. This is necessary if
377 * PSW restart is done on an offline CPU that has lowcore zero. 377 * PSW restart is done on an offline CPU that has lowcore zero.
378 */ 378 */
379 lc->restart_stack = (unsigned long) restart_stack; 379 lc->restart_stack = (unsigned long) restart_stack;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 958704798f4a..a7125b62a9a6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,7 +59,7 @@ enum {
59}; 59};
60 60
61struct pcpu { 61struct pcpu {
62 struct cpu cpu; 62 struct cpu *cpu;
63 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ 63 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
64 unsigned long async_stack; /* async stack for the cpu */ 64 unsigned long async_stack; /* async stack for the cpu */
65 unsigned long panic_stack; /* panic stack for the cpu */ 65 unsigned long panic_stack; /* panic stack for the cpu */
@@ -159,9 +159,9 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
159{ 159{
160 int order; 160 int order;
161 161
162 set_bit(ec_bit, &pcpu->ec_mask); 162 if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
163 order = pcpu_running(pcpu) ? 163 return;
164 SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL; 164 order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
165 pcpu_sigp_retry(pcpu, order, 0); 165 pcpu_sigp_retry(pcpu, order, 0);
166} 166}
167 167
@@ -965,7 +965,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
965 void *hcpu) 965 void *hcpu)
966{ 966{
967 unsigned int cpu = (unsigned int)(long)hcpu; 967 unsigned int cpu = (unsigned int)(long)hcpu;
968 struct cpu *c = &pcpu_devices[cpu].cpu; 968 struct cpu *c = pcpu_devices[cpu].cpu;
969 struct device *s = &c->dev; 969 struct device *s = &c->dev;
970 int err = 0; 970 int err = 0;
971 971
@@ -982,10 +982,15 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
982 982
983static int smp_add_present_cpu(int cpu) 983static int smp_add_present_cpu(int cpu)
984{ 984{
985 struct cpu *c = &pcpu_devices[cpu].cpu; 985 struct device *s;
986 struct device *s = &c->dev; 986 struct cpu *c;
987 int rc; 987 int rc;
988 988
989 c = kzalloc(sizeof(*c), GFP_KERNEL);
990 if (!c)
991 return -ENOMEM;
992 pcpu_devices[cpu].cpu = c;
993 s = &c->dev;
989 c->hotpluggable = 1; 994 c->hotpluggable = 1;
990 rc = register_cpu(c, cpu); 995 rc = register_cpu(c, cpu);
991 if (rc) 996 if (rc)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 2440602e6df1..d101dae62771 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -275,7 +275,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
275 return -EOPNOTSUPP; 275 return -EOPNOTSUPP;
276 } else { 276 } else {
277 /* 277 /*
278 * Set condition code 3 to stop the guest from issueing channel 278 * Set condition code 3 to stop the guest from issuing channel
279 * I/O instructions. 279 * I/O instructions.
280 */ 280 */
281 kvm_s390_set_psw_cc(vcpu, 3); 281 kvm_s390_set_psw_cc(vcpu, 3);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index dbdab3e7a1a6..0632dc50da78 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -74,8 +74,8 @@ static size_t copy_in_kernel(size_t count, void __user *to,
74 74
75/* 75/*
76 * Returns kernel address for user virtual address. If the returned address is 76 * Returns kernel address for user virtual address. If the returned address is
77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address 77 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
78 * contains the (negative) exception code. 78 * address contains the (negative) exception code.
79 */ 79 */
80#ifdef CONFIG_64BIT 80#ifdef CONFIG_64BIT
81 81
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index e794c88f699a..3584ed9b20a1 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -293,7 +293,7 @@ static int gmap_alloc_table(struct gmap *gmap,
293 * @addr: address in the guest address space 293 * @addr: address in the guest address space
294 * @len: length of the memory area to unmap 294 * @len: length of the memory area to unmap
295 * 295 *
296 * Returns 0 if the unmap succeded, -EINVAL if not. 296 * Returns 0 if the unmap succeeded, -EINVAL if not.
297 */ 297 */
298int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) 298int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
299{ 299{
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(gmap_unmap_segment);
344 * @from: source address in the parent address space 344 * @from: source address in the parent address space
345 * @to: target address in the guest address space 345 * @to: target address in the guest address space
346 * 346 *
347 * Returns 0 if the mmap succeded, -EINVAL or -ENOMEM if not. 347 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
348 */ 348 */
349int gmap_map_segment(struct gmap *gmap, unsigned long from, 349int gmap_map_segment(struct gmap *gmap, unsigned long from,
350 unsigned long to, unsigned long len) 350 unsigned long to, unsigned long len)
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index 231cecafc2f1..a32c96761eab 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -26,9 +26,6 @@
26#define MAX_NUM_SDB 511 26#define MAX_NUM_SDB 511
27#define MIN_NUM_SDB 1 27#define MIN_NUM_SDB 1
28 28
29#define ALERT_REQ_MASK 0x4000000000000000ul
30#define BUFFER_FULL_MASK 0x8000000000000000ul
31
32DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer); 29DECLARE_PER_CPU(struct hws_cpu_buffer, sampler_cpu_buffer);
33 30
34struct hws_execute_parms { 31struct hws_execute_parms {
@@ -44,6 +41,7 @@ static DEFINE_MUTEX(hws_sem_oom);
44 41
45static unsigned char hws_flush_all; 42static unsigned char hws_flush_all;
46static unsigned int hws_oom; 43static unsigned int hws_oom;
44static unsigned int hws_alert;
47static struct workqueue_struct *hws_wq; 45static struct workqueue_struct *hws_wq;
48 46
49static unsigned int hws_state; 47static unsigned int hws_state;
@@ -65,43 +63,6 @@ static unsigned long interval;
 static unsigned long min_sampler_rate;
 static unsigned long max_sampler_rate;
 
-static int ssctl(void *buffer)
-{
-	int cc;
-
-	/* set in order to detect a program check */
-	cc = 1;
-
-	asm volatile(
-		"0: .insn s,0xB2870000,0(%1)\n"
-		"1: ipm %0\n"
-		"   srl %0,28\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "+d" (cc), "+a" (buffer)
-		: "m" (*((struct hws_ssctl_request_block *)buffer))
-		: "cc", "memory");
-
-	return cc ? -EINVAL : 0;
-}
-
-static int qsi(void *buffer)
-{
-	int cc;
-	cc = 1;
-
-	asm volatile(
-		"0: .insn s,0xB2860000,0(%1)\n"
-		"1: lhi %0,0\n"
-		"2:\n"
-		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
-		: "=d" (cc), "+a" (buffer)
-		: "m" (*((struct hws_qsi_info_block *)buffer))
-		: "cc", "memory");
-
-	return cc ? -EINVAL : 0;
-}
-
 static void execute_qsi(void *parms)
 {
 	struct hws_execute_parms *ep = parms;
@@ -113,7 +74,7 @@ static void execute_ssctl(void *parms)
 {
 	struct hws_execute_parms *ep = parms;
 
-	ep->rc = ssctl(ep->buffer);
+	ep->rc = lsctl(ep->buffer);
 }
 
 static int smp_ctl_ssctl_stop(int cpu)
@@ -214,17 +175,6 @@ static int smp_ctl_qsi(int cpu)
 	return ep.rc;
 }
 
-static inline unsigned long *trailer_entry_ptr(unsigned long v)
-{
-	void *ret;
-
-	ret = (void *)v;
-	ret += PAGE_SIZE;
-	ret -= sizeof(struct hws_trailer_entry);
-
-	return (unsigned long *) ret;
-}
-
 static void hws_ext_handler(struct ext_code ext_code,
 			    unsigned int param32, unsigned long param64)
 {
@@ -233,6 +183,9 @@ static void hws_ext_handler(struct ext_code ext_code,
 	if (!(param32 & CPU_MF_INT_SF_MASK))
 		return;
 
+	if (!hws_alert)
+		return;
+
 	inc_irq_stat(IRQEXT_CMS);
 	atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
 
@@ -256,16 +209,6 @@ static void init_all_cpu_buffers(void)
 	}
 }
 
-static int is_link_entry(unsigned long *s)
-{
-	return *s & 0x1ul ? 1 : 0;
-}
-
-static unsigned long *get_next_sdbt(unsigned long *s)
-{
-	return (unsigned long *) (*s & ~0x1ul);
-}
-
 static int prepare_cpu_buffers(void)
 {
 	int cpu;
@@ -353,7 +296,7 @@ static int allocate_sdbt(int cpu)
 		}
 		*sdbt = sdb;
 		trailer = trailer_entry_ptr(*sdbt);
-		*trailer = ALERT_REQ_MASK;
+		*trailer = SDB_TE_ALERT_REQ_MASK;
 		sdbt++;
 		mutex_unlock(&hws_sem_oom);
 	}
@@ -829,7 +772,7 @@ static void worker_on_interrupt(unsigned int cpu)
 
 		trailer = trailer_entry_ptr(*sdbt);
 		/* leave loop if no more work to do */
-		if (!(*trailer & BUFFER_FULL_MASK)) {
+		if (!(*trailer & SDB_TE_BUFFER_FULL_MASK)) {
 			done = 1;
 			if (!hws_flush_all)
 				continue;
@@ -856,7 +799,7 @@ static void worker_on_interrupt(unsigned int cpu)
 static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
 				    unsigned long *dear)
 {
-	struct hws_data_entry *sample_data_ptr;
+	struct hws_basic_entry *sample_data_ptr;
 	unsigned long *trailer;
 
 	trailer = trailer_entry_ptr(*sdbt);
@@ -866,7 +809,7 @@ static void add_samples_to_oprofile(unsigned int cpu, unsigned long *sdbt,
 		trailer = dear;
 	}
 
-	sample_data_ptr = (struct hws_data_entry *)(*sdbt);
+	sample_data_ptr = (struct hws_basic_entry *)(*sdbt);
 
 	while ((unsigned long *)sample_data_ptr < trailer) {
 		struct pt_regs *regs = NULL;
@@ -1002,6 +945,7 @@ int hwsampler_deallocate(void)
 		goto deallocate_exit;
 
 	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+	hws_alert = 0;
 	deallocate_sdbt();
 
 	hws_state = HWS_DEALLOCATED;
@@ -1116,6 +1060,7 @@ int hwsampler_shutdown(void)
 
 	if (hws_state == HWS_STOPPED) {
 		irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+		hws_alert = 0;
 		deallocate_sdbt();
 	}
 	if (hws_wq) {
@@ -1190,6 +1135,7 @@ start_all_exit:
 	hws_oom = 1;
 	hws_flush_all = 0;
 	/* now let them in, 1407 CPUMF external interrupts */
+	hws_alert = 1;
 	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
 
 	return 0;
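
Beyond replacing the private ssctl()/qsi() wrappers and SDB helpers with the shared <asm/cpu_mf.h> versions, the hwsampler change above gates the measurement-alert external interrupt on a new hws_alert flag, so alerts that arrive after shutdown are ignored. A compact userspace model of that gate (the flag name mirrors the patch; the handler body is illustrative):

/* Model of the hws_alert gating: alerts may still arrive after the
 * sampler is torn down, so the handler bails out unless active. */
#include <stdio.h>

static int hws_alert;			/* 1 only between start_all and shutdown */

static void ext_handler(unsigned int param32)
{
	if (!hws_alert)
		return;			/* late/stale alert: drop it */
	printf("handling alert 0x%x\n", param32);
}

int main(void)
{
	ext_handler(0x1407);		/* dropped: sampler not started */
	hws_alert = 1;			/* start_all_exit path */
	ext_handler(0x1407);		/* processed */
	hws_alert = 0;			/* shutdown/deallocate path */
	ext_handler(0x1407);		/* dropped again */
	return 0;
}
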
diff --git a/arch/s390/oprofile/hwsampler.h b/arch/s390/oprofile/hwsampler.h
index 0022e1ebfbde..a483d06f2fa7 100644
--- a/arch/s390/oprofile/hwsampler.h
+++ b/arch/s390/oprofile/hwsampler.h
@@ -9,27 +9,7 @@
 #define HWSAMPLER_H_
 
 #include <linux/workqueue.h>
-
-struct hws_qsi_info_block	/* QUERY SAMPLING information block */
-{				/* Bit(s) */
-	unsigned int b0_13:14;		/* 0-13: zeros */
-	unsigned int as:1;		/* 14: sampling authorisation control*/
-	unsigned int b15_21:7;		/* 15-21: zeros */
-	unsigned int es:1;		/* 22: sampling enable control */
-	unsigned int b23_29:7;		/* 23-29: zeros */
-	unsigned int cs:1;		/* 30: sampling activation control */
-	unsigned int:1;			/* 31: reserved */
-	unsigned int bsdes:16;		/* 4-5: size of sampling entry */
-	unsigned int:16;		/* 6-7: reserved */
-	unsigned long min_sampl_rate;	/* 8-15: minimum sampling interval */
-	unsigned long max_sampl_rate;	/* 16-23: maximum sampling interval*/
-	unsigned long tear;		/* 24-31: TEAR contents */
-	unsigned long dear;		/* 32-39: DEAR contents */
-	unsigned int rsvrd0;		/* 40-43: reserved */
-	unsigned int cpu_speed;		/* 44-47: CPU speed */
-	unsigned long long rsvrd1;	/* 48-55: reserved */
-	unsigned long long rsvrd2;	/* 56-63: reserved */
-};
+#include <asm/cpu_mf.h>
 
 struct hws_ssctl_request_block	/* SET SAMPLING CONTROLS req block */
 {				/* bytes 0 - 7  Bit(s) */
@@ -68,36 +48,6 @@ struct hws_cpu_buffer {
 	unsigned int stop_mode:1;
 };
 
-struct hws_data_entry {
-	unsigned int def:16;		/* 0-15  Data Entry Format */
-	unsigned int R:4;		/* 16-19 reserved */
-	unsigned int U:4;		/* 20-23 Number of unique instruct. */
-	unsigned int z:2;		/* zeros */
-	unsigned int T:1;		/* 26 PSW DAT mode */
-	unsigned int W:1;		/* 27 PSW wait state */
-	unsigned int P:1;		/* 28 PSW Problem state */
-	unsigned int AS:2;		/* 29-30 PSW address-space control */
-	unsigned int I:1;		/* 31 entry valid or invalid */
-	unsigned int:16;
-	unsigned int prim_asn:16;	/* primary ASN */
-	unsigned long long ia;		/* Instruction Address */
-	unsigned long long gpp;		/* Guest Program Parameter */
-	unsigned long long hpp;		/* Host Program Parameter */
-};
-
-struct hws_trailer_entry {
-	unsigned int f:1;		/* 0 - Block Full Indicator */
-	unsigned int a:1;		/* 1 - Alert request control */
-	unsigned long:62;		/* 2 - 63: Reserved */
-	unsigned long overflow;		/* 64 - sample Overflow count */
-	unsigned long timestamp;	/* 16 - time-stamp */
-	unsigned long timestamp1;	/* */
-	unsigned long reserved1;	/* 32 - Reserved */
-	unsigned long reserved2;	/* */
-	unsigned long progusage1;	/* 48 - reserved for programming use */
-	unsigned long progusage2;	/* */
-};
-
 int hwsampler_setup(void);
 int hwsampler_shutdown(void);
 int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 04e1b6a85362..9ffe645d5989 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/oprofile.h>
+#include <linux/perf_event.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -67,6 +68,21 @@ module_param_call(cpu_type, set_cpu_type, NULL, NULL, 0);
 MODULE_PARM_DESC(cpu_type, "Force legacy basic mode sampling"
 		"(report cpu_type \"timer\"");
 
+static int __oprofile_hwsampler_start(void)
+{
+	int retval;
+
+	retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+	if (retval)
+		return retval;
+
+	retval = hwsampler_start_all(oprofile_hw_interval);
+	if (retval)
+		hwsampler_deallocate();
+
+	return retval;
+}
+
 static int oprofile_hwsampler_start(void)
 {
 	int retval;
@@ -76,13 +92,13 @@ static int oprofile_hwsampler_start(void)
 	if (!hwsampler_running)
 		return timer_ops.start();
 
-	retval = hwsampler_allocate(oprofile_sdbt_blocks, oprofile_sdb_blocks);
+	retval = perf_reserve_sampling();
 	if (retval)
 		return retval;
 
-	retval = hwsampler_start_all(oprofile_hw_interval);
+	retval = __oprofile_hwsampler_start();
 	if (retval)
-		hwsampler_deallocate();
+		perf_release_sampling();
 
 	return retval;
 }
@@ -96,6 +112,7 @@ static void oprofile_hwsampler_stop(void)
 
 	hwsampler_stop_all();
 	hwsampler_deallocate();
+	perf_release_sampling();
 	return;
 }
 
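
The init.c change splits allocation/start into __oprofile_hwsampler_start() and brackets it with perf_reserve_sampling()/perf_release_sampling(), so oprofile and perf cannot drive the exclusive sampling facility at the same time. A sketch of that reserve-start-rollback shape, with the perf side stubbed out (the real pair is added elsewhere in this series, per the arch/s390/kernel/perf_event.c entry in the diffstat):

/* Userspace model: an exclusive resource reserved before start and
 * released on failure or stop.  Stub internals are illustrative. */
#include <stdio.h>

static int sampling_busy;

static int perf_reserve_sampling(void)
{
	if (sampling_busy)
		return -16;		/* -EBUSY: someone else owns it */
	sampling_busy = 1;
	return 0;
}

static void perf_release_sampling(void)
{
	sampling_busy = 0;
}

static int start_hwsampler(void)	/* stands in for __oprofile_hwsampler_start */
{
	return 0;			/* pretend allocate + start succeeded */
}

static int oprofile_start(void)
{
	int retval = perf_reserve_sampling();

	if (retval)
		return retval;
	retval = start_hwsampler();
	if (retval)
		perf_release_sampling();	/* roll back the reservation */
	return retval;
}

int main(void)
{
	printf("first start:  %d\n", oprofile_start());		/* 0 */
	printf("second claim: %d\n", perf_reserve_sampling());	/* -16, held */
	return 0;
}
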
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bf7c73d71eef..0820362c7b0f 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -919,17 +919,23 @@ static void zpci_mem_exit(void)
 	kmem_cache_destroy(zdev_fmb_cache);
 }
 
-static unsigned int s390_pci_probe;
+static unsigned int s390_pci_probe = 1;
+static unsigned int s390_pci_initialized;
 
 char * __init pcibios_setup(char *str)
 {
-	if (!strcmp(str, "on")) {
-		s390_pci_probe = 1;
+	if (!strcmp(str, "off")) {
+		s390_pci_probe = 0;
 		return NULL;
 	}
 	return str;
 }
 
+bool zpci_is_enabled(void)
+{
+	return s390_pci_initialized;
+}
+
 static int __init pci_base_init(void)
 {
 	int rc;
@@ -961,6 +967,7 @@ static int __init pci_base_init(void)
 	if (rc)
 		goto out_find;
 
+	s390_pci_initialized = 1;
 	return 0;
 
 out_find:
@@ -978,5 +985,6 @@ subsys_initcall_sync(pci_base_init);
 
 void zpci_rescan(void)
 {
-	clp_rescan_pci_devices_simple();
+	if (zpci_is_enabled())
+		clp_rescan_pci_devices_simple();
 }
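
With s390_pci_probe now defaulting to 1, the boot parameter flips from an opt-in "on" to an opt-out "off"; pcibios_setup() returns NULL to consume a recognized option and returns the string to pass unknown ones along. A userspace model of that contract:

/* Model of the pcibios_setup() semantics; values are illustrative. */
#include <stdio.h>
#include <string.h>

static unsigned int s390_pci_probe = 1;	/* enabled by default, as in the patch */

static char *pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;		/* option handled here */
	}
	return str;			/* unknown: let generic code see it */
}

int main(void)
{
	printf("consumed: %s\n", pcibios_setup("off") ? "no" : "yes");
	printf("probe:    %u\n", s390_pci_probe);	/* now 0 */
	printf("passed:   %s\n", pcibios_setup("noacpi"));	/* handed back */
	return 0;
}
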
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 9b83d080902d..60c11a629d96 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -285,7 +285,7 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 		flags |= ZPCI_TABLE_PROTECTED;
 
 	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
-		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
+		atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
 		return dma_addr + (offset & ~PAGE_MASK);
 	}
 
@@ -313,7 +313,7 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 		zpci_err_hex(&dma_addr, sizeof(dma_addr));
 	}
 
-	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
+	atomic64_add(npages, &zdev->fmb->unmapped_pages);
 	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
 	dma_free_iommu(zdev, iommu_page_index, npages);
 }
@@ -332,7 +332,6 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
 	pa = page_to_phys(page);
 	memset((void *) pa, 0, size);
 
@@ -343,6 +342,7 @@ static void *s390_dma_alloc(struct device *dev, size_t size,
 		return NULL;
 	}
 
+	atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
 	if (dma_handle)
 		*dma_handle = map;
 	return (void *) pa;
@@ -352,8 +352,11 @@ static void s390_dma_free(struct device *dev, size_t size,
 			  void *pa, dma_addr_t dma_handle,
 			  struct dma_attrs *attrs)
 {
-	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
-			     DMA_BIDIRECTIONAL, NULL);
+	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+
+	size = PAGE_ALIGN(size);
+	atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
+	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long) pa, get_order(size));
 }
 
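
The pci_dma.c accounting fix moves the allocated_pages increment past the point where the IOMMU mapping can still fail, and adds the matching decrement in s390_dma_free(), so the counter only tracks buffers that really exist. The same ordering in a self-contained model (map_ok stands in for dma_update_trans() succeeding):

/* Model of count-only-on-full-success accounting. */
#include <stdio.h>

static long allocated_pages;

static int dma_alloc(unsigned long pages, int map_ok)
{
	/* page allocation would happen here */
	if (!map_ok)
		return -1;		/* mapping failed: nothing accounted */
	allocated_pages += pages;	/* account only after full success */
	return 0;
}

static void dma_free(unsigned long pages)
{
	allocated_pages -= pages;	/* exact mirror of the alloc-side add */
}

int main(void)
{
	dma_alloc(4, 0);				/* fails, counter untouched */
	dma_alloc(4, 1);				/* succeeds */
	printf("in flight:  %ld\n", allocated_pages);	/* 4 */
	dma_free(4);
	printf("after free: %ld\n", allocated_pages);	/* 0 */
	return 0;
}
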
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 069607209a30..01e251b1da0c 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -43,9 +43,8 @@ struct zpci_ccdf_avail {
 	u16 pec;			/* PCI event code */
 } __packed;
 
-void zpci_event_error(void *data)
+static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 {
-	struct zpci_ccdf_err *ccdf = data;
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
 
 	zpci_err("error CCDF:\n");
@@ -58,9 +57,14 @@ void zpci_event_error(void *data)
 		pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
 }
 
-void zpci_event_availability(void *data)
+void zpci_event_error(void *data)
+{
+	if (zpci_is_enabled())
+		__zpci_event_error(data);
+}
+
+static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
-	struct zpci_ccdf_avail *ccdf = data;
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
 	struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
 	int ret;
@@ -99,8 +103,12 @@ void zpci_event_availability(void *data)
 
 		break;
 	case 0x0304: /* Configured -> Standby */
-		if (pdev)
+		if (pdev) {
+			/* Give the driver a hint that the function is
+			 * already unusable. */
+			pdev->error_state = pci_channel_io_perm_failure;
 			pci_stop_and_remove_bus_device(pdev);
+		}
 
 		zdev->fh = ccdf->fh;
 		zpci_disable_device(zdev);
@@ -110,6 +118,8 @@ void zpci_event_availability(void *data)
 		clp_rescan_pci_devices();
 		break;
 	case 0x0308: /* Standby -> Reserved */
+		if (!zdev)
+			break;
 		pci_stop_root_bus(zdev->bus);
 		pci_remove_root_bus(zdev->bus);
 		break;
@@ -117,3 +127,9 @@ void zpci_event_availability(void *data)
 		break;
 	}
 }
+
+void zpci_event_availability(void *data)
+{
+	if (zpci_is_enabled())
+		__zpci_event_availability(data);
+}
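
pci_event.c now follows the same shape as zpci_rescan(): the exported entry points only check zpci_is_enabled() and delegate to static __-prefixed workers. A stripped-down model of that wrapper pattern (the CCDF payload is reduced to a string for illustration):

/* Model of the gate-then-delegate split used above. */
#include <stdbool.h>
#include <stdio.h>

static bool s390_pci_initialized;

static bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static void __zpci_event_error(const char *ccdf)	/* does the real work */
{
	printf("error CCDF: %s\n", ccdf);
}

void zpci_event_error(void *data)			/* exported gate */
{
	if (zpci_is_enabled())
		__zpci_event_error(data);
}

int main(void)
{
	zpci_event_error("pec=0x3a");	/* dropped: PCI never initialized */
	s390_pci_initialized = true;	/* pci_base_init() succeeded */
	zpci_event_error("pec=0x3a");	/* now handled */
	return 0;
}
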