author    Daniel Vetter <daniel.vetter@ffwll.ch>  2015-11-23 03:04:05 -0500
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2015-11-23 03:04:05 -0500
commit    92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch)
tree      15626ff9287e37c3cb81c7286d6db5a7fd77c854 /include/linux/perf_event.h
parent    15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff)
parent    1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff)
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2
Backmerge to get at
commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date: Thu Nov 5 23:04:11 2015 +0200
drm/i915/skl: disable display side power well support for now
so that we can properly re-enable skl power wells in -next.
Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changes. Nothing nefarious.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h | 120
1 file changed, 103 insertions(+), 17 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 092a0e8a479a..d841d33bcdc9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -140,33 +140,67 @@ struct hw_perf_event {
 	};
 #endif
 	};
+	/*
+	 * If the event is a per task event, this will point to the task in
+	 * question. See the comment in perf_event_alloc().
+	 */
 	struct task_struct		*target;
+
+	/*
+	 * hw_perf_event::state flags; used to track the PERF_EF_* state.
+	 */
+#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
+#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
+#define PERF_HES_ARCH		0x04
+
 	int				state;
+
+	/*
+	 * The last observed hardware counter value, updated with a
+	 * local64_cmpxchg() such that pmu::read() can be called nested.
+	 */
 	local64_t			prev_count;
+
+	/*
+	 * The period to start the next sample with.
+	 */
 	u64				sample_period;
+
+	/*
+	 * The period we started this sample with.
+	 */
 	u64				last_period;
+
+	/*
+	 * However much is left of the current period; note that this is
+	 * a full 64bit value and allows for generation of periods longer
+	 * than hardware might allow.
+	 */
 	local64_t			period_left;
+
+	/*
+	 * State for throttling the event, see __perf_event_overflow() and
+	 * perf_adjust_freq_unthr_context().
+	 */
 	u64				interrupts_seq;
 	u64				interrupts;
 
+	/*
+	 * State for freq target events, see __perf_event_overflow() and
+	 * perf_adjust_freq_unthr_context().
+	 */
 	u64				freq_time_stamp;
 	u64				freq_count_stamp;
 #endif
 };
 
-/*
- * hw_perf_event::state flags
- */
-#define PERF_HES_STOPPED	0x01 /* the counter is stopped */
-#define PERF_HES_UPTODATE	0x02 /* event->count up-to-date */
-#define PERF_HES_ARCH		0x04
-
 struct perf_event;
 
 /*
  * Common implementation detail of pmu::{start,commit,cancel}_txn
  */
-#define PERF_EVENT_TXN 0x1
+#define PERF_PMU_TXN_ADD  0x1	/* txn to add/schedule event on PMU */
+#define PERF_PMU_TXN_READ 0x2	/* txn to read event group from PMU */
 
 /**
  * pmu::capabilities flags
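The comment added above for prev_count describes the lock-free update idiom that lets pmu::read() nest (e.g. an NMI arriving mid-read). Here is a minimal sketch of that idiom, not part of the patch; the mydrv_ prefix and mydrv_read_counter() are hypothetical stand-ins for a real driver's hardware access:

#include <linux/perf_event.h>

/* Hypothetical driver helper: fold the hardware delta into the event. */
static void mydrv_update_count(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, now;

	do {
		/* Retry until no nested pmu::read() raced with us. */
		prev = local64_read(&hwc->prev_count);
		now  = mydrv_read_counter(hwc->idx);	/* hypothetical HW read */
	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	local64_add(now - prev, &event->count);
	/* For sampling PMUs, also consume the software period. */
	local64_sub(now - prev, &hwc->period_left);
}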
@@ -210,7 +244,19 @@ struct pmu {
 
 	/*
 	 * Try and initialize the event for this PMU.
-	 * Should return -ENOENT when the @event doesn't match this PMU.
+	 *
+	 * Returns:
+	 *  -ENOENT	-- @event is not for this PMU
+	 *
+	 *  -ENODEV	-- @event is for this PMU but PMU not present
+	 *  -EBUSY	-- @event is for this PMU but PMU temporarily unavailable
+	 *  -EINVAL	-- @event is for this PMU but @event is not valid
+	 *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
+	 *  -EACCES	-- @event is for this PMU, @event is valid, but no privileges
+	 *
+	 *  0		-- @event is for this PMU and valid
+	 *
+	 * Other error return values are allowed.
 	 */
 	int (*event_init)		(struct perf_event *event);
 
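The expanded event_init contract above distinguishes "not my event" (-ENOENT, so the core tries the next PMU) from "my event, but unusable". A hedged sketch of a driver honoring it; mydrv_pmu and MYDRV_MAX_CONFIG are invented for illustration:

#include <linux/perf_event.h>

static struct pmu mydrv_pmu;		/* hypothetical driver PMU */
#define MYDRV_MAX_CONFIG 0x3f		/* hypothetical config space */

static int mydrv_event_init(struct perf_event *event)
{
	/* Not ours: the core will go on to try the next registered PMU. */
	if (event->attr.type != mydrv_pmu.type)
		return -ENOENT;

	/* Ours, but the requested config does not exist. */
	if (event->attr.config > MYDRV_MAX_CONFIG)
		return -EINVAL;

	/* Ours and valid, but this PMU cannot generate samples. */
	if (is_sampling_event(event))
		return -EOPNOTSUPP;

	return 0;
}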
@@ -221,27 +267,61 @@ struct pmu {
 	void (*event_mapped)		(struct perf_event *event); /*optional*/
 	void (*event_unmapped)		(struct perf_event *event); /*optional*/
 
+	/*
+	 * Flags for ->add()/->del()/ ->start()/->stop(). There are
+	 * matching hw_perf_event::state flags.
+	 */
 #define PERF_EF_START	0x01		/* start the counter when adding    */
 #define PERF_EF_RELOAD	0x02		/* reload the counter when starting */
 #define PERF_EF_UPDATE	0x04		/* update the counter when stopping */
 
 	/*
-	 * Adds/Removes a counter to/from the PMU, can be done inside
-	 * a transaction, see the ->*_txn() methods.
+	 * Adds/Removes a counter to/from the PMU, can be done inside a
+	 * transaction, see the ->*_txn() methods.
+	 *
+	 * The add/del callbacks will reserve all hardware resources required
+	 * to service the event, this includes any counter constraint
+	 * scheduling etc.
+	 *
+	 * Called with IRQs disabled and the PMU disabled on the CPU the event
+	 * is on.
+	 *
+	 * ->add() called without PERF_EF_START should result in the same state
+	 *  as ->add() followed by ->stop().
+	 *
+	 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
+	 *  ->stop() that must deal with already being stopped without
+	 *  PERF_EF_UPDATE.
 	 */
 	int  (*add)			(struct perf_event *event, int flags);
 	void (*del)			(struct perf_event *event, int flags);
 
 	/*
-	 * Starts/Stops a counter present on the PMU. The PMI handler
-	 * should stop the counter when perf_event_overflow() returns
-	 * !0. ->start() will be used to continue.
+	 * Starts/Stops a counter present on the PMU.
+	 *
+	 * The PMI handler should stop the counter when perf_event_overflow()
+	 * returns !0. ->start() will be used to continue.
+	 *
+	 * Also used to change the sample period.
+	 *
+	 * Called with IRQs disabled and the PMU disabled on the CPU the event
+	 * is on -- will be called from NMI context when the PMU generates
+	 * NMIs.
+	 *
+	 * ->stop() with PERF_EF_UPDATE will read the counter and update
+	 *  period/count values like ->read() would.
+	 *
+	 * ->start() with PERF_EF_RELOAD will reprogram the counter
+	 *  value, must be preceded by a ->stop() with PERF_EF_UPDATE.
 	 */
 	void (*start)			(struct perf_event *event, int flags);
 	void (*stop)			(struct perf_event *event, int flags);
 
 	/*
 	 * Updates the counter value of the event.
+	 *
+	 * For sampling capable PMUs this will also update the software period
+	 * hw_perf_event::period_left field.
 	 */
 	void (*read)			(struct perf_event *event);
 
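Taken together, the add/del and start/stop contracts above pair with the PERF_HES_* state bits: events arrive via ->add() in the stopped state, ->del() must update-stop, and ->start(PERF_EF_RELOAD) requires a prior update-stop. A sketch of the four callbacks under those rules, not part of the patch; the mydrv_* helpers are hypothetical, and mydrv_update_count() is the read idiom sketched earlier:

static void mydrv_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Must tolerate being called on an already-stopped event. */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		mydrv_disable_counter(hwc->idx);	/* hypothetical */
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		mydrv_update_count(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void mydrv_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD) {
		/* Only legal after a ->stop() with PERF_EF_UPDATE. */
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
		mydrv_program_period(event);		/* hypothetical */
	}

	hwc->state = 0;
	mydrv_enable_counter(hwc->idx);			/* hypothetical */
}

static int mydrv_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = mydrv_alloc_counter(event);		/* hypothetical constraint scheduling */

	if (idx < 0)
		return idx;

	hwc->idx = idx;
	/* Without PERF_EF_START we must look like ->add() + ->stop(). */
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mydrv_start(event, PERF_EF_RELOAD);

	return 0;
}

static void mydrv_del(struct perf_event *event, int flags)
{
	/* ->del() must always update-stop the event. */
	mydrv_stop(event, PERF_EF_UPDATE);
	mydrv_release_counter(event->hw.idx);		/* hypothetical */
}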
@@ -252,20 +332,26 @@ struct pmu {
 	 *
 	 * Start the transaction, after this ->add() doesn't need to
 	 * do schedulability tests.
+	 *
+	 * Optional.
 	 */
-	void (*start_txn)		(struct pmu *pmu); /* optional */
+	void (*start_txn)		(struct pmu *pmu, unsigned int txn_flags);
 	/*
 	 * If ->start_txn() disabled the ->add() schedulability test
 	 * then ->commit_txn() is required to perform one. On success
 	 * the transaction is closed. On error the transaction is kept
 	 * open until ->cancel_txn() is called.
+	 *
+	 * Optional.
 	 */
-	int  (*commit_txn)		(struct pmu *pmu); /* optional */
+	int  (*commit_txn)		(struct pmu *pmu);
 	/*
 	 * Will cancel the transaction, assumes ->del() is called
 	 * for each successful ->add() during the transaction.
+	 *
+	 * Optional.
 	 */
-	void (*cancel_txn)		(struct pmu *pmu); /* optional */
+	void (*cancel_txn)		(struct pmu *pmu);
 
 	/*
 	 * Will return the value for perf_event_mmap_page::index for this event,
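With PERF_EVENT_TXN replaced by typed transaction flags, ->start_txn() now learns which kind of transaction is opening, and only PERF_PMU_TXN_ADD transactions need the commit-time schedulability test. A sketch of that pattern, loosely modeled on how in-tree drivers stash the flags; the mydrv_ state and helpers are hypothetical, and a real driver would keep txn_flags in per-CPU state rather than a single global:

static unsigned int mydrv_txn_flags;	/* per-PMU/per-CPU in a real driver */

static void mydrv_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	mydrv_txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;			/* e.g. PERF_PMU_TXN_READ: nothing to set up */

	perf_pmu_disable(pmu);		/* batch the upcoming ->add() calls */
}

static int mydrv_commit_txn(struct pmu *pmu)
{
	unsigned int txn_flags = mydrv_txn_flags;

	mydrv_txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return 0;		/* only TXN_ADD needs a schedulability test */

	if (!mydrv_group_schedulable())	/* hypothetical group-wide test */
		return -EAGAIN;

	perf_pmu_enable(pmu);
	return 0;
}

static void mydrv_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags = mydrv_txn_flags;

	mydrv_txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_enable(pmu);		/* core will ->del() what it ->add()ed */
}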