about summary refs log tree commit diff stats
path: root/include/nvgpu/clk_arb.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/nvgpu/clk_arb.h')
-rw-r--r--include/nvgpu/clk_arb.h378
1 file changed, 0 insertions, 378 deletions
diff --git a/include/nvgpu/clk_arb.h b/include/nvgpu/clk_arb.h
deleted file mode 100644
index 43af631..0000000
--- a/include/nvgpu/clk_arb.h
+++ /dev/null
@@ -1,378 +0,0 @@
1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef NVGPU_CLK_ARB_H
24#define NVGPU_CLK_ARB_H
25
26struct gk20a;
27
28#include <nvgpu/types.h>
29#include <nvgpu/bitops.h>
30#include <nvgpu/lock.h>
31#include <nvgpu/kmem.h>
32#include <nvgpu/atomic.h>
33#include <nvgpu/bug.h>
34#include <nvgpu/kref.h>
35#include <nvgpu/log.h>
36#include <nvgpu/barrier.h>
37#include <nvgpu/cond.h>
38
39#include "clk/clk.h"
40#include "pstate/pstate.h"
41#include "lpwr/lpwr.h"
42#include "volt/volt.h"
43
/* Maximum number of frequency points in a VF (voltage/frequency) table. */
#define MAX_F_POINTS 256
/* Default capacity of a clk notification queue
 * (see nvgpu_clk_notification_queue_alloc()). */
#define DEFAULT_EVENT_NUMBER 32

struct nvgpu_clk_dev;
struct nvgpu_clk_arb_target;
struct nvgpu_clk_notification_queue;
struct nvgpu_clk_session;

/* Sentinel returned when a VF point supports no P-state at all. */
#define VF_POINT_INVALID_PSTATE ~0U
/* Mark P-state 'b' as supported by VF point 'a' (sets bit b of a->pstates). */
#define VF_POINT_SET_PSTATE_SUPPORTED(a, b) ((a)->pstates |= (1UL << (b)))
/* Highest P-state supported by VF point 'a', or VF_POINT_INVALID_PSTATE if
 * none.  (__fls presumably returns the index of the most-significant set
 * bit, kernel convention -- confirm in <nvgpu/bitops.h>.) */
#define VF_POINT_GET_PSTATE(a) (((a)->pstates) ?\
	__fls((a)->pstates) :\
	VF_POINT_INVALID_PSTATE)
/* Highest P-state supported by both 'a' and 'b', or
 * VF_POINT_INVALID_PSTATE if the two points share no P-state. */
#define VF_POINT_COMMON_PSTATE(a, b) (((a)->pstates & (b)->pstates) ?\
	__fls((a)->pstates & (b)->pstates) :\
	VF_POINT_INVALID_PSTATE)
60
/*
 * These events, defined in common code, are the counterparts of the uapi
 * events; a conversion function takes care of translating them to the
 * uapi event values.
 */
/* Event associated to a VF update */
#define NVGPU_EVENT_VF_UPDATE				0

/* Recoverable alarms (POLLPRI) */
/* Alarm when target frequency on any session is not possible */
#define NVGPU_EVENT_ALARM_TARGET_VF_NOT_POSSIBLE	1
/* Alarm when target frequency on current session is not possible */
#define NVGPU_EVENT_ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE	2
/* Alarm when Clock Arbiter failed */
#define NVGPU_EVENT_ALARM_CLOCK_ARBITER_FAILED		3
/* Alarm when VF table update failed */
#define NVGPU_EVENT_ALARM_VF_TABLE_UPDATE_FAILED	4
/* Alarm on thermal condition */
#define NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD	5
/* Alarm on power condition */
#define NVGPU_EVENT_ALARM_POWER_ABOVE_THRESHOLD		6

/* Non recoverable alarm (POLLHUP) */
/* Alarm on GPU shutdown/fall from bus */
#define NVGPU_EVENT_ALARM_GPU_LOST			7

#define NVGPU_EVENT_LAST	NVGPU_EVENT_ALARM_GPU_LOST

/* Local Alarms */
/* Bit mask for a single event/alarm number. */
#define EVENT(alarm)	(0x1UL << NVGPU_EVENT_##alarm)

/* Events/alarms delivered per-session rather than globally. */
#define LOCAL_ALARM_MASK (EVENT(ALARM_LOCAL_TARGET_VF_NOT_POSSIBLE) | \
				EVENT(VF_UPDATE))

/*
 * Ordering test on sequence counters via subtraction, so that counter
 * wraparound is tolerated as long as the two values are less than half
 * the type's range apart.
 * NOTE(review): despite the "GTEQ" name this is a strict greater-than
 * ((a) == (b) yields false); kept as-is since callers depend on the
 * current behavior.
 * Fix: macro arguments are now parenthesized so that expression
 * arguments (e.g. _WRAPGTEQ(x + 1, y)) group correctly.
 */
#define _WRAPGTEQ(a, b) (((a) - (b)) > 0)
96
/*
 * NVGPU_POLL* defines equivalent to the POLL* linux defines
 */
#define NVGPU_POLLIN	(1 << 0)
#define NVGPU_POLLPRI	(1 << 1)
#define NVGPU_POLLOUT	(1 << 2)
#define NVGPU_POLLRDNORM	(1 << 3)
#define NVGPU_POLLHUP	(1 << 4)

/* NVGPU_CLK_DOMAIN_* defines equivalent to NVGPU_GPU_CLK_DOMAIN_*
 * defines in uapi header
 */
/* Memory clock */
#define NVGPU_CLK_DOMAIN_MCLK	(0)
/* Main graphics core clock */
#define NVGPU_CLK_DOMAIN_GPCCLK	(1)

/* Highest valid domain index (not a domain count). */
#define NVGPU_CLK_DOMAIN_MAX	(NVGPU_CLK_DOMAIN_GPCCLK)

/* Debug-log helper gated on the gpu_dbg_clk_arb log mask. */
#define clk_arb_dbg(g, fmt, args...) \
	do { \
		nvgpu_log(g, gpu_dbg_clk_arb, \
				fmt, ##args); \
	} while (0)
121
/* One event/alarm record queued toward a client. */
struct nvgpu_clk_notification {
	u32 notification;	/* event bit(s), see NVGPU_EVENT_* / EVENT() */
	u64 timestamp;		/* time the event was recorded; units not
				 * visible here -- presumably ns, confirm */
};
126
/*
 * Fixed-capacity circular queue of notifications.  head/tail are atomic,
 * presumably to allow lock-free producer/consumer access -- confirm at
 * the call sites in the arbiter implementation.
 */
struct nvgpu_clk_notification_queue {
	u32 size;				/* capacity in entries */
	nvgpu_atomic_t head;			/* producer index */
	nvgpu_atomic_t tail;			/* consumer index */
	struct nvgpu_clk_notification *notifications;	/* backing array */
};
133
/*
 * One voltage/frequency point.  The anonymous union reflects dual use:
 * graphics-domain points fill the gpc/sys/xbar fields, memory-domain
 * points use mem_mhz only (see nvgpu_clk_vf_table below).
 */
struct nvgpu_clk_vf_point {
	u16 pstates;		/* bit mask of P-states that may use this
				 * point, see VF_POINT_SET_PSTATE_SUPPORTED */
	union {
		struct {
			u16 gpc_mhz;
			u16 sys_mhz;
			u16 xbar_mhz;
		};
		u16 mem_mhz;
	};
	u32 uvolt;		/* required voltage (uV, per naming) */
	u32 uvolt_sram;		/* required SRAM voltage (uV, per naming) */
};
147
/* A pair of VF curves: one for the memory clock, one for gpc2clk. */
struct nvgpu_clk_vf_table {
	u32 mclk_num_points;			/* entries in mclk_points */
	struct nvgpu_clk_vf_point *mclk_points;
	u32 gpc2clk_num_points;			/* entries in gpc2clk_points */
	struct nvgpu_clk_vf_point *gpc2clk_points;
};
#ifdef CONFIG_DEBUG_FS
/* debugfs statistics on clock-switch latency (time units not visible
 * here -- confirm against the debugfs implementation). */
struct nvgpu_clk_arb_debug {
	s64 switch_max;		/* slowest observed switch */
	s64 switch_min;		/* fastest observed switch */
	u64 switch_num;		/* number of switches sampled */
	s64 switch_avg;		/* running mean */
	s64 switch_std;		/* running variance/stddev accumulator */
};
#endif
163
/* A fully-resolved arbitration result: both clocks plus the P-state. */
struct nvgpu_clk_arb_target {
	u16 mclk;	/* memory clock, MHz (per sibling *_mhz naming) */
	u16 gpc2clk;	/* gpc2clk, MHz */
	u32 pstate;	/* selected P-state index */
};
169
/* Kinds of deferred work handled by the clk arbiter worker. */
enum clk_arb_work_item_type {
	CLK_ARB_WORK_UPDATE_VF_TABLE,	/* refresh the VF tables */
	CLK_ARB_WORK_UPDATE_ARB		/* re-run arbitration */
};
174
/* A queued unit of work for the arbiter worker thread
 * (see nvgpu_clk_arb_worker_enqueue()). */
struct nvgpu_clk_arb_work_item {
	enum clk_arb_work_item_type item_type;
	struct nvgpu_clk_arb *arb;		/* back-pointer to owner */
	struct nvgpu_list_node worker_item;	/* linkage in worker queue */
};
180
/*
 * Central clock-arbiter state, one instance per GPU.  Holds the client
 * sessions and fds, the VF tables, alarm state, and the currently
 * applied clock target.
 */
struct nvgpu_clk_arb {
	struct nvgpu_spinlock sessions_lock;	/* presumably guards 'sessions' */
	struct nvgpu_spinlock users_lock;	/* presumably guards 'users' */
	struct nvgpu_spinlock requests_lock;	/* presumably guards 'requests' */

	struct nvgpu_mutex pstate_lock;	/* see nvgpu_clk_arb_pstate_change_lock */
	struct nvgpu_list_node users;
	struct nvgpu_list_node sessions;
	struct nvgpu_list_node requests;

	struct gk20a *g;	/* owning GPU */
	int status;

	/* 'actual' presumably points at the live entry of actual_pool
	 * (double-buffer pattern) -- confirm in the implementation. */
	struct nvgpu_clk_arb_target actual_pool[2];
	struct nvgpu_clk_arb_target *actual;

	u16 gpc2clk_default_mhz;
	u16 mclk_default_mhz;
	u32 voltuv_actual;	/* applied voltage, uV per naming */

	u16 gpc2clk_min, gpc2clk_max;	/* allowed gpc2clk range (MHz) */
	u16 mclk_min, mclk_max;		/* allowed mclk range (MHz) */

	/* pre-allocated work items for the worker thread */
	struct nvgpu_clk_arb_work_item update_vf_table_work_item;
	struct nvgpu_clk_arb_work_item update_arb_work_item;

	struct nvgpu_cond request_wq;	/* see nvgpu_clk_arb_has_active_req */

	/* double-buffered VF tables; current_vf_table presumably points
	 * into vf_table_pool, selected by vf_table_index -- confirm. */
	struct nvgpu_clk_vf_table *current_vf_table;
	struct nvgpu_clk_vf_table vf_table_pool[2];
	u32 vf_table_index;

	u16 *mclk_f_points;	/* mclk frequency points (MHz) */
	nvgpu_atomic_t req_nr;	/* request sequence counter */

	u32 mclk_f_numpoints;
	u16 *gpc2clk_f_points;	/* gpc2clk frequency points (MHz) */
	u32 gpc2clk_f_numpoints;

	bool clk_arb_events_supported;

	nvgpu_atomic64_t alarm_mask;	/* raised global alarms (EVENT bits) */
	struct nvgpu_clk_notification_queue notification_queue;

#ifdef CONFIG_DEBUG_FS
	struct nvgpu_clk_arb_debug debug_pool[2];
	struct nvgpu_clk_arb_debug *debug;	/* live entry of debug_pool */
	bool debugfs_set;
#endif
};
231
/*
 * Per-fd client state: an event fd or a request fd installed for a
 * session.  The link/node union presumably reflects which list the dev
 * is on -- confirm at the list-helper call sites below.
 */
struct nvgpu_clk_dev {
	struct nvgpu_clk_session *session;	/* owning session */
	union {
		struct nvgpu_list_node link;
		struct nvgpu_list_node node;
	};
	struct nvgpu_cond readout_wq;	/* wakes pollers/readers */
	nvgpu_atomic_t poll_mask;	/* pending NVGPU_POLL* bits */
	u16 gpc2clk_target_mhz;		/* requested gpc2clk (MHz) */
	u16 mclk_target_mhz;		/* requested mclk (MHz) */
	u32 alarms_reported;
	nvgpu_atomic_t enabled_mask;	/* alarms this client subscribed to */
	struct nvgpu_clk_notification_queue queue;	/* per-fd event queue */
	u32 arb_queue_head;	/* read position in the arb's global queue */
	struct nvgpu_ref refcount;	/* released via nvgpu_clk_arb_free_fd */
};
248
/*
 * Per-client arbitration session; aggregates the client's target
 * requests and links into the arbiter's session list.
 */
struct nvgpu_clk_session {
	bool zombie;		/* presumably: closed but still referenced */
	struct gk20a *g;
	struct nvgpu_ref refcount;  /* freed via nvgpu_clk_arb_free_session */
	struct nvgpu_list_node link;	/* entry in arb->sessions */
	struct nvgpu_list_node targets;	/* pending target requests */

	struct nvgpu_spinlock session_lock;
	struct nvgpu_clk_arb_target target_pool[2];	/* double buffer */
	struct nvgpu_clk_arb_target *target;	/* live entry of target_pool */
};
260
261static inline struct nvgpu_clk_session *
262nvgpu_clk_session_from_link(struct nvgpu_list_node *node)
263{
264 return (struct nvgpu_clk_session *)
265 ((uintptr_t)node - offsetof(struct nvgpu_clk_session, link));
266};
267
268static inline struct nvgpu_clk_dev *
269nvgpu_clk_dev_from_node(struct nvgpu_list_node *node)
270{
271 return (struct nvgpu_clk_dev *)
272 ((uintptr_t)node - offsetof(struct nvgpu_clk_dev, node));
273};
274
275static inline struct nvgpu_clk_dev *
276nvgpu_clk_dev_from_link(struct nvgpu_list_node *node)
277{
278 return (struct nvgpu_clk_dev *)
279 ((uintptr_t)node - offsetof(struct nvgpu_clk_dev, link));
280};
281
282static inline struct nvgpu_clk_arb_work_item *
283nvgpu_clk_arb_work_item_from_worker_item(struct nvgpu_list_node *node)
284{
285 return (struct nvgpu_clk_arb_work_item *)
286 ((uintptr_t)node - offsetof(struct nvgpu_clk_arb_work_item, worker_item));
287};
288
/* Queue a work item for the clock-arbiter worker thread. */
void nvgpu_clk_arb_worker_enqueue(struct gk20a *g,
	struct nvgpu_clk_arb_work_item *work_item);

/* Rebuild the arbiter's VF table; returns 0 on success (presumed
 * negative errno otherwise -- confirm in implementation). */
int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb);

/* Start the arbiter worker thread. */
int nvgpu_clk_arb_worker_init(struct gk20a *g);

/* Allocate and initialize the per-GPU arbiter state. */
int nvgpu_clk_arb_init_arbiter(struct gk20a *g);

/* True while an arbitration request is still in flight. */
bool nvgpu_clk_arb_has_active_req(struct gk20a *g);

/* Query the allowed [min, max] MHz range for 'api_domain'. */
int nvgpu_clk_arb_get_arbiter_clk_range(struct gk20a *g, u32 api_domain,
		u16 *min_mhz, u16 *max_mhz);

/* Query the currently programmed frequency for 'api_domain'. */
int nvgpu_clk_arb_get_arbiter_actual_mhz(struct gk20a *g,
		u32 api_domain, u16 *actual_mhz);

/* Query the effective (measured/delivered) frequency for 'api_domain'. */
int nvgpu_clk_arb_get_arbiter_effective_mhz(struct gk20a *g,
		u32 api_domain, u16 *effective_mhz);

/* Copy up to *max_points frequency points for 'api_domain' into
 * 'fpoints'; *max_points is presumably updated with the count. */
int nvgpu_clk_arb_get_arbiter_clk_f_points(struct gk20a *g,
	u32 api_domain, u32 *max_points, u16 *fpoints);

/* Bit mask of NVGPU_CLK_DOMAIN_* supported on this GPU. */
u32 nvgpu_clk_arb_get_arbiter_clk_domains(struct gk20a *g);
/* True if 'api_domain' is arbitrated on this GPU. */
bool nvgpu_clk_arb_is_valid_domain(struct gk20a *g, u32 api_domain);

/* Tear down the arbiter; counterpart of nvgpu_clk_arb_init_arbiter. */
void nvgpu_clk_arb_cleanup_arbiter(struct gk20a *g);

/* Install an fd referring to 'session'; returns the fd or an error. */
int nvgpu_clk_arb_install_session_fd(struct gk20a *g,
		struct nvgpu_clk_session *session);

/* Create a new session and return it through *_session. */
int nvgpu_clk_arb_init_session(struct gk20a *g,
		struct nvgpu_clk_session **_session);

/* Drop the caller's reference on 'session'. */
void nvgpu_clk_arb_release_session(struct gk20a *g,
	struct nvgpu_clk_session *session);

/* Submit the targets staged on 'request_fd' for arbitration. */
int nvgpu_clk_arb_commit_request_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int request_fd);

/* Stage a target frequency for 'api_domain' on request fd 'fd'. */
int nvgpu_clk_arb_set_session_target_mhz(struct nvgpu_clk_session *session,
		int fd, u32 api_domain, u16 target_mhz);

/* Read back the session's current target for 'api_domain'. */
int nvgpu_clk_arb_get_session_target_mhz(struct nvgpu_clk_session *session,
		u32 api_domain, u16 *target_mhz);

/* Create an event fd subscribed to 'alarm_mask' (EVENT bits). */
int nvgpu_clk_arb_install_event_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int *event_fd, u32 alarm_mask);

/* Create a request fd for staging target frequencies. */
int nvgpu_clk_arb_install_request_fd(struct gk20a *g,
	struct nvgpu_clk_session *session, int *event_fd);

/* Schedule a deferred VF-table refresh on the worker thread. */
void nvgpu_clk_arb_schedule_vf_table_update(struct gk20a *g);

/* Current P-state index, or presumably an error/invalid marker. */
int nvgpu_clk_arb_get_current_pstate(struct gk20a *g);

/* Acquire (lock=true) or release the P-state change lock. */
void nvgpu_clk_arb_pstate_change_lock(struct gk20a *g, bool lock);

/* Raise NVGPU_EVENT_ALARM_THERMAL_ABOVE_THRESHOLD toward clients. */
void nvgpu_clk_arb_send_thermal_alarm(struct gk20a *g);

/* Set 'alarm' in the arbiter's global alarm mask. */
void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm);

/* Set 'alarm' and schedule delivery to clients via the worker. */
void nvgpu_clk_arb_schedule_alarm(struct gk20a *g, u32 alarm);

/* Clear 'alarm' from the arbiter's global alarm mask. */
void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm);

/* nvgpu_ref release callbacks for sessions and fds. */
void nvgpu_clk_arb_free_session(struct nvgpu_ref *refcount);

void nvgpu_clk_arb_free_fd(struct nvgpu_ref *refcount);

/* Deliver 'alarm'/target updates to one client; returns poll bits. */
u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
				struct nvgpu_clk_arb_target *target,
				u32 alarm);

/* Allocate a notification queue of 'events_number' entries. */
int nvgpu_clk_notification_queue_alloc(struct gk20a *g,
				struct nvgpu_clk_notification_queue *queue,
				size_t events_number);

/* Free a queue allocated by nvgpu_clk_notification_queue_alloc. */
void nvgpu_clk_notification_queue_free(struct gk20a *g,
		struct nvgpu_clk_notification_queue *queue);

/* Post a pending event to one client fd (wakes its pollers). */
void nvgpu_clk_arb_event_post_event(struct nvgpu_clk_dev *dev);

/* Measure the live frequency of 'api_domain' in hardware. */
unsigned long nvgpu_clk_measure_freq(struct gk20a *g, u32 api_domain);

#ifdef CONFIG_DEBUG_FS
/* Register the arbiter's debugfs nodes. */
int nvgpu_clk_arb_debugfs_init(struct gk20a *g);
#endif
377#endif /* NVGPU_CLK_ARB_H */
378