author    Arto Merilainen <amerilainen@nvidia.com>  2014-03-19 03:38:25 -0400
committer Dan Willemsen <dwillemsen@nvidia.com>     2015-03-18 15:08:53 -0400
commit    a9785995d5f22aaeb659285f8aeb64d8b56982e0 (patch)
tree      cc75f75bcf43db316a002a7a240b81f299bf6d7f /drivers/gpu/nvgpu/gk20a/gk20a.h
parent    61efaf843c22b85424036ec98015121c08f5f16c (diff)
gpu: nvgpu: Add NVIDIA GPU Driver
This patch moves the NVIDIA GPU driver to a new location.

Bug 1482562

Change-Id: I24293810b9d0f1504fd9be00135e21dad656ccb6
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/383722
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h  559
1 file changed, 559 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
new file mode 100644
index 00000000..a9081a9d
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -0,0 +1,559 @@
/*
 * drivers/gpu/nvgpu/gk20a/gk20a.h
 *
 * GK20A Graphics
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _NVHOST_GK20A_H_
#define _NVHOST_GK20A_H_


struct gk20a;
struct fifo_gk20a;
struct channel_gk20a;
struct gr_gk20a;
struct sim_gk20a;

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/nvhost_gpu_ioctl.h>
#include <linux/tegra-soc.h>

#include "../../../arch/arm/mach-tegra/iomap.h"

#include "as_gk20a.h"
#include "clk_gk20a.h"
#include "fifo_gk20a.h"
#include "gr_gk20a.h"
#include "sim_gk20a.h"
#include "pmu_gk20a.h"
#include "priv_ring_gk20a.h"
#include "therm_gk20a.h"
#include "platform_gk20a.h"

extern struct platform_device tegra_gk20a_device;

bool is_gk20a_module(struct platform_device *dev);

struct cooling_device_gk20a {
        struct thermal_cooling_device *gk20a_cooling_dev;
        unsigned int gk20a_freq_state;
        unsigned int gk20a_freq_table_size;
        struct gk20a *g;
};
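
/*
 * Illustrative sketch (not part of this patch): a cooling device like the
 * one above would typically be hooked into the thermal core with the
 * standard kernel API.  The ops table contents and the "gk20a_cdev" type
 * string below are assumptions for illustration only:
 *
 *	static const struct thermal_cooling_device_ops gk20a_cooling_ops = {
 *		.get_max_state = ...,	// report gk20a_freq_table_size - 1
 *		.get_cur_state = ...,	// report gk20a_freq_state
 *		.set_cur_state = ...,	// cap the GPU clock accordingly
 *	};
 *
 *	cdev->gk20a_cooling_dev = thermal_cooling_device_register(
 *			"gk20a_cdev", cdev, &gk20a_cooling_ops);
 */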

struct gpu_ops {
        struct {
                int (*determine_L2_size_bytes)(struct gk20a *gk20a);
                void (*set_max_ways_evict_last)(struct gk20a *g, u32 max_ways);
                int (*init_comptags)(struct gk20a *g, struct gr_gk20a *gr);
                int (*clear_comptags)(struct gk20a *g, u32 min, u32 max);
                void (*set_zbc_color_entry)(struct gk20a *g,
                                struct zbc_entry *color_val,
                                u32 index);
                void (*set_zbc_depth_entry)(struct gk20a *g,
                                struct zbc_entry *depth_val,
                                u32 index);
                void (*clear_zbc_color_entry)(struct gk20a *g, u32 index);
                void (*clear_zbc_depth_entry)(struct gk20a *g, u32 index);
                int (*init_zbc)(struct gk20a *g, struct gr_gk20a *gr);
                void (*init_cbc)(struct gk20a *g, struct gr_gk20a *gr);
                void (*sync_debugfs)(struct gk20a *g);
                void (*elpg_flush)(struct gk20a *g);
        } ltc;
        struct {
                int (*init_fs_state)(struct gk20a *g);
                void (*access_smpc_reg)(struct gk20a *g, u32 quad, u32 offset);
                void (*bundle_cb_defaults)(struct gk20a *g);
                void (*cb_size_default)(struct gk20a *g);
                int (*calc_global_ctx_buffer_size)(struct gk20a *g);
                void (*commit_global_attrib_cb)(struct gk20a *g,
                                struct channel_ctx_gk20a *ch_ctx,
                                u64 addr, bool patch);
                void (*commit_global_bundle_cb)(struct gk20a *g,
                                struct channel_ctx_gk20a *ch_ctx,
                                u64 addr, u64 size, bool patch);
                int (*commit_global_cb_manager)(struct gk20a *g,
                                struct channel_gk20a *ch,
                                bool patch);
                void (*commit_global_pagepool)(struct gk20a *g,
                                struct channel_ctx_gk20a *ch_ctx,
                                u64 addr, u32 size, bool patch);
                void (*init_gpc_mmu)(struct gk20a *g);
                int (*handle_sw_method)(struct gk20a *g, u32 addr,
                                u32 class_num, u32 offset, u32 data);
                void (*set_alpha_circular_buffer_size)(struct gk20a *g,
                                u32 data);
                void (*set_circular_buffer_size)(struct gk20a *g, u32 data);
                void (*enable_hww_exceptions)(struct gk20a *g);
                bool (*is_valid_class)(struct gk20a *g, u32 class_num);
                void (*get_sm_dsm_perf_regs)(struct gk20a *g,
                                u32 *num_sm_dsm_perf_regs,
                                u32 **sm_dsm_perf_regs,
                                u32 *perf_register_stride);
                void (*get_sm_dsm_perf_ctrl_regs)(struct gk20a *g,
                                u32 *num_sm_dsm_perf_regs,
                                u32 **sm_dsm_perf_regs,
                                u32 *perf_register_stride);
                void (*set_hww_esr_report_mask)(struct gk20a *g);
                int (*setup_alpha_beta_tables)(struct gk20a *g,
                                struct gr_gk20a *gr);
        } gr;
        const char *name;
        struct {
                void (*init_fs_state)(struct gk20a *g);
                void (*reset)(struct gk20a *g);
                void (*init_uncompressed_kind_map)(struct gk20a *g);
                void (*init_kind_attr)(struct gk20a *g);
        } fb;
        struct {
                void (*slcg_gr_load_gating_prod)(struct gk20a *g, bool prod);
                void (*slcg_perf_load_gating_prod)(struct gk20a *g, bool prod);
                void (*blcg_gr_load_gating_prod)(struct gk20a *g, bool prod);
                void (*pg_gr_load_gating_prod)(struct gk20a *g, bool prod);
                void (*slcg_therm_load_gating_prod)(struct gk20a *g, bool prod);
        } clock_gating;
        struct {
                void (*bind_channel)(struct channel_gk20a *ch_gk20a);
        } fifo;
        struct pmu_v {
                /* used because the ZBC-table-update command ID changed
                 * from version 0 to version 1 */
                u32 cmd_id_zbc_table_update;
                u32 (*get_pmu_cmdline_args_size)(struct pmu_gk20a *pmu);
                void (*set_pmu_cmdline_args_cpu_freq)(struct pmu_gk20a *pmu,
                                u32 freq);
                void *(*get_pmu_cmdline_args_ptr)(struct pmu_gk20a *pmu);
                u32 (*get_pmu_allocation_struct_size)(struct pmu_gk20a *pmu);
                void (*set_pmu_allocation_ptr)(struct pmu_gk20a *pmu,
                                void **pmu_alloc_ptr, void *assign_ptr);
                void (*pmu_allocation_set_dmem_size)(struct pmu_gk20a *pmu,
                                void *pmu_alloc_ptr, u16 size);
                u16 (*pmu_allocation_get_dmem_size)(struct pmu_gk20a *pmu,
                                void *pmu_alloc_ptr);
                u32 (*pmu_allocation_get_dmem_offset)(struct pmu_gk20a *pmu,
                                void *pmu_alloc_ptr);
                u32 *(*pmu_allocation_get_dmem_offset_addr)(
                                struct pmu_gk20a *pmu, void *pmu_alloc_ptr);
                void (*pmu_allocation_set_dmem_offset)(struct pmu_gk20a *pmu,
                                void *pmu_alloc_ptr, u32 offset);
                void (*get_pmu_init_msg_pmu_queue_params)(
                                struct pmu_queue *queue, u32 id,
                                void *pmu_init_msg);
                void *(*get_pmu_msg_pmu_init_msg_ptr)(
                                struct pmu_init_msg *init);
                u16 (*get_pmu_init_msg_pmu_sw_mg_off)(
                                union pmu_init_msg_pmu *init_msg);
                u16 (*get_pmu_init_msg_pmu_sw_mg_size)(
                                union pmu_init_msg_pmu *init_msg);
                u32 (*get_pmu_perfmon_cmd_start_size)(void);
                int (*get_perfmon_cmd_start_offsetofvar)(
                                enum pmu_perfmon_cmd_start_fields field);
                void (*perfmon_start_set_cmd_type)(struct pmu_perfmon_cmd *pc,
                                u8 value);
                void (*perfmon_start_set_group_id)(struct pmu_perfmon_cmd *pc,
                                u8 value);
                void (*perfmon_start_set_state_id)(struct pmu_perfmon_cmd *pc,
                                u8 value);
                void (*perfmon_start_set_flags)(struct pmu_perfmon_cmd *pc,
                                u8 value);
                u8 (*perfmon_start_get_flags)(struct pmu_perfmon_cmd *pc);
                u32 (*get_pmu_perfmon_cmd_init_size)(void);
                int (*get_perfmon_cmd_init_offsetofvar)(
                                enum pmu_perfmon_cmd_start_fields field);
                void (*perfmon_cmd_init_set_sample_buffer)(
                                struct pmu_perfmon_cmd *pc, u16 value);
                void (*perfmon_cmd_init_set_dec_cnt)(
                                struct pmu_perfmon_cmd *pc, u8 value);
                void (*perfmon_cmd_init_set_base_cnt_id)(
                                struct pmu_perfmon_cmd *pc, u8 value);
                void (*perfmon_cmd_init_set_samp_period_us)(
                                struct pmu_perfmon_cmd *pc, u32 value);
                void (*perfmon_cmd_init_set_num_cnt)(struct pmu_perfmon_cmd *pc,
                                u8 value);
                void (*perfmon_cmd_init_set_mov_avg)(struct pmu_perfmon_cmd *pc,
                                u8 value);
                void *(*get_pmu_seq_in_a_ptr)(
                                struct pmu_sequence *seq);
                void *(*get_pmu_seq_out_a_ptr)(
                                struct pmu_sequence *seq);
        } pmu_ver;
};
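
/*
 * Illustrative sketch: gpu_ops is a per-chip HAL table.  Chip-specific init
 * code fills in the function pointers and common code calls through them.
 * The names below are hypothetical, for illustration only:
 *
 *	g->ops.ltc = gk20a_ltc_ops;	// chip init installs its ops table
 *	...
 *	size = g->ops.ltc.determine_L2_size_bytes(g);
 *	if (g->ops.gr.is_valid_class(g, class_num))
 *		...
 */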

struct gk20a {
        struct platform_device *dev;

        struct resource *reg_mem;
        void __iomem *regs;

        struct resource *bar1_mem;
        void __iomem *bar1;

        bool power_on;
        bool irq_requested;

        struct clk_gk20a clk;
        struct fifo_gk20a fifo;
        struct gr_gk20a gr;
        struct sim_gk20a sim;
        struct mm_gk20a mm;
        struct pmu_gk20a pmu;
        struct cooling_device_gk20a gk20a_cdev;

        /* Save the PMU firmware here so that it survives suspend/resume.
         * PMU suspend destroys all PMU SW/HW state; reloading the firmware
         * on resume crashes when the resume comes from sys_exit. */
        const struct firmware *pmu_fw;

        u32 gr_idle_timeout_default;
        u32 timeouts_enabled;

        bool slcg_enabled;
        bool blcg_enabled;
        bool elcg_enabled;
        bool elpg_enabled;
        bool aelpg_enabled;

#ifdef CONFIG_DEBUG_FS
        spinlock_t debugfs_lock;
        struct dentry *debugfs_ltc_enabled;
        struct dentry *debugfs_timeouts_enabled;
        struct dentry *debugfs_gr_idle_timeout_default;
#endif
        struct gk20a_ctxsw_ucode_info ctxsw_ucode_info;

        /* Held while manipulating the number of debug/profiler sessions
         * present; also prevents debug sessions from attaching until
         * released. */
        struct mutex dbg_sessions_lock;
        int dbg_sessions; /* number attached */
        int dbg_powergating_disabled_refcount; /* refcount for pg disable */

        void (*remove_support)(struct platform_device *);

        u64 pg_ingating_time_us;
        u64 pg_ungating_time_us;
        u32 pg_gating_cnt;

        spinlock_t mc_enable_lock;

        struct nvhost_gpu_characteristics gpu_characteristics;

        struct {
                struct cdev cdev;
                struct device *node;
        } channel;

        struct gk20a_as as;

        struct {
                struct cdev cdev;
                struct device *node;
        } ctrl;

        struct {
                struct cdev cdev;
                struct device *node;
        } dbg;

        struct {
                struct cdev cdev;
                struct device *node;
        } prof;

        struct mutex client_lock;
        int client_refcount; /* open channels and ctrl nodes */

        dev_t cdev_region;
        struct class *class;

        struct gpu_ops ops;

        int irq_stall;
        int irq_nonstall;

        struct generic_pm_domain pd;

        struct devfreq *devfreq;

        struct gk20a_scale_profile *scale_profile;

        struct device_dma_parameters dma_parms;
};

static inline unsigned long gk20a_get_gr_idle_timeout(struct gk20a *g)
{
        return g->timeouts_enabled ?
                g->gr_idle_timeout_default : MAX_SCHEDULE_TIMEOUT;
}

static inline struct gk20a *get_gk20a(struct platform_device *dev)
{
        return gk20a_get_platform(dev)->g;
}
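
/*
 * Illustrative sketch (hypothetical caller, not from this patch): code that
 * only has the platform_device, e.g. an ioctl or probe path, recovers the
 * gk20a instance and the current gr idle timeout like this:
 *
 *	struct gk20a *g = get_gk20a(pdev);
 *	unsigned long timeout = gk20a_get_gr_idle_timeout(g);
 *
 * When timeouts are disabled (e.g. while debugging), the second call
 * returns MAX_SCHEDULE_TIMEOUT so idle waits effectively never expire.
 */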

enum BAR0_DEBUG_OPERATION {
        BAR0_ZERO_NOP = 0,
        OP_END = 'DONE',
        BAR0_READ32 = '0R32',
        BAR0_WRITE32 = '0W32',
};

struct share_buffer_head {
        enum BAR0_DEBUG_OPERATION operation;
        /* size of the operation item */
        u32 size;
        u32 completed;
        u32 failed;
        u64 context;
        u64 completion_callback;
};

struct gk20a_cyclestate_buffer_elem {
        struct share_buffer_head head;
        /* in */
        u64 p_data;
        u64 p_done;
        u32 offset_bar0;
        u16 first_bit;
        u16 last_bit;
        /* out */
        /* keep 64 bits to be consistent */
        u64 data;
};
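
/*
 * Illustrative sketch (assumed usage, not taken from this patch): a
 * cyclestats client would queue one element per BAR0 access, e.g. a 32-bit
 * read of a register offset (reg_offset is a hypothetical variable):
 *
 *	struct gk20a_cyclestate_buffer_elem elem = {
 *		.head = {
 *			.operation = BAR0_READ32,
 *			.size = sizeof(elem),
 *		},
 *		.offset_bar0 = reg_offset,
 *		.first_bit = 0,
 *		.last_bit = 31,		// whole word
 *	};
 *
 * The result would come back in elem.data, with head.completed/failed
 * indicating status.
 */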

/* debug accessories */

#ifdef CONFIG_DEBUG_FS
    /* debug info, default is compiled-in but effectively disabled (0 mask) */
    #define GK20A_DEBUG
    /* e.g.: echo 1 > /d/tegra_host/dbg_mask */
    #define GK20A_DEFAULT_DBG_MASK 0
#else
    /* to enable, define GK20A_DEBUG manually and set the mask here */
    /*#define GK20A_DEBUG*/
    #define GK20A_DEFAULT_DBG_MASK (gpu_dbg_info)
#endif

enum gk20a_dbg_categories {
        gpu_dbg_info    = BIT(0),  /* lightly verbose info */
        gpu_dbg_fn      = BIT(2),  /* fn name tracing */
        gpu_dbg_reg     = BIT(3),  /* register accesses, very verbose */
        gpu_dbg_pte     = BIT(4),  /* gmmu ptes */
        gpu_dbg_intr    = BIT(5),  /* interrupts */
        gpu_dbg_pmu     = BIT(6),  /* gk20a pmu */
        gpu_dbg_clk     = BIT(7),  /* gk20a clk */
        gpu_dbg_map     = BIT(8),  /* mem mappings */
        gpu_dbg_gpu_dbg = BIT(9),  /* gpu debugger/profiler */
        gpu_dbg_mem     = BIT(31), /* memory accesses, very verbose */
};

#if defined(GK20A_DEBUG)
extern u32 gk20a_dbg_mask;
extern u32 gk20a_dbg_ftrace;
#define gk20a_dbg(dbg_mask, format, arg...) \
do { \
        if (unlikely((dbg_mask) & gk20a_dbg_mask)) { \
                if (gk20a_dbg_ftrace) \
                        trace_printk(format "\n", ##arg); \
                else \
                        pr_info("gk20a %s: " format "\n", \
                                __func__, ##arg); \
        } \
} while (0)

#else /* GK20A_DEBUG */
#define gk20a_dbg(dbg_mask, format, arg...) \
do { \
        if (0) \
                pr_info("gk20a %s: " format "\n", __func__, ##arg); \
} while (0)

#endif

#define gk20a_err(d, fmt, arg...) \
        dev_err(d, "%s: " fmt "\n", __func__, ##arg)

#define gk20a_warn(d, fmt, arg...) \
        dev_warn(d, "%s: " fmt "\n", __func__, ##arg)

#define gk20a_dbg_fn(fmt, arg...) \
        gk20a_dbg(gpu_dbg_fn, fmt, ##arg)

#define gk20a_dbg_info(fmt, arg...) \
        gk20a_dbg(gpu_dbg_info, fmt, ##arg)
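
/*
 * Illustrative usage sketch: each message is tagged with a category from
 * gk20a_dbg_categories and only printed when that bit is set in the runtime
 * gk20a_dbg_mask (routed to ftrace when gk20a_dbg_ftrace is set).  The
 * function and values below are hypothetical:
 *
 *	static void gk20a_example_isr(struct gk20a *g)
 *	{
 *		gk20a_dbg_fn("");	// function-entry trace
 *		gk20a_dbg(gpu_dbg_intr, "stat=0x%x", 0u);
 *		gk20a_err(dev_from_gk20a(g), "unexpected intr");
 *	}
 */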

/* mem access with dbg_mem logging */
static inline u8 gk20a_mem_rd08(void *ptr, int b)
{
        u8 _b = ((const u8 *)ptr)[b];
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
        gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + sizeof(u8) * b, _b);
#endif
        return _b;
}
static inline u16 gk20a_mem_rd16(void *ptr, int s)
{
        u16 _s = ((const u16 *)ptr)[s];
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
        gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + sizeof(u16) * s, _s);
#endif
        return _s;
}
static inline u32 gk20a_mem_rd32(void *ptr, int w)
{
        u32 _w = ((const u32 *)ptr)[w];
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
        gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + sizeof(u32) * w, _w);
#endif
        return _w;
}
static inline void gk20a_mem_wr08(void *ptr, int b, u8 data)
{
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
        gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + sizeof(u8) * b, data);
#endif
        ((u8 *)ptr)[b] = data;
}
static inline void gk20a_mem_wr16(void *ptr, int s, u16 data)
{
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
        gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + sizeof(u16) * s, data);
#endif
        ((u16 *)ptr)[s] = data;
}
static inline void gk20a_mem_wr32(void *ptr, int w, u32 data)
{
#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
        gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + sizeof(u32) * w, data);
#endif
        ((u32 *)ptr)[w] = data;
}
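
/*
 * Illustrative sketch: the index argument is in units of the access size,
 * not bytes, so the two lines below touch the same word (buf and the
 * values are hypothetical):
 *
 *	gk20a_mem_wr32(buf, 4, 0xdeadbeef);	// byte offset 16
 *	u32 v = gk20a_mem_rd32(buf, 4);		// reads back 0xdeadbeef
 */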

/* register accessors */
static inline void gk20a_writel(struct gk20a *g, u32 r, u32 v)
{
        gk20a_dbg(gpu_dbg_reg, " r=0x%x v=0x%x", r, v);
        writel(v, g->regs + r);
}
static inline u32 gk20a_readl(struct gk20a *g, u32 r)
{
        u32 v = readl(g->regs + r);
        gk20a_dbg(gpu_dbg_reg, " r=0x%x v=0x%x", r, v);
        return v;
}

static inline void gk20a_bar1_writel(struct gk20a *g, u32 b, u32 v)
{
        gk20a_dbg(gpu_dbg_reg, " b=0x%x v=0x%x", b, v);
        writel(v, g->bar1 + b);
}

static inline u32 gk20a_bar1_readl(struct gk20a *g, u32 b)
{
        u32 v = readl(g->bar1 + b);
        gk20a_dbg(gpu_dbg_reg, " b=0x%x v=0x%x", b, v);
        return v;
}

/* convenience */
static inline struct device *dev_from_gk20a(struct gk20a *g)
{
        return &g->dev->dev;
}
static inline struct gk20a *gk20a_from_as(struct gk20a_as *as)
{
        return container_of(as, struct gk20a, as);
}
static inline u32 u64_hi32(u64 n)
{
        return (u32)((n >> 32) & ~(u32)0);
}

static inline u32 u64_lo32(u64 n)
{
        return (u32)(n & ~(u32)0);
}
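
/*
 * Illustrative sketch: u64_hi32()/u64_lo32() split a 64-bit value into the
 * two 32-bit halves that hardware registers typically take, e.g. for a
 * hypothetical 64-bit GPU address gpu_va and placeholder register offsets
 * ADDR_LO_R/ADDR_HI_R (not real gk20a registers):
 *
 *	gk20a_writel(g, ADDR_LO_R, u64_lo32(gpu_va));	// bits 31:0
 *	gk20a_writel(g, ADDR_HI_R, u64_hi32(gpu_va));	// bits 63:32
 */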

static inline u32 set_field(u32 val, u32 mask, u32 field)
{
        return ((val & ~mask) | field);
}
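
/*
 * Illustrative read-modify-write sketch using set_field() with the register
 * accessors above (REG_R, the mask, and the field value are hypothetical):
 *
 *	u32 v = gk20a_readl(g, REG_R);
 *	v = set_field(v, 0xf0, 0x30);	// replace bits 7:4 with 0x3
 *	gk20a_writel(g, REG_R, v);
 *
 * The caller must pass a field value already shifted into the mask
 * position; set_field() does not shift.
 */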

/* invalidate channel lookup tlb */
static inline void gk20a_gr_flush_channel_tlb(struct gr_gk20a *gr)
{
        spin_lock(&gr->ch_tlb_lock);
        memset(gr->chid_tlb, 0,
                sizeof(struct gr_channel_map_tlb_entry) *
                GR_CHANNEL_MAP_TLB_SIZE);
        spin_unlock(&gr->ch_tlb_lock);
}

/* classes that the device supports */
/* TBD: get these from an open-sourced SDK? */
enum {
        KEPLER_C                  = 0xA297,
        FERMI_TWOD_A              = 0x902D,
        KEPLER_COMPUTE_A          = 0xA0C0,
        KEPLER_INLINE_TO_MEMORY_A = 0xA040,
        KEPLER_DMA_COPY_A         = 0xA0B5, /* not sure about this one */
};

#if defined(CONFIG_GK20A_PMU)
static inline int support_gk20a_pmu(void)
{
        return 1;
}
#else
static inline int support_gk20a_pmu(void)
{
        return 0;
}
#endif

void gk20a_create_sysfs(struct platform_device *dev);

#ifdef CONFIG_DEBUG_FS
int clk_gk20a_debugfs_init(struct platform_device *dev);
#endif

#define GK20A_BAR0_IORESOURCE_MEM 0
#define GK20A_BAR1_IORESOURCE_MEM 1
#define GK20A_SIM_IORESOURCE_MEM 2

void gk20a_busy_noresume(struct platform_device *pdev);
int gk20a_busy(struct platform_device *pdev);
void gk20a_idle(struct platform_device *pdev);
int gk20a_channel_busy(struct platform_device *pdev);
void gk20a_channel_idle(struct platform_device *pdev);
void gk20a_disable(struct gk20a *g, u32 units);
void gk20a_enable(struct gk20a *g, u32 units);
void gk20a_reset(struct gk20a *g, u32 units);
int gk20a_get_client(struct gk20a *g);
void gk20a_put_client(struct gk20a *g);

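/*
 * Illustrative sketch (hypothetical caller): gk20a_busy()/gk20a_idle()
 * read as a runtime-PM style get/put pair, so a caller that needs the GPU
 * powered would bracket its hardware access:
 *
 *	err = gk20a_busy(pdev);
 *	if (err)
 *		return err;
 *	... touch hardware ...
 *	gk20a_idle(pdev);
 */
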
const struct firmware *
gk20a_request_firmware(struct gk20a *g, const char *fw_name);

#define NVHOST_GPU_ARCHITECTURE_SHIFT 4

/* constructs unique and compact GPUID from nvhost_gpu_characteristics
 * arch/impl fields */
#define GK20A_GPUID(arch, impl) ((u32) ((arch) | (impl)))

#define GK20A_GPUID_GK20A \
        GK20A_GPUID(NVHOST_GPU_ARCH_GK100, NVHOST_GPU_IMPL_GK20A)
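
/*
 * Illustrative sketch: the arch field is expected to occupy the bits above
 * NVHOST_GPU_ARCHITECTURE_SHIFT and impl the bits below it, so OR-ing them
 * yields a compact ID.  With the assumed, illustrative values arch = 0xE0
 * and impl = 0xA this gives:
 *
 *	u32 gpuid = GK20A_GPUID(0xE0, 0xA);	// 0xEA
 */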

int gk20a_init_gpu_characteristics(struct gk20a *g);

#endif /* _NVHOST_GK20A_H_ */