summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/driver_common.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2018-04-18 15:59:00 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-06-15 20:47:31 -0400
commit2a2c16af5f9f1ccfc93a13e820d5381e5c881e92 (patch)
tree2e5d7b042270a649978e5bb540857012c85fb5b5 /drivers/gpu/nvgpu/common/linux/driver_common.c
parent98d996f4ffb0137d119b5849cae46d7b7e5693e1 (diff)
gpu: nvgpu: Move Linux files away from common
Move all Linux source code files to drivers/gpu/nvgpu/os/linux from drivers/gpu/nvgpu/common/linux. This changes the meaning of common to be OS independent. JIRA NVGPU-598 JIRA NVGPU-601 Change-Id: Ib7f2a43d3688bb0d0b7dcc48469a6783fd988ce9 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1747714 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/driver_common.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/driver_common.c334
1 files changed, 0 insertions, 334 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
deleted file mode 100644
index 8f33c5d2..00000000
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ /dev/null
@@ -1,334 +0,0 @@
1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/reboot.h>
18#include <linux/dma-mapping.h>
19#include <linux/mm.h>
20#include <uapi/linux/nvgpu.h>
21
22#include <nvgpu/defaults.h>
23#include <nvgpu/kmem.h>
24#include <nvgpu/nvgpu_common.h>
25#include <nvgpu/soc.h>
26#include <nvgpu/bug.h>
27#include <nvgpu/enabled.h>
28#include <nvgpu/debug.h>
29#include <nvgpu/sizes.h>
30
31#include "gk20a/gk20a.h"
32#include "platform_gk20a.h"
33#include "module.h"
34#include "os_linux.h"
35#include "sysfs.h"
36#include "ioctl.h"
37#include "gk20a/regops_gk20a.h"
38
39#define EMC3D_DEFAULT_RATIO 750
40
/*
 * OS-abstraction hook for restarting the system.  @cmd is forwarded
 * verbatim to the Linux kernel_restart() API (typically NULL or a
 * reboot-mode string interpreted by the platform).
 */
void nvgpu_kernel_restart(void *cmd)
{
	kernel_restart(cmd);
}
45
46static void nvgpu_init_vars(struct gk20a *g)
47{
48 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
49 struct device *dev = dev_from_gk20a(g);
50 struct gk20a_platform *platform = dev_get_drvdata(dev);
51
52 nvgpu_cond_init(&l->sw_irq_stall_last_handled_wq);
53 nvgpu_cond_init(&l->sw_irq_nonstall_last_handled_wq);
54
55 init_rwsem(&l->busy_lock);
56 nvgpu_rwsem_init(&g->deterministic_busy);
57
58 nvgpu_spinlock_init(&g->mc_enable_lock);
59
60 nvgpu_mutex_init(&platform->railgate_lock);
61 nvgpu_mutex_init(&g->dbg_sessions_lock);
62 nvgpu_mutex_init(&g->client_lock);
63 nvgpu_mutex_init(&g->poweron_lock);
64 nvgpu_mutex_init(&g->poweroff_lock);
65 nvgpu_mutex_init(&g->ctxsw_disable_lock);
66
67 l->regs_saved = l->regs;
68 l->bar1_saved = l->bar1;
69
70 g->emc3d_ratio = EMC3D_DEFAULT_RATIO;
71
72 /* Set DMA parameters to allow larger sgt lists */
73 dev->dma_parms = &l->dma_parms;
74 dma_set_max_seg_size(dev, UINT_MAX);
75
76 /*
77 * A default of 16GB is the largest supported DMA size that is
78 * acceptable to all currently supported Tegra SoCs.
79 */
80 if (!platform->dma_mask)
81 platform->dma_mask = DMA_BIT_MASK(34);
82
83 dma_set_mask(dev, platform->dma_mask);
84 dma_set_coherent_mask(dev, platform->dma_mask);
85
86 nvgpu_init_list_node(&g->profiler_objects);
87
88 nvgpu_init_list_node(&g->boardobj_head);
89 nvgpu_init_list_node(&g->boardobjgrp_head);
90}
91
92static void nvgpu_init_gr_vars(struct gk20a *g)
93{
94 gk20a_init_gr(g);
95
96 nvgpu_log_info(g, "total ram pages : %lu", totalram_pages);
97 g->gr.max_comptag_mem = totalram_pages
98 >> (10 - (PAGE_SHIFT - 10));
99}
100
101static void nvgpu_init_timeout(struct gk20a *g)
102{
103 struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
104
105 g->timeouts_disabled_by_user = false;
106 nvgpu_atomic_set(&g->timeouts_disabled_refcount, 0);
107
108 if (nvgpu_platform_is_silicon(g)) {
109 g->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
110 } else if (nvgpu_platform_is_fpga(g)) {
111 g->gr_idle_timeout_default = GK20A_TIMEOUT_FPGA;
112 } else {
113 g->gr_idle_timeout_default = (u32)ULONG_MAX;
114 }
115 g->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms;
116 g->fifo_eng_timeout_us = GRFIFO_TIMEOUT_CHECK_PERIOD_US;
117}
118
/*
 * Default runlist scheduling parameters.  All timeslice values are in
 * microseconds; each priority level doubles the one below it.
 */
static void nvgpu_init_timeslice(struct gk20a *g)
{
	g->runlist_interleave = true;

	g->timeslice_low_priority_us = 1300;
	g->timeslice_medium_priority_us = 2600;
	g->timeslice_high_priority_us = 5200;

	/* Clamp range for user-requested timeslices. */
	g->min_timeslice_us = 1000;
	g->max_timeslice_us = 50000;
}
130
131static void nvgpu_init_pm_vars(struct gk20a *g)
132{
133 struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
134
135 /*
136 * Set up initial power settings. For non-slicon platforms, disable
137 * power features and for silicon platforms, read from platform data
138 */
139 g->slcg_enabled =
140 nvgpu_platform_is_silicon(g) ? platform->enable_slcg : false;
141 g->blcg_enabled =
142 nvgpu_platform_is_silicon(g) ? platform->enable_blcg : false;
143 g->elcg_enabled =
144 nvgpu_platform_is_silicon(g) ? platform->enable_elcg : false;
145 g->elpg_enabled =
146 nvgpu_platform_is_silicon(g) ? platform->enable_elpg : false;
147 g->aelpg_enabled =
148 nvgpu_platform_is_silicon(g) ? platform->enable_aelpg : false;
149 g->mscg_enabled =
150 nvgpu_platform_is_silicon(g) ? platform->enable_mscg : false;
151 g->can_elpg =
152 nvgpu_platform_is_silicon(g) ? platform->can_elpg_init : false;
153
154 __nvgpu_set_enabled(g, NVGPU_GPU_CAN_ELCG,
155 nvgpu_platform_is_silicon(g) ? platform->can_elcg : false);
156 __nvgpu_set_enabled(g, NVGPU_GPU_CAN_SLCG,
157 nvgpu_platform_is_silicon(g) ? platform->can_slcg : false);
158 __nvgpu_set_enabled(g, NVGPU_GPU_CAN_BLCG,
159 nvgpu_platform_is_silicon(g) ? platform->can_blcg : false);
160
161 g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
162 g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
163 g->has_syncpoints = platform->has_syncpoints;
164#ifdef CONFIG_NVGPU_SUPPORT_CDE
165 g->has_cde = platform->has_cde;
166#endif
167 g->ptimer_src_freq = platform->ptimer_src_freq;
168 g->support_pmu = support_gk20a_pmu(dev_from_gk20a(g));
169 g->can_railgate = platform->can_railgate_init;
170 g->ldiv_slowdown_factor = platform->ldiv_slowdown_factor_init;
171 /* if default delay is not set, set default delay to 500msec */
172 if (platform->railgate_delay_init)
173 g->railgate_delay = platform->railgate_delay_init;
174 else
175 g->railgate_delay = NVGPU_DEFAULT_RAILGATE_IDLE_TIMEOUT;
176 __nvgpu_set_enabled(g, NVGPU_PMU_PERFMON, platform->enable_perfmon);
177
178 /* set default values to aelpg parameters */
179 g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
180 g->pmu.aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;
181 g->pmu.aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;
182 g->pmu.aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;
183 g->pmu.aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;
184
185 __nvgpu_set_enabled(g, NVGPU_SUPPORT_ASPM, !platform->disable_aspm);
186}
187
/*
 * Pull VBIOS-related settings from platform data: whether the PMU
 * should run the pre-OS image, and the minimum acceptable VBIOS
 * version for this board.
 */
static void nvgpu_init_vbios_vars(struct gk20a *g)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));

	__nvgpu_set_enabled(g, NVGPU_PMU_RUN_PREOS, platform->run_preos);
	g->vbios_min_version = platform->vbios_min_version;
}
195
/* Copy the L2-cache (LTC) stream ID from platform data. */
static void nvgpu_init_ltc_vars(struct gk20a *g)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));

	g->ltc_streamid = platform->ltc_streamid;
}
202
203static void nvgpu_init_mm_vars(struct gk20a *g)
204{
205 struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
206
207 g->mm.disable_bigpage = platform->disable_bigpage;
208 __nvgpu_set_enabled(g, NVGPU_MM_HONORS_APERTURE,
209 platform->honors_aperture);
210 __nvgpu_set_enabled(g, NVGPU_MM_UNIFIED_MEMORY,
211 platform->unified_memory);
212 __nvgpu_set_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES,
213 platform->unify_address_spaces);
214
215 nvgpu_mutex_init(&g->mm.tlb_lock);
216 nvgpu_mutex_init(&g->mm.priv_lock);
217}
218
/*
 * nvgpu_probe - OS-common probe-time initialization for one GPU.
 *
 * @g               - GPU instance being probed.
 * @debugfs_symlink - Name of the debugfs symlink created for this GPU.
 * @interface_name  - Base name for the user-visible device node(s).
 * @class           - Device class the user nodes are registered under.
 *
 * Seeds all software state (locks, timeouts, PM/VBIOS/LTC/MM variables),
 * runs the platform's probe callbacks, creates user device nodes, sysfs
 * entries and debugfs, and allocates the regops scratch buffer.
 *
 * Returns 0 on success or a negative errno.  -EPROBE_DEFER from the
 * platform probe is expected and logged at info level only.
 *
 * NOTE(review): the later error paths (gk20a_user_init failure onward,
 * and the -ENOMEM return after nvgpu_create_sysfs/gk20a_debug_init)
 * return without unwinding the earlier steps -- presumably the caller
 * or driver-remove path cleans up; verify before relying on repeated
 * probe attempts.
 */
int nvgpu_probe(struct gk20a *g,
		const char *debugfs_symlink,
		const char *interface_name,
		struct class *class)
{
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int err = 0;

	/* Pure software-state initialization; no hardware access yet. */
	nvgpu_init_vars(g);
	nvgpu_init_gr_vars(g);
	nvgpu_init_timeout(g);
	nvgpu_init_timeslice(g);
	nvgpu_init_pm_vars(g);
	nvgpu_init_vbios_vars(g);
	nvgpu_init_ltc_vars(g);
	err = nvgpu_init_soc_vars(g);
	if (err) {
		nvgpu_err(g, "init soc vars failed");
		return err;
	}

	/* Initialize the platform interface. */
	err = platform->probe(dev);
	if (err) {
		/* Deferral is a normal outcome; don't log it as an error. */
		if (err == -EPROBE_DEFER)
			nvgpu_info(g, "platform probe failed");
		else
			nvgpu_err(g, "platform probe failed");
		return err;
	}

	/* MM vars read platform data that platform->probe may populate. */
	nvgpu_init_mm_vars(g);

	/* platform probe can defer do user init only if probe succeeds */
	err = gk20a_user_init(dev, interface_name, class);
	if (err)
		return err;

	if (platform->late_probe) {
		err = platform->late_probe(dev);
		if (err) {
			nvgpu_err(g, "late probe failed");
			return err;
		}
	}

	nvgpu_create_sysfs(dev);
	gk20a_debug_init(g, debugfs_symlink);

	/* Scratch buffer shared by the register-ops (debugger) path. */
	g->dbg_regops_tmp_buf = nvgpu_kzalloc(g, SZ_4K);
	if (!g->dbg_regops_tmp_buf) {
		nvgpu_err(g, "couldn't allocate regops tmp buf");
		return -ENOMEM;
	}
	g->dbg_regops_tmp_buf_ops =
		SZ_4K / sizeof(g->dbg_regops_tmp_buf[0]);

	g->remove_support = gk20a_remove_support;

	nvgpu_ref_init(&g->refcount);

	return 0;
}
283
/**
 * cyclic_delta - Returns delta of cyclic integers a and b.
 *
 * @a - First integer
 * @b - Second integer
 *
 * Note: if a is ahead of b, delta is positive.
 *
 * The subtraction is done in unsigned arithmetic so the wrap-around
 * case (a just past INT_MAX while b is just below it) is well defined;
 * plain signed subtraction would overflow, which is undefined behavior
 * in C.  The final unsigned->int conversion is implementation-defined
 * per C11 but is the expected two's-complement wrap on all supported
 * compilers (gcc/clang).
 */
static int cyclic_delta(int a, int b)
{
	return (int)((unsigned int)a - (unsigned int)b);
}
296
297/**
298 * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
299 *
300 * @g - The GPU to wait on.
301 *
302 * Waits until all interrupt handlers that have been scheduled to run have
303 * completed.
304 */
305void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
306{
307 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
308 int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count);
309 int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
310
311 /* wait until all stalling irqs are handled */
312 NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
313 cyclic_delta(stall_irq_threshold,
314 atomic_read(&l->sw_irq_stall_last_handled))
315 <= 0, 0);
316
317 /* wait until all non-stalling irqs are handled */
318 NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
319 cyclic_delta(nonstall_irq_threshold,
320 atomic_read(&l->sw_irq_nonstall_last_handled))
321 <= 0, 0);
322}
323
/*
 * Release the memory backing a GPU instance.  The gk20a is embedded in
 * the Linux-specific nvgpu_os_linux container, so the container is what
 * must actually be freed.
 */
static void nvgpu_free_gk20a(struct gk20a *g)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	kfree(l);
}
330
/*
 * Install the OS-specific destructor so OS-independent code can free a
 * gk20a (e.g. on final refcount drop) without knowing its container.
 */
void nvgpu_init_gk20a(struct gk20a *g)
{
	g->free = nvgpu_free_gk20a;
}