path: root/include/os/linux/nvhost.c
Diffstat (limited to 'include/os/linux/nvhost.c')
-rw-r--r--   include/os/linux/nvhost.c   295
1 file changed, 295 insertions(+), 0 deletions(-)
diff --git a/include/os/linux/nvhost.c b/include/os/linux/nvhost.c
new file mode 100644
index 0000000..a9341c7
--- /dev/null
+++ b/include/os/linux/nvhost.c
@@ -0,0 +1,295 @@
/*
 * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/nvhost.h>
#include <linux/nvhost_t194.h>
#include <uapi/linux/nvhost_ioctl.h>
#include <linux/of_platform.h>

#include <nvgpu/gk20a.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/enabled.h>

#include "nvhost_priv.h"

#include "os_linux.h"
#include "module.h"

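/*
 * Look up the host1x device referenced by the "nvidia,host1x" phandle in the
 * GPU device tree node. If the phandle is present but the host1x device has
 * not been probed yet, return -EPROBE_DEFER so the caller retries later. If
 * no phandle exists, clear NVGPU_HAS_SYNCPOINTS and continue without
 * syncpoint support.
 */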
int nvgpu_get_nvhost_dev(struct gk20a *g)
{
	struct device_node *np = nvgpu_get_node(g);
	struct platform_device *host1x_pdev = NULL;
	const __be32 *host1x_ptr;

	host1x_ptr = of_get_property(np, "nvidia,host1x", NULL);
	if (host1x_ptr) {
		struct device_node *host1x_node =
			of_find_node_by_phandle(be32_to_cpup(host1x_ptr));

		host1x_pdev = of_find_device_by_node(host1x_node);
		if (!host1x_pdev) {
			nvgpu_warn(g, "host1x device not available");
			return -EPROBE_DEFER;
		}

	} else {
		if (nvgpu_has_syncpoints(g)) {
			nvgpu_warn(g, "host1x reference not found. assuming no syncpoints support");
			__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
		}
		return 0;
	}

	g->nvhost_dev = nvgpu_kzalloc(g, sizeof(struct nvgpu_nvhost_dev));
	if (!g->nvhost_dev)
		return -ENOMEM;

	g->nvhost_dev->host1x_pdev = host1x_pdev;

	return 0;
}

void nvgpu_free_nvhost_dev(struct gk20a *g)
{
	nvgpu_kfree(g, g->nvhost_dev);
}

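/*
 * The wrappers below are thin pass-throughs to the Linux nvhost driver.
 * They exist so that common (OS-independent) nvgpu code can operate on
 * syncpoints without including the Linux nvhost headers directly.
 */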
int nvgpu_nvhost_module_busy_ext(
	struct nvgpu_nvhost_dev *nvhost_dev)
{
	return nvhost_module_busy_ext(nvhost_dev->host1x_pdev);
}

void nvgpu_nvhost_module_idle_ext(
	struct nvgpu_nvhost_dev *nvhost_dev)
{
	nvhost_module_idle_ext(nvhost_dev->host1x_pdev);
}

void nvgpu_nvhost_debug_dump_device(
	struct nvgpu_nvhost_dev *nvhost_dev)
{
	nvhost_debug_dump_device(nvhost_dev->host1x_pdev);
}

const char *nvgpu_nvhost_syncpt_get_name(
	struct nvgpu_nvhost_dev *nvhost_dev, int id)
{
	return nvhost_syncpt_get_name(nvhost_dev->host1x_pdev, id);
}

bool nvgpu_nvhost_syncpt_is_valid_pt_ext(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{
	return nvhost_syncpt_is_valid_pt_ext(nvhost_dev->host1x_pdev, id);
}

int nvgpu_nvhost_syncpt_is_expired_ext(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 thresh)
{
	return nvhost_syncpt_is_expired_ext(nvhost_dev->host1x_pdev,
			id, thresh);
}

u32 nvgpu_nvhost_syncpt_incr_max_ext(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 incrs)
{
	return nvhost_syncpt_incr_max_ext(nvhost_dev->host1x_pdev, id, incrs);
}

int nvgpu_nvhost_intr_register_notifier(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 thresh,
	void (*callback)(void *, int), void *private_data)
{
	return nvhost_intr_register_notifier(nvhost_dev->host1x_pdev,
			id, thresh,
			callback, private_data);
}

void nvgpu_nvhost_syncpt_set_min_eq_max_ext(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{
	nvhost_syncpt_set_min_eq_max_ext(nvhost_dev->host1x_pdev, id);
}

void nvgpu_nvhost_syncpt_put_ref_ext(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{
	nvhost_syncpt_put_ref_ext(nvhost_dev->host1x_pdev, id);
}

u32 nvgpu_nvhost_get_syncpt_host_managed(
	struct nvgpu_nvhost_dev *nvhost_dev,
	u32 param, const char *syncpt_name)
{
	return nvhost_get_syncpt_host_managed(nvhost_dev->host1x_pdev,
			param, syncpt_name);
}

u32 nvgpu_nvhost_get_syncpt_client_managed(
	struct nvgpu_nvhost_dev *nvhost_dev,
	const char *syncpt_name)
{
	return nvhost_get_syncpt_client_managed(nvhost_dev->host1x_pdev,
			syncpt_name);
}

int nvgpu_nvhost_syncpt_wait_timeout_ext(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id,
	u32 thresh, u32 timeout, u32 *value, struct timespec *ts)
{
	return nvhost_syncpt_wait_timeout_ext(nvhost_dev->host1x_pdev,
		id, thresh, timeout, value, ts);
}

int nvgpu_nvhost_syncpt_read_ext_check(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id, u32 *val)
{
	return nvhost_syncpt_read_ext_check(nvhost_dev->host1x_pdev, id, val);
}

u32 nvgpu_nvhost_syncpt_read_maxval(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{
	return nvhost_syncpt_read_maxval(nvhost_dev->host1x_pdev, id);
}

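/*
 * Put a syncpoint into a safe state: force min == max at a value large
 * enough that any thread still waiting on the syncpoint is released.
 */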
void nvgpu_nvhost_syncpt_set_safe_state(
	struct nvgpu_nvhost_dev *nvhost_dev, u32 id)
{
	u32 val;

	/*
	 * Add large number of increments to current value
	 * so that all waiters on this syncpoint are released
	 *
	 * We don't expect any case where more than 0x10000 increments
	 * are pending
	 */
	val = nvhost_syncpt_read_minval(nvhost_dev->host1x_pdev, id);
	val += 0x10000;

	nvhost_syncpt_set_minval(nvhost_dev->host1x_pdev, id, val);
	nvhost_syncpt_set_maxval(nvhost_dev->host1x_pdev, id, val);
}

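/*
 * Expose the GPU device as a sysfs symlink under the host1x device, unless
 * the GPU is already a direct child of host1x.
 */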
int nvgpu_nvhost_create_symlink(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);
	int err = 0;

	if (g->nvhost_dev &&
			(dev->parent != &g->nvhost_dev->host1x_pdev->dev)) {
		err = sysfs_create_link(&g->nvhost_dev->host1x_pdev->dev.kobj,
					&dev->kobj,
					dev_name(dev));
	}

	return err;
}

void nvgpu_nvhost_remove_symlink(struct gk20a *g)
{
	struct device *dev = dev_from_gk20a(g);

	if (g->nvhost_dev &&
			(dev->parent != &g->nvhost_dev->host1x_pdev->dev)) {
		sysfs_remove_link(&g->nvhost_dev->host1x_pdev->dev.kobj,
				dev_name(dev));
	}
}

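/*
 * Wrappers around the nvhost sync_fence/sync_pt helpers. Only built when
 * the kernel sync framework (CONFIG_SYNC) is enabled.
 */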
#ifdef CONFIG_SYNC
u32 nvgpu_nvhost_sync_pt_id(struct sync_pt *pt)
{
	return nvhost_sync_pt_id(pt);
}

u32 nvgpu_nvhost_sync_pt_thresh(struct sync_pt *pt)
{
	return nvhost_sync_pt_thresh(pt);
}

struct sync_fence *nvgpu_nvhost_sync_fdget(int fd)
{
	return nvhost_sync_fdget(fd);
}

int nvgpu_nvhost_sync_num_pts(struct sync_fence *fence)
{
	return nvhost_sync_num_pts(fence);
}

struct sync_fence *nvgpu_nvhost_sync_create_fence(
	struct nvgpu_nvhost_dev *nvhost_dev,
	u32 id, u32 thresh, const char *name)
{
	struct nvhost_ctrl_sync_fence_info pt = {
		.id = id,
		.thresh = thresh,
	};

	return nvhost_sync_create_fence(nvhost_dev->host1x_pdev, &pt, 1, name);
}
#endif /* CONFIG_SYNC */

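/*
 * On T19x host1x, syncpoint values are also exposed through a memory
 * aperture (the syncpoint unit interface). The helpers below query that
 * aperture and the per-syncpoint byte offset so nvgpu can address
 * individual syncpoints directly.
 */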
#ifdef CONFIG_TEGRA_T19X_GRHOST
int nvgpu_nvhost_syncpt_unit_interface_get_aperture(
	struct nvgpu_nvhost_dev *nvhost_dev,
	u64 *base, size_t *size)
{
	return nvhost_syncpt_unit_interface_get_aperture(
		nvhost_dev->host1x_pdev, (phys_addr_t *)base, size);
}

u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id)
{
	return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id);
}

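/*
 * Probe-time syncpoint setup: acquire the nvhost device, query the syncpoint
 * aperture (base and size), and record the per-syncpoint stride. Any failure
 * disables NVGPU_HAS_SYNCPOINTS and returns -ENOSYS.
 */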
int nvgpu_nvhost_syncpt_init(struct gk20a *g)
{
	int err = 0;

	if (!nvgpu_has_syncpoints(g))
		return -ENOSYS;

	err = nvgpu_get_nvhost_dev(g);
	if (err) {
		nvgpu_err(g, "host1x device not available");
		__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
		return -ENOSYS;
	}

	err = nvgpu_nvhost_syncpt_unit_interface_get_aperture(
			g->nvhost_dev,
			&g->syncpt_unit_base,
			&g->syncpt_unit_size);
	if (err) {
		nvgpu_err(g, "Failed to get syncpt interface");
		__nvgpu_set_enabled(g, NVGPU_HAS_SYNCPOINTS, false);
		return -ENOSYS;
	}

	g->syncpt_size =
			nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1);
	nvgpu_info(g, "syncpt_unit_base %llx syncpt_unit_size %zx size %x\n",
			g->syncpt_unit_base, g->syncpt_unit_size,
			g->syncpt_size);

	return 0;
}
#endif