diff options
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_channel.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_channel.c | 396 |
1 files changed, 0 insertions, 396 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c deleted file mode 100644 index cd180c678c13..000000000000 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ /dev/null | |||
@@ -1,396 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2005-2006 Stephane Marchesin | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
22 | * DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | |||
25 | #include <drm/drmP.h> | ||
26 | #include "nouveau_drv.h" | ||
27 | #include <drm/nouveau_drm.h> | ||
28 | #include "nouveau_dma.h" | ||
29 | #include "nouveau_fifo.h" | ||
30 | #include "nouveau_ramht.h" | ||
31 | #include "nouveau_fence.h" | ||
32 | #include "nouveau_software.h" | ||
33 | |||
34 | static int | ||
35 | nouveau_channel_pushbuf_init(struct nouveau_channel *chan) | ||
36 | { | ||
37 | u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT; | ||
38 | struct drm_device *dev = chan->dev; | ||
39 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
40 | int ret; | ||
41 | |||
42 | /* allocate buffer object */ | ||
43 | ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo); | ||
44 | if (ret) | ||
45 | goto out; | ||
46 | |||
47 | ret = nouveau_bo_pin(chan->pushbuf_bo, mem); | ||
48 | if (ret) | ||
49 | goto out; | ||
50 | |||
51 | ret = nouveau_bo_map(chan->pushbuf_bo); | ||
52 | if (ret) | ||
53 | goto out; | ||
54 | |||
55 | /* create DMA object covering the entire memtype where the push | ||
56 | * buffer resides, userspace can submit its own push buffers from | ||
57 | * anywhere within the same memtype. | ||
58 | */ | ||
59 | chan->pushbuf_base = chan->pushbuf_bo->bo.offset; | ||
60 | if (dev_priv->card_type >= NV_50) { | ||
61 | ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm, | ||
62 | &chan->pushbuf_vma); | ||
63 | if (ret) | ||
64 | goto out; | ||
65 | |||
66 | if (dev_priv->card_type < NV_C0) { | ||
67 | ret = nouveau_gpuobj_dma_new(chan, | ||
68 | NV_CLASS_DMA_IN_MEMORY, 0, | ||
69 | (1ULL << 40), | ||
70 | NV_MEM_ACCESS_RO, | ||
71 | NV_MEM_TARGET_VM, | ||
72 | &chan->pushbuf); | ||
73 | } | ||
74 | chan->pushbuf_base = chan->pushbuf_vma.offset; | ||
75 | } else | ||
76 | if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) { | ||
77 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | ||
78 | dev_priv->gart_info.aper_size, | ||
79 | NV_MEM_ACCESS_RO, | ||
80 | NV_MEM_TARGET_GART, | ||
81 | &chan->pushbuf); | ||
82 | } else | ||
83 | if (dev_priv->card_type != NV_04) { | ||
84 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, | ||
85 | dev_priv->fb_available_size, | ||
86 | NV_MEM_ACCESS_RO, | ||
87 | NV_MEM_TARGET_VRAM, | ||
88 | &chan->pushbuf); | ||
89 | } else { | ||
90 | /* NV04 cmdbuf hack, from original ddx.. not sure of it's | ||
91 | * exact reason for existing :) PCI access to cmdbuf in | ||
92 | * VRAM. | ||
93 | */ | ||
94 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | ||
95 | pci_resource_start(dev->pdev, 1), | ||
96 | dev_priv->fb_available_size, | ||
97 | NV_MEM_ACCESS_RO, | ||
98 | NV_MEM_TARGET_PCI, | ||
99 | &chan->pushbuf); | ||
100 | } | ||
101 | |||
102 | out: | ||
103 | if (ret) { | ||
104 | NV_ERROR(dev, "error initialising pushbuf: %d\n", ret); | ||
105 | nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma); | ||
106 | nouveau_gpuobj_ref(NULL, &chan->pushbuf); | ||
107 | if (chan->pushbuf_bo) { | ||
108 | nouveau_bo_unmap(chan->pushbuf_bo); | ||
109 | nouveau_bo_ref(NULL, &chan->pushbuf_bo); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
/* allocates and initializes a fifo for user space consumption */
/* Create a new hardware channel for @file_priv.
 *
 * On success the channel is returned through @chan_ret, locked (its mutex
 * held) and with one user reference; release with nouveau_channel_put().
 * On any failure after the channel slot has been claimed, teardown is
 * delegated to nouveau_channel_put(), which unwinds whatever had been set
 * up so far.  Returns 0 or a negative errno.
 */
int
nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		      struct drm_file *file_priv,
		      uint32_t vram_handle, uint32_t gart_handle)
{
	struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret, i;

	/* allocate and lock channel structure */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->dev = dev;
	chan->file_priv = file_priv;
	chan->vram_handle = vram_handle;
	chan->gart_handle = gart_handle;

	/* one struct reference (kref) plus one user count; the mutex is
	 * taken locked so nobody can use the channel mid-construction.
	 */
	kref_init(&chan->ref);
	atomic_set(&chan->users, 1);
	mutex_init(&chan->mutex);
	mutex_lock(&chan->mutex);

	/* allocate hw channel id: claim the first free slot in the global
	 * channel table under the channels spinlock.
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
		if (!dev_priv->channels.ptr[chan->id]) {
			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
			break;
		}
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* no free slot: nothing else was set up yet, free directly */
	if (chan->id == pfifo->channels) {
		mutex_unlock(&chan->mutex);
		kfree(chan);
		return -ENODEV;
	}

	NV_DEBUG(dev, "initialising channel %d\n", chan->id);

	/* setup channel's memory and vm */
	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
	if (ret) {
		NV_ERROR(dev, "gpuobj %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate space for per-channel fixed notifier memory */
	ret = nouveau_notifier_init_channel(chan);
	if (ret) {
		NV_ERROR(dev, "ntfy %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Allocate DMA push buffer */
	ret = nouveau_channel_pushbuf_init(chan);
	if (ret) {
		NV_ERROR(dev, "pushbuf %d\n", ret);
		nouveau_channel_put(&chan);
		return ret;
	}

	/* offsets of the USER put/get registers within the channel's
	 * control window
	 */
	nouveau_dma_init(chan);
	chan->user_put = 0x40;
	chan->user_get = 0x44;
	if (dev_priv->card_type >= NV_50)
		chan->user_get_hi = 0x60;

	/* create fifo context */
	ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING  (chan, 0x00000000);

	/* bind the software object class, used for fencing/flip completion */
	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	/* pre-Fermi: the software object must be explicitly bound to a
	 * subchannel before it can be used
	 */
	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret) {
			nouveau_channel_put(&chan);
			return ret;
		}

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING  (chan, NvSw);
		FIRE_RING (chan);
	}

	FIRE_RING(chan);

	ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
	if (ret) {
		nouveau_channel_put(&chan);
		return ret;
	}

	nouveau_debugfs_channel_init(chan);

	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
	/* make the channel findable through nouveau_channel_get() */
	if (fpriv) {
		spin_lock(&fpriv->lock);
		list_add(&chan->list, &fpriv->channels);
		spin_unlock(&fpriv->lock);
	}
	*chan_ret = chan;
	return 0;
}
246 | |||
247 | struct nouveau_channel * | ||
248 | nouveau_channel_get_unlocked(struct nouveau_channel *ref) | ||
249 | { | ||
250 | struct nouveau_channel *chan = NULL; | ||
251 | |||
252 | if (likely(ref && atomic_inc_not_zero(&ref->users))) | ||
253 | nouveau_channel_ref(ref, &chan); | ||
254 | |||
255 | return chan; | ||
256 | } | ||
257 | |||
258 | struct nouveau_channel * | ||
259 | nouveau_channel_get(struct drm_file *file_priv, int id) | ||
260 | { | ||
261 | struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv); | ||
262 | struct nouveau_channel *chan; | ||
263 | |||
264 | spin_lock(&fpriv->lock); | ||
265 | list_for_each_entry(chan, &fpriv->channels, list) { | ||
266 | if (chan->id == id) { | ||
267 | chan = nouveau_channel_get_unlocked(chan); | ||
268 | spin_unlock(&fpriv->lock); | ||
269 | mutex_lock(&chan->mutex); | ||
270 | return chan; | ||
271 | } | ||
272 | } | ||
273 | spin_unlock(&fpriv->lock); | ||
274 | |||
275 | return ERR_PTR(-EINVAL); | ||
276 | } | ||
277 | |||
/* Drop a user reference on *@pchan (which must NOT hold the channel
 * mutex).  When the last user goes away the channel is fully torn down:
 * idled, engine contexts destroyed, removed from the global table, and
 * its resources released.  *@pchan is NULLed in all cases.
 */
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	int i;

	/* decrement the refcount, and we're done if there's still refs */
	if (likely(!atomic_dec_and_test(&chan->users))) {
		nouveau_channel_ref(NULL, pchan);
		return;
	}

	/* no one wants the channel anymore */
	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
	nouveau_debugfs_channel_fini(chan);

	/* give it chance to idle */
	nouveau_channel_idle(chan);

	/* destroy the engine specific contexts */
	/* reverse order, so dependent engines go before the FIFO context */
	for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
		if (chan->engctx[i])
			dev_priv->eng[i]->context_del(chan, i);
	}

	/* aside from its resources, the channel should now be dead,
	 * remove it from the channel list
	 */
	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	/* destroy any resources the channel owned */
	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
	if (chan->pushbuf_bo) {
		nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
		nouveau_bo_unmap(chan->pushbuf_bo);
		nouveau_bo_unpin(chan->pushbuf_bo);
		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
	}
	nouveau_ramht_ref(NULL, &chan->ramht, chan);
	nouveau_notifier_takedown_channel(chan);
	nouveau_gpuobj_channel_takedown(chan);

	/* drop the struct reference; the kref release frees chan */
	nouveau_channel_ref(NULL, pchan);
}
327 | |||
328 | void | ||
329 | nouveau_channel_put(struct nouveau_channel **pchan) | ||
330 | { | ||
331 | mutex_unlock(&(*pchan)->mutex); | ||
332 | nouveau_channel_put_unlocked(pchan); | ||
333 | } | ||
334 | |||
335 | static void | ||
336 | nouveau_channel_del(struct kref *ref) | ||
337 | { | ||
338 | struct nouveau_channel *chan = | ||
339 | container_of(ref, struct nouveau_channel, ref); | ||
340 | |||
341 | kfree(chan); | ||
342 | } | ||
343 | |||
344 | void | ||
345 | nouveau_channel_ref(struct nouveau_channel *chan, | ||
346 | struct nouveau_channel **pchan) | ||
347 | { | ||
348 | if (chan) | ||
349 | kref_get(&chan->ref); | ||
350 | |||
351 | if (*pchan) | ||
352 | kref_put(&(*pchan)->ref, nouveau_channel_del); | ||
353 | |||
354 | *pchan = chan; | ||
355 | } | ||
356 | |||
357 | int | ||
358 | nouveau_channel_idle(struct nouveau_channel *chan) | ||
359 | { | ||
360 | struct drm_device *dev = chan->dev; | ||
361 | struct nouveau_fence *fence = NULL; | ||
362 | int ret; | ||
363 | |||
364 | ret = nouveau_fence_new(chan, &fence); | ||
365 | if (!ret) { | ||
366 | ret = nouveau_fence_wait(fence, false, false); | ||
367 | nouveau_fence_unref(&fence); | ||
368 | } | ||
369 | |||
370 | if (ret) | ||
371 | NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); | ||
372 | return ret; | ||
373 | } | ||
374 | |||
/* cleans up all the fifos from file_priv */
/* Called on file close: drop the per-file user reference of every channel
 * still owned by @file_priv, tearing each one down if that was its last
 * user.
 */
void
nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan;
	int i;

	/* no FIFO engine means no channels were ever created */
	if (!pfifo)
		return;

	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
	for (i = 0; i < pfifo->channels; i++) {
		chan = nouveau_channel_get(file_priv, i);
		if (IS_ERR(chan))
			continue;

		/* NOTE(review): list_del without fpriv->lock — presumably
		 * safe because nothing else touches the list at file-close
		 * time; confirm against other users of fpriv->channels.
		 */
		list_del(&chan->list);
		/* drop the creation-time user count, then the get() above;
		 * together these destroy the channel if no one else holds it
		 */
		atomic_dec(&chan->users);
		nouveau_channel_put(&chan);
	}
}