Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  1294
1 file changed, 1294 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 00000000000..93379bb81be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1294 @@
1 | /* | ||
2 | * Copyright (C) 2006 Ben Skeggs. | ||
3 | * | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining | ||
7 | * a copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial | ||
16 | * portions of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
19 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
20 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
21 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
22 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
23 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
24 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * Authors: | ||
30 | * Ben Skeggs <darktama@iinet.net.au> | ||
31 | */ | ||
32 | |||
33 | #include "drmP.h" | ||
34 | #include "drm.h" | ||
35 | #include "nouveau_drv.h" | ||
36 | #include "nouveau_drm.h" | ||
37 | |||
38 | /* NVidia uses context objects to drive drawing operations. | ||
39 | |||
40 | Context objects can be selected into 8 subchannels in the FIFO, | ||
41 | and then used via DMA command buffers. | ||
42 | |||
43 | A context object is referenced by a user-defined handle (CARD32). The HW | ||
44 | looks up graphics objects in a hash table in the instance RAM. | ||
45 | |||
46 | An entry in the hash table consists of 2 CARD32s. The first CARD32 contains | ||
47 | the handle, the second one a bitfield that contains the address of the | ||
48 | object in instance RAM. | ||
49 | |||
50 | The format of the second CARD32 seems to be: | ||
51 | |||
52 | NV4 to NV30: | ||
53 | |||
54 | 15: 0 instance_addr >> 4 | ||
55 | 17:16 engine (here uses 1 = graphics) | ||
56 | 28:24 channel id (here uses 0) | ||
57 | 31 valid (use 1) | ||
58 | |||
59 | NV40: | ||
60 | |||
61 | 15: 0 instance_addr >> 4 (maybe 19-0) | ||
62 | 21:20 engine (here uses 1 = graphics) | ||
63 | I'm unsure about the other bits, but using 0 seems to work. | ||
64 | |||
65 | The key into the hash table depends on the object handle and channel id and | ||
66 | is given as: | ||
67 | */ | ||
68 | static uint32_t | ||
69 | nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle) | ||
70 | { | ||
71 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
72 | uint32_t hash = 0; | ||
73 | int i; | ||
74 | |||
75 | NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle); | ||
76 | |||
77 | for (i = 32; i > 0; i -= dev_priv->ramht_bits) { | ||
78 | hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); | ||
79 | handle >>= dev_priv->ramht_bits; | ||
80 | } | ||
81 | |||
82 | if (dev_priv->card_type < NV_50) | ||
83 | hash ^= channel << (dev_priv->ramht_bits - 4); | ||
84 | hash <<= 3; | ||
85 | |||
86 | NV_DEBUG(dev, "hash=0x%08x\n", hash); | ||
87 | return hash; | ||
88 | } | ||
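/*
 * For illustration only, not driver code: the same XOR-fold written as a
 * free-standing helper, assuming a hypothetical ramht_bits of 9 (a 4KiB
 * RAMHT holding 512 8-byte entries).  The real value comes from
 * dev_priv->ramht_bits and differs per card; pre-NV50 additionally folds
 * the channel id in, and the final shift turns the slot index into the
 * byte offset of an 8-byte hash table entry.
 *
 *	static uint32_t example_ramht_hash(uint32_t handle, int chid)
 *	{
 *		const int ramht_bits = 9;
 *		uint32_t hash = 0;
 *		int i;
 *
 *		for (i = 32; i > 0; i -= ramht_bits) {
 *			hash ^= handle & ((1 << ramht_bits) - 1);
 *			handle >>= ramht_bits;
 *		}
 *		hash ^= chid << (ramht_bits - 4);
 *		return hash << 3;
 *	}
 */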
89 | |||
90 | static int | ||
91 | nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht, | ||
92 | uint32_t offset) | ||
93 | { | ||
94 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
95 | uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4); | ||
96 | |||
97 | if (dev_priv->card_type < NV_40) | ||
98 | return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0); | ||
99 | return (ctx != 0); | ||
100 | } | ||
101 | |||
102 | static int | ||
103 | nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | ||
104 | { | ||
105 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
106 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
107 | struct nouveau_channel *chan = ref->channel; | ||
108 | struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; | ||
109 | uint32_t ctx, co, ho; | ||
110 | |||
111 | if (!ramht) { | ||
112 | NV_ERROR(dev, "No hash table!\n"); | ||
113 | return -EINVAL; | ||
114 | } | ||
115 | |||
116 | if (dev_priv->card_type < NV_40) { | ||
117 | ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) | | ||
118 | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | | ||
119 | (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); | ||
120 | } else | ||
121 | if (dev_priv->card_type < NV_50) { | ||
122 | ctx = (ref->instance >> 4) | | ||
123 | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | | ||
124 | (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); | ||
125 | } else { | ||
126 | if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { | ||
127 | ctx = (ref->instance << 10) | 2; | ||
128 | } else { | ||
129 | ctx = (ref->instance >> 4) | | ||
130 | ((ref->gpuobj->engine << | ||
131 | NV40_RAMHT_CONTEXT_ENGINE_SHIFT)); | ||
132 | } | ||
133 | } | ||
134 | |||
135 | instmem->prepare_access(dev, true); | ||
136 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); | ||
137 | do { | ||
138 | if (!nouveau_ramht_entry_valid(dev, ramht, co)) { | ||
139 | NV_DEBUG(dev, | ||
140 | "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | ||
141 | chan->id, co, ref->handle, ctx); | ||
142 | nv_wo32(dev, ramht, (co + 0)/4, ref->handle); | ||
143 | nv_wo32(dev, ramht, (co + 4)/4, ctx); | ||
144 | |||
145 | list_add_tail(&ref->list, &chan->ramht_refs); | ||
146 | instmem->finish_access(dev); | ||
147 | return 0; | ||
148 | } | ||
149 | NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", | ||
150 | chan->id, co, nv_ro32(dev, ramht, co/4)); | ||
151 | |||
152 | co += 8; | ||
153 | if (co >= dev_priv->ramht_size) | ||
154 | co = 0; | ||
155 | } while (co != ho); | ||
156 | instmem->finish_access(dev); | ||
157 | |||
158 | NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id); | ||
159 | return -ENOMEM; | ||
160 | } | ||
161 | |||
162 | static void | ||
163 | nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) | ||
164 | { | ||
165 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
166 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
167 | struct nouveau_channel *chan = ref->channel; | ||
168 | struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL; | ||
169 | uint32_t co, ho; | ||
170 | |||
171 | if (!ramht) { | ||
172 | NV_ERROR(dev, "No hash table!\n"); | ||
173 | return; | ||
174 | } | ||
175 | |||
176 | instmem->prepare_access(dev, true); | ||
177 | co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); | ||
178 | do { | ||
179 | if (nouveau_ramht_entry_valid(dev, ramht, co) && | ||
180 | (ref->handle == nv_ro32(dev, ramht, (co/4)))) { | ||
181 | NV_DEBUG(dev, | ||
182 | "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n", | ||
183 | chan->id, co, ref->handle, | ||
184 | nv_ro32(dev, ramht, (co + 4)/4)); | ||
185 | nv_wo32(dev, ramht, (co + 0)/4, 0x00000000); | ||
186 | nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); | ||
187 | |||
188 | list_del(&ref->list); | ||
189 | instmem->finish_access(dev); | ||
190 | return; | ||
191 | } | ||
192 | |||
193 | co += 8; | ||
194 | if (co >= dev_priv->ramht_size) | ||
195 | co = 0; | ||
196 | } while (co != ho); | ||
197 | list_del(&ref->list); | ||
198 | instmem->finish_access(dev); | ||
199 | |||
200 | NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", | ||
201 | chan->id, ref->handle); | ||
202 | } | ||
203 | |||
204 | int | ||
205 | nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, | ||
206 | uint32_t size, int align, uint32_t flags, | ||
207 | struct nouveau_gpuobj **gpuobj_ret) | ||
208 | { | ||
209 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
210 | struct nouveau_engine *engine = &dev_priv->engine; | ||
211 | struct nouveau_gpuobj *gpuobj; | ||
212 | struct mem_block *pramin = NULL; | ||
213 | int ret; | ||
214 | |||
215 | NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", | ||
216 | chan ? chan->id : -1, size, align, flags); | ||
217 | |||
218 | if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) | ||
219 | return -EINVAL; | ||
220 | |||
221 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | ||
222 | if (!gpuobj) | ||
223 | return -ENOMEM; | ||
224 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | ||
225 | gpuobj->flags = flags; | ||
226 | gpuobj->im_channel = chan; | ||
227 | |||
228 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | ||
229 | |||
230 | /* Choose between global instmem heap, and per-channel private | ||
231 | * instmem heap. On <NV50 allow requests for private instmem | ||
232 | * to be satisfied from global heap if no per-channel area | ||
233 | * available. | ||
234 | */ | ||
235 | if (chan) { | ||
236 | if (chan->ramin_heap) { | ||
237 | NV_DEBUG(dev, "private heap\n"); | ||
238 | pramin = chan->ramin_heap; | ||
239 | } else | ||
240 | if (dev_priv->card_type < NV_50) { | ||
241 | NV_DEBUG(dev, "global heap fallback\n"); | ||
242 | pramin = dev_priv->ramin_heap; | ||
243 | } | ||
244 | } else { | ||
245 | NV_DEBUG(dev, "global heap\n"); | ||
246 | pramin = dev_priv->ramin_heap; | ||
247 | } | ||
248 | |||
249 | if (!pramin) { | ||
250 | NV_ERROR(dev, "No PRAMIN heap!\n"); | ||
251 | return -EINVAL; | ||
252 | } | ||
253 | |||
254 | if (!chan) { | ||
255 | ret = engine->instmem.populate(dev, gpuobj, &size); | ||
256 | if (ret) { | ||
257 | nouveau_gpuobj_del(dev, &gpuobj); | ||
258 | return ret; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | /* Allocate a chunk of the PRAMIN aperture */ | ||
263 | gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, | ||
264 | drm_order(align), | ||
265 | (struct drm_file *)-2, 0); | ||
266 | if (!gpuobj->im_pramin) { | ||
267 | nouveau_gpuobj_del(dev, &gpuobj); | ||
268 | return -ENOMEM; | ||
269 | } | ||
270 | |||
271 | if (!chan) { | ||
272 | ret = engine->instmem.bind(dev, gpuobj); | ||
273 | if (ret) { | ||
274 | nouveau_gpuobj_del(dev, &gpuobj); | ||
275 | return ret; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { | ||
280 | int i; | ||
281 | |||
282 | engine->instmem.prepare_access(dev, true); | ||
283 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | ||
284 | nv_wo32(dev, gpuobj, i/4, 0); | ||
285 | engine->instmem.finish_access(dev); | ||
286 | } | ||
287 | |||
288 | *gpuobj_ret = gpuobj; | ||
289 | return 0; | ||
290 | } | ||
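/*
 * Typical use of nouveau_gpuobj_new() above, mirroring its callers later
 * in this file (a sketch, not a required pattern): allocate a small,
 * zeroed object in the channel's PRAMIN with 16-byte alignment, and drop
 * it again with nouveau_gpuobj_del() once nothing references it.
 *
 *	struct nouveau_gpuobj *obj = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_new(dev, chan, 16, 16,
 *				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
 *				 &obj);
 *	if (ret == 0)
 *		nouveau_gpuobj_del(dev, &obj);
 */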
291 | |||
292 | int | ||
293 | nouveau_gpuobj_early_init(struct drm_device *dev) | ||
294 | { | ||
295 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
296 | |||
297 | NV_DEBUG(dev, "\n"); | ||
298 | |||
299 | INIT_LIST_HEAD(&dev_priv->gpuobj_list); | ||
300 | |||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | int | ||
305 | nouveau_gpuobj_init(struct drm_device *dev) | ||
306 | { | ||
307 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
308 | int ret; | ||
309 | |||
310 | NV_DEBUG(dev, "\n"); | ||
311 | |||
312 | if (dev_priv->card_type < NV_50) { | ||
313 | ret = nouveau_gpuobj_new_fake(dev, | ||
314 | dev_priv->ramht_offset, ~0, dev_priv->ramht_size, | ||
315 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS, | ||
316 | &dev_priv->ramht, NULL); | ||
317 | if (ret) | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | void | ||
325 | nouveau_gpuobj_takedown(struct drm_device *dev) | ||
326 | { | ||
327 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
328 | |||
329 | NV_DEBUG(dev, "\n"); | ||
330 | |||
331 | nouveau_gpuobj_del(dev, &dev_priv->ramht); | ||
332 | } | ||
333 | |||
334 | void | ||
335 | nouveau_gpuobj_late_takedown(struct drm_device *dev) | ||
336 | { | ||
337 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
338 | struct nouveau_gpuobj *gpuobj = NULL; | ||
339 | struct list_head *entry, *tmp; | ||
340 | |||
341 | NV_DEBUG(dev, "\n"); | ||
342 | |||
343 | list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) { | ||
344 | gpuobj = list_entry(entry, struct nouveau_gpuobj, list); | ||
345 | |||
346 | NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n", | ||
347 | gpuobj, gpuobj->refcount); | ||
348 | gpuobj->refcount = 0; | ||
349 | nouveau_gpuobj_del(dev, &gpuobj); | ||
350 | } | ||
351 | } | ||
352 | |||
353 | int | ||
354 | nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) | ||
355 | { | ||
356 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
357 | struct nouveau_engine *engine = &dev_priv->engine; | ||
358 | struct nouveau_gpuobj *gpuobj; | ||
359 | int i; | ||
360 | |||
361 | NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL); | ||
362 | |||
363 | if (!dev_priv || !pgpuobj || !(*pgpuobj)) | ||
364 | return -EINVAL; | ||
365 | gpuobj = *pgpuobj; | ||
366 | |||
367 | if (gpuobj->refcount != 0) { | ||
368 | NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount); | ||
369 | return -EINVAL; | ||
370 | } | ||
371 | |||
372 | if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { | ||
373 | engine->instmem.prepare_access(dev, true); | ||
374 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | ||
375 | nv_wo32(dev, gpuobj, i/4, 0); | ||
376 | engine->instmem.finish_access(dev); | ||
377 | } | ||
378 | |||
379 | if (gpuobj->dtor) | ||
380 | gpuobj->dtor(dev, gpuobj); | ||
381 | |||
382 | if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE)) | ||
383 | engine->instmem.clear(dev, gpuobj); | ||
384 | |||
385 | if (gpuobj->im_pramin) { | ||
386 | if (gpuobj->flags & NVOBJ_FLAG_FAKE) | ||
387 | kfree(gpuobj->im_pramin); | ||
388 | else | ||
389 | nouveau_mem_free_block(gpuobj->im_pramin); | ||
390 | } | ||
391 | |||
392 | list_del(&gpuobj->list); | ||
393 | |||
394 | *pgpuobj = NULL; | ||
395 | kfree(gpuobj); | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static int | ||
400 | nouveau_gpuobj_instance_get(struct drm_device *dev, | ||
401 | struct nouveau_channel *chan, | ||
402 | struct nouveau_gpuobj *gpuobj, uint32_t *inst) | ||
403 | { | ||
404 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
405 | struct nouveau_gpuobj *cpramin; | ||
406 | |||
407 | /* <NV50 use PRAMIN address everywhere */ | ||
408 | if (dev_priv->card_type < NV_50) { | ||
409 | *inst = gpuobj->im_pramin->start; | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | if (chan && gpuobj->im_channel != chan) { | ||
414 | NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n", | ||
415 | gpuobj->im_channel->id, chan->id); | ||
416 | return -EINVAL; | ||
417 | } | ||
418 | |||
419 | /* NV50 channel-local instance */ | ||
420 | if (chan) { | ||
421 | cpramin = chan->ramin->gpuobj; | ||
422 | *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | /* NV50 global (VRAM) instance */ | ||
427 | if (!gpuobj->im_channel) { | ||
428 | /* ...from global heap */ | ||
429 | if (!gpuobj->im_backing) { | ||
430 | NV_ERROR(dev, "AII, no VRAM backing gpuobj\n"); | ||
431 | return -EINVAL; | ||
432 | } | ||
433 | *inst = gpuobj->im_backing_start; | ||
434 | return 0; | ||
435 | } else { | ||
436 | /* ...from local heap */ | ||
437 | cpramin = gpuobj->im_channel->ramin->gpuobj; | ||
438 | *inst = cpramin->im_backing_start + | ||
439 | (gpuobj->im_pramin->start - cpramin->im_pramin->start); | ||
440 | return 0; | ||
441 | } | ||
442 | |||
443 | return -EINVAL; | ||
444 | } | ||
445 | |||
446 | int | ||
447 | nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan, | ||
448 | uint32_t handle, struct nouveau_gpuobj *gpuobj, | ||
449 | struct nouveau_gpuobj_ref **ref_ret) | ||
450 | { | ||
451 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
452 | struct nouveau_gpuobj_ref *ref; | ||
453 | uint32_t instance; | ||
454 | int ret; | ||
455 | |||
456 | NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n", | ||
457 | chan ? chan->id : -1, handle, gpuobj); | ||
458 | |||
459 | if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL)) | ||
460 | return -EINVAL; | ||
461 | |||
462 | if (!chan && !ref_ret) | ||
463 | return -EINVAL; | ||
464 | |||
465 | if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) { | ||
466 | /* sw object */ | ||
467 | instance = 0x40; | ||
468 | } else { | ||
469 | ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance); | ||
470 | if (ret) | ||
471 | return ret; | ||
472 | } | ||
473 | |||
474 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | ||
475 | if (!ref) | ||
476 | return -ENOMEM; | ||
477 | INIT_LIST_HEAD(&ref->list); | ||
478 | ref->gpuobj = gpuobj; | ||
479 | ref->channel = chan; | ||
480 | ref->instance = instance; | ||
481 | |||
482 | if (!ref_ret) { | ||
483 | ref->handle = handle; | ||
484 | |||
485 | ret = nouveau_ramht_insert(dev, ref); | ||
486 | if (ret) { | ||
487 | kfree(ref); | ||
488 | return ret; | ||
489 | } | ||
490 | } else { | ||
491 | ref->handle = ~0; | ||
492 | *ref_ret = ref; | ||
493 | } | ||
494 | |||
495 | ref->gpuobj->refcount++; | ||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref) | ||
500 | { | ||
501 | struct nouveau_gpuobj_ref *ref; | ||
502 | |||
503 | NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL); | ||
504 | |||
505 | if (!dev || !pref || *pref == NULL) | ||
506 | return -EINVAL; | ||
507 | ref = *pref; | ||
508 | |||
509 | if (ref->handle != ~0) | ||
510 | nouveau_ramht_remove(dev, ref); | ||
511 | |||
512 | if (ref->gpuobj) { | ||
513 | ref->gpuobj->refcount--; | ||
514 | |||
515 | if (ref->gpuobj->refcount == 0) { | ||
516 | if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS)) | ||
517 | nouveau_gpuobj_del(dev, &ref->gpuobj); | ||
518 | } | ||
519 | } | ||
520 | |||
521 | *pref = NULL; | ||
522 | kfree(ref); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | int | ||
527 | nouveau_gpuobj_new_ref(struct drm_device *dev, | ||
528 | struct nouveau_channel *oc, struct nouveau_channel *rc, | ||
529 | uint32_t handle, uint32_t size, int align, | ||
530 | uint32_t flags, struct nouveau_gpuobj_ref **ref) | ||
531 | { | ||
532 | struct nouveau_gpuobj *gpuobj = NULL; | ||
533 | int ret; | ||
534 | |||
535 | ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj); | ||
536 | if (ret) | ||
537 | return ret; | ||
538 | |||
539 | ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref); | ||
540 | if (ret) { | ||
541 | nouveau_gpuobj_del(dev, &gpuobj); | ||
542 | return ret; | ||
543 | } | ||
544 | |||
545 | return 0; | ||
546 | } | ||
547 | |||
548 | int | ||
549 | nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle, | ||
550 | struct nouveau_gpuobj_ref **ref_ret) | ||
551 | { | ||
552 | struct nouveau_gpuobj_ref *ref; | ||
553 | struct list_head *entry, *tmp; | ||
554 | |||
555 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | ||
556 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | ||
557 | |||
558 | if (ref->handle == handle) { | ||
559 | if (ref_ret) | ||
560 | *ref_ret = ref; | ||
561 | return 0; | ||
562 | } | ||
563 | } | ||
564 | |||
565 | return -EINVAL; | ||
566 | } | ||
567 | |||
568 | int | ||
569 | nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, | ||
570 | uint32_t b_offset, uint32_t size, | ||
571 | uint32_t flags, struct nouveau_gpuobj **pgpuobj, | ||
572 | struct nouveau_gpuobj_ref **pref) | ||
573 | { | ||
574 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
575 | struct nouveau_gpuobj *gpuobj = NULL; | ||
576 | int i; | ||
577 | |||
578 | NV_DEBUG(dev, | ||
579 | "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n", | ||
580 | p_offset, b_offset, size, flags); | ||
581 | |||
582 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | ||
583 | if (!gpuobj) | ||
584 | return -ENOMEM; | ||
585 | NV_DEBUG(dev, "gpuobj %p\n", gpuobj); | ||
586 | gpuobj->im_channel = NULL; | ||
587 | gpuobj->flags = flags | NVOBJ_FLAG_FAKE; | ||
588 | |||
589 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | ||
590 | |||
591 | if (p_offset != ~0) { | ||
592 | gpuobj->im_pramin = kzalloc(sizeof(struct mem_block), | ||
593 | GFP_KERNEL); | ||
594 | if (!gpuobj->im_pramin) { | ||
595 | nouveau_gpuobj_del(dev, &gpuobj); | ||
596 | return -ENOMEM; | ||
597 | } | ||
598 | gpuobj->im_pramin->start = p_offset; | ||
599 | gpuobj->im_pramin->size = size; | ||
600 | } | ||
601 | |||
602 | if (b_offset != ~0) { | ||
603 | gpuobj->im_backing = (struct nouveau_bo *)-1; | ||
604 | gpuobj->im_backing_start = b_offset; | ||
605 | } | ||
606 | |||
607 | if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { | ||
608 | dev_priv->engine.instmem.prepare_access(dev, true); | ||
609 | for (i = 0; i < gpuobj->im_pramin->size; i += 4) | ||
610 | nv_wo32(dev, gpuobj, i/4, 0); | ||
611 | dev_priv->engine.instmem.finish_access(dev); | ||
612 | } | ||
613 | |||
614 | if (pref) { | ||
615 | i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref); | ||
616 | if (i) { | ||
617 | nouveau_gpuobj_del(dev, &gpuobj); | ||
618 | return i; | ||
619 | } | ||
620 | } | ||
621 | |||
622 | if (pgpuobj) | ||
623 | *pgpuobj = gpuobj; | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | |||
628 | static uint32_t | ||
629 | nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) | ||
630 | { | ||
631 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
632 | |||
633 | /*XXX: dodgy hack for now */ | ||
634 | if (dev_priv->card_type >= NV_50) | ||
635 | return 24; | ||
636 | if (dev_priv->card_type >= NV_40) | ||
637 | return 32; | ||
638 | return 16; | ||
639 | } | ||
640 | |||
641 | /* | ||
642 | DMA objects are used to reference a piece of memory in the | ||
643 | framebuffer, PCI or AGP address space. Each object is 16 bytes big | ||
644 | and looks as follows: | ||
645 | |||
646 | entry[0] | ||
647 | 11:0 class (seems like I can always use 0 here) | ||
648 | 12 page table present? | ||
649 | 13 page entry linear? | ||
650 | 15:14 access: 0 rw, 1 ro, 2 wo | ||
651 | 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP | ||
652 | 31:20 dma adjust (bits 0-11 of the address) | ||
653 | entry[1] | ||
654 | dma limit (size of transfer) | ||
655 | entry[X] | ||
656 | 1 0 readonly, 1 readwrite | ||
657 | 31:12 dma frame address of the page (bits 12-31 of the address) | ||
658 | entry[N] | ||
659 | page table terminator, same value as the first pte (as nvidia does); | ||
660 | rivatv uses 0xffffffff instead | ||
661 | |||
662 | Non-linear page tables need a list of frame addresses afterwards; | ||
663 | the rivatv project has some info on this. | ||
664 | |||
665 | The method below creates a DMA object in instance RAM and returns a handle | ||
666 | to it that can be used to set up context objects. | ||
667 | */ | ||
668 | int | ||
669 | nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, | ||
670 | uint64_t offset, uint64_t size, int access, | ||
671 | int target, struct nouveau_gpuobj **gpuobj) | ||
672 | { | ||
673 | struct drm_device *dev = chan->dev; | ||
674 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
675 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
676 | int ret; | ||
677 | |||
678 | NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", | ||
679 | chan->id, class, offset, size); | ||
680 | NV_DEBUG(dev, "access=%d target=%d\n", access, target); | ||
681 | |||
682 | switch (target) { | ||
683 | case NV_DMA_TARGET_AGP: | ||
684 | offset += dev_priv->gart_info.aper_base; | ||
685 | break; | ||
686 | default: | ||
687 | break; | ||
688 | } | ||
689 | |||
690 | ret = nouveau_gpuobj_new(dev, chan, | ||
691 | nouveau_gpuobj_class_instmem_size(dev, class), | ||
692 | 16, NVOBJ_FLAG_ZERO_ALLOC | | ||
693 | NVOBJ_FLAG_ZERO_FREE, gpuobj); | ||
694 | if (ret) { | ||
695 | NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); | ||
696 | return ret; | ||
697 | } | ||
698 | |||
699 | instmem->prepare_access(dev, true); | ||
700 | |||
701 | if (dev_priv->card_type < NV_50) { | ||
702 | uint32_t frame, adjust, pte_flags = 0; | ||
703 | |||
704 | if (access != NV_DMA_ACCESS_RO) | ||
705 | pte_flags |= (1<<1); | ||
706 | adjust = offset & 0x00000fff; | ||
707 | frame = offset & ~0x00000fff; | ||
708 | |||
709 | nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) | | ||
710 | (adjust << 20) | | ||
711 | (access << 14) | | ||
712 | (target << 16) | | ||
713 | class)); | ||
714 | nv_wo32(dev, *gpuobj, 1, size - 1); | ||
715 | nv_wo32(dev, *gpuobj, 2, frame | pte_flags); | ||
716 | nv_wo32(dev, *gpuobj, 3, frame | pte_flags); | ||
717 | } else { | ||
718 | uint64_t limit = offset + size - 1; | ||
719 | uint32_t flags0, flags5; | ||
720 | |||
721 | if (target == NV_DMA_TARGET_VIDMEM) { | ||
722 | flags0 = 0x00190000; | ||
723 | flags5 = 0x00010000; | ||
724 | } else { | ||
725 | flags0 = 0x7fc00000; | ||
726 | flags5 = 0x00080000; | ||
727 | } | ||
728 | |||
729 | nv_wo32(dev, *gpuobj, 0, flags0 | class); | ||
730 | nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit)); | ||
731 | nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset)); | ||
732 | nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) | | ||
733 | (upper_32_bits(offset) & 0xff)); | ||
734 | nv_wo32(dev, *gpuobj, 5, flags5); | ||
735 | } | ||
736 | |||
737 | instmem->finish_access(dev); | ||
738 | |||
739 | (*gpuobj)->engine = NVOBJ_ENGINE_SW; | ||
740 | (*gpuobj)->class = class; | ||
741 | return 0; | ||
742 | } | ||
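/*
 * Example use of nouveau_gpuobj_dma_new() above, mirroring the VRAM ctxdma
 * set up in nouveau_gpuobj_channel_init() further down (illustrative only):
 * a read/write DMA object covering all of visible VRAM, then exposed to the
 * channel through a RAMHT reference under a caller-chosen handle vram_h.
 *
 *	struct nouveau_gpuobj *vram = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 *				     dev_priv->fb_available_size,
 *				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
 *				     &vram);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
 */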
743 | |||
744 | int | ||
745 | nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, | ||
746 | uint64_t offset, uint64_t size, int access, | ||
747 | struct nouveau_gpuobj **gpuobj, | ||
748 | uint32_t *o_ret) | ||
749 | { | ||
750 | struct drm_device *dev = chan->dev; | ||
751 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
752 | int ret; | ||
753 | |||
754 | if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || | ||
755 | (dev_priv->card_type >= NV_50 && | ||
756 | dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { | ||
757 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | ||
758 | offset + dev_priv->vm_gart_base, | ||
759 | size, access, NV_DMA_TARGET_AGP, | ||
760 | gpuobj); | ||
761 | if (o_ret) | ||
762 | *o_ret = 0; | ||
763 | } else | ||
764 | if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { | ||
765 | *gpuobj = dev_priv->gart_info.sg_ctxdma; | ||
766 | if (offset & ~0xffffffffULL) { | ||
767 | NV_ERROR(dev, "obj offset exceeds 32-bits\n"); | ||
768 | return -EINVAL; | ||
769 | } | ||
770 | if (o_ret) | ||
771 | *o_ret = (uint32_t)offset; | ||
772 | ret = (*gpuobj != NULL) ? 0 : -EINVAL; | ||
773 | } else { | ||
774 | NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); | ||
775 | return -EINVAL; | ||
776 | } | ||
777 | |||
778 | return ret; | ||
779 | } | ||
780 | |||
781 | /* Context objects in the instance RAM have the following structure. | ||
782 | * On NV40 they are 32 bytes long, on NV30 and earlier 16 bytes. | ||
783 | |||
784 | NV4 - NV30: | ||
785 | |||
786 | entry[0] | ||
787 | 11:0 class | ||
788 | 12 chroma key enable | ||
789 | 13 user clip enable | ||
790 | 14 swizzle enable | ||
791 | 17:15 patch config: | ||
792 | scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre | ||
793 | 18 synchronize enable | ||
794 | 19 endian: 1 big, 0 little | ||
795 | 21:20 dither mode | ||
796 | 23 single step enable | ||
797 | 24 patch status: 0 invalid, 1 valid | ||
798 | 25 context_surface 0: 1 valid | ||
799 | 26 context surface 1: 1 valid | ||
800 | 27 context pattern: 1 valid | ||
801 | 28 context rop: 1 valid | ||
802 | 29,30 context beta, beta4 | ||
803 | entry[1] | ||
804 | 7:0 mono format | ||
805 | 15:8 color format | ||
806 | 31:16 notify instance address | ||
807 | entry[2] | ||
808 | 15:0 dma 0 instance address | ||
809 | 31:16 dma 1 instance address | ||
810 | entry[3] | ||
811 | dma method traps | ||
812 | |||
813 | NV40: | ||
814 | No idea what the exact format is. Here's what can be deduced: | ||
815 | |||
816 | entry[0]: | ||
817 | 11:0 class (maybe uses more bits here?) | ||
818 | 17 user clip enable | ||
819 | 21:19 patch config | ||
820 | 25 patch status valid? | ||
821 | entry[1]: | ||
822 | 15:0 DMA notifier (maybe 20:0) | ||
823 | entry[2]: | ||
824 | 15:0 DMA 0 instance (maybe 20:0) | ||
825 | 24 big endian | ||
826 | entry[3]: | ||
827 | 15:0 DMA 1 instance (maybe 20:0) | ||
828 | entry[4]: | ||
829 | entry[5]: | ||
830 | set to 0? | ||
831 | */ | ||
832 | int | ||
833 | nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, | ||
834 | struct nouveau_gpuobj **gpuobj) | ||
835 | { | ||
836 | struct drm_device *dev = chan->dev; | ||
837 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
838 | int ret; | ||
839 | |||
840 | NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); | ||
841 | |||
842 | ret = nouveau_gpuobj_new(dev, chan, | ||
843 | nouveau_gpuobj_class_instmem_size(dev, class), | ||
844 | 16, | ||
845 | NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, | ||
846 | gpuobj); | ||
847 | if (ret) { | ||
848 | NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); | ||
849 | return ret; | ||
850 | } | ||
851 | |||
852 | dev_priv->engine.instmem.prepare_access(dev, true); | ||
853 | if (dev_priv->card_type >= NV_50) { | ||
854 | nv_wo32(dev, *gpuobj, 0, class); | ||
855 | nv_wo32(dev, *gpuobj, 5, 0x00010000); | ||
856 | } else { | ||
857 | switch (class) { | ||
858 | case NV_CLASS_NULL: | ||
859 | nv_wo32(dev, *gpuobj, 0, 0x00001030); | ||
860 | nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF); | ||
861 | break; | ||
862 | default: | ||
863 | if (dev_priv->card_type >= NV_40) { | ||
864 | nv_wo32(dev, *gpuobj, 0, class); | ||
865 | #ifdef __BIG_ENDIAN | ||
866 | nv_wo32(dev, *gpuobj, 2, 0x01000000); | ||
867 | #endif | ||
868 | } else { | ||
869 | #ifdef __BIG_ENDIAN | ||
870 | nv_wo32(dev, *gpuobj, 0, class | 0x00080000); | ||
871 | #else | ||
872 | nv_wo32(dev, *gpuobj, 0, class); | ||
873 | #endif | ||
874 | } | ||
875 | } | ||
876 | } | ||
877 | dev_priv->engine.instmem.finish_access(dev); | ||
878 | |||
879 | (*gpuobj)->engine = NVOBJ_ENGINE_GR; | ||
880 | (*gpuobj)->class = class; | ||
881 | return 0; | ||
882 | } | ||
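/*
 * Example use of nouveau_gpuobj_gr_new() above, following the pattern of
 * nouveau_ioctl_grobj_alloc() at the end of this file (illustrative only):
 * create a context object for a graphics class and hook it into the
 * channel's RAMHT under a user-supplied handle.
 *
 *	struct nouveau_gpuobj *gr = NULL;
 *	int ret;
 *
 *	ret = nouveau_gpuobj_gr_new(chan, class, &gr);
 *	if (ret == 0)
 *		ret = nouveau_gpuobj_ref_add(dev, chan, handle, gr, NULL);
 */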
883 | |||
884 | static int | ||
885 | nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, | ||
886 | struct nouveau_gpuobj **gpuobj_ret) | ||
887 | { | ||
888 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
889 | struct nouveau_gpuobj *gpuobj; | ||
890 | |||
891 | if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) | ||
892 | return -EINVAL; | ||
893 | |||
894 | gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); | ||
895 | if (!gpuobj) | ||
896 | return -ENOMEM; | ||
897 | gpuobj->engine = NVOBJ_ENGINE_SW; | ||
898 | gpuobj->class = class; | ||
899 | |||
900 | list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); | ||
901 | *gpuobj_ret = gpuobj; | ||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | static int | ||
906 | nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) | ||
907 | { | ||
908 | struct drm_device *dev = chan->dev; | ||
909 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
910 | struct nouveau_gpuobj *pramin = NULL; | ||
911 | uint32_t size; | ||
912 | uint32_t base; | ||
913 | int ret; | ||
914 | |||
915 | NV_DEBUG(dev, "ch%d\n", chan->id); | ||
916 | |||
917 | /* Base amount for object storage (4KiB enough?) */ | ||
918 | size = 0x1000; | ||
919 | base = 0; | ||
920 | |||
921 | /* PGRAPH context */ | ||
922 | |||
923 | if (dev_priv->card_type == NV_50) { | ||
924 | /* Various fixed table thingos */ | ||
925 | size += 0x1400; /* mostly unknown stuff */ | ||
926 | size += 0x4000; /* vm pd */ | ||
927 | base = 0x6000; | ||
928 | /* RAMHT, not sure about setting size yet, 32KiB to be safe */ | ||
929 | size += 0x8000; | ||
930 | /* RAMFC */ | ||
931 | size += 0x1000; | ||
932 | /* PGRAPH context */ | ||
933 | size += 0x70000; | ||
934 | } | ||
935 | |||
936 | NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", | ||
937 | chan->id, size, base); | ||
938 | ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, | ||
939 | &chan->ramin); | ||
940 | if (ret) { | ||
941 | NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret); | ||
942 | return ret; | ||
943 | } | ||
944 | pramin = chan->ramin->gpuobj; | ||
945 | |||
946 | ret = nouveau_mem_init_heap(&chan->ramin_heap, | ||
947 | pramin->im_pramin->start + base, size); | ||
948 | if (ret) { | ||
949 | NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); | ||
950 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | ||
951 | return ret; | ||
952 | } | ||
953 | |||
954 | return 0; | ||
955 | } | ||
956 | |||
957 | int | ||
958 | nouveau_gpuobj_channel_init(struct nouveau_channel *chan, | ||
959 | uint32_t vram_h, uint32_t tt_h) | ||
960 | { | ||
961 | struct drm_device *dev = chan->dev; | ||
962 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
963 | struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; | ||
964 | struct nouveau_gpuobj *vram = NULL, *tt = NULL; | ||
965 | int ret, i; | ||
966 | |||
967 | INIT_LIST_HEAD(&chan->ramht_refs); | ||
968 | |||
969 | NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); | ||
970 | |||
971 | /* Reserve a block of PRAMIN for the channel | ||
972 | *XXX: maybe on <NV50 too at some point | ||
973 | */ | ||
974 | if (0 || dev_priv->card_type == NV_50) { | ||
975 | ret = nouveau_gpuobj_channel_init_pramin(chan); | ||
976 | if (ret) { | ||
977 | NV_ERROR(dev, "init pramin\n"); | ||
978 | return ret; | ||
979 | } | ||
980 | } | ||
981 | |||
982 | /* NV50 VM | ||
983 | * - Allocate per-channel page-directory | ||
984 | * - Map GART and VRAM into the channel's address space at the | ||
985 | * locations determined during init. | ||
986 | */ | ||
987 | if (dev_priv->card_type >= NV_50) { | ||
988 | uint32_t vm_offset, pde; | ||
989 | |||
990 | instmem->prepare_access(dev, true); | ||
991 | |||
992 | vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200; | ||
993 | vm_offset += chan->ramin->gpuobj->im_pramin->start; | ||
994 | |||
995 | ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, | ||
996 | 0, &chan->vm_pd, NULL); | ||
997 | if (ret) { | ||
998 | instmem->finish_access(dev); | ||
999 | return ret; | ||
1000 | } | ||
1001 | for (i = 0; i < 0x4000; i += 8) { | ||
1002 | nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); | ||
1003 | nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); | ||
1004 | } | ||
1005 | |||
1006 | pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2; | ||
1007 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | ||
1008 | dev_priv->gart_info.sg_ctxdma, | ||
1009 | &chan->vm_gart_pt); | ||
1010 | if (ret) { | ||
1011 | instmem->finish_access(dev); | ||
1012 | return ret; | ||
1013 | } | ||
1014 | nv_wo32(dev, chan->vm_pd, pde++, | ||
1015 | chan->vm_gart_pt->instance | 0x03); | ||
1016 | nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); | ||
1017 | |||
1018 | pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2; | ||
1019 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { | ||
1020 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, | ||
1021 | dev_priv->vm_vram_pt[i], | ||
1022 | &chan->vm_vram_pt[i]); | ||
1023 | if (ret) { | ||
1024 | instmem->finish_access(dev); | ||
1025 | return ret; | ||
1026 | } | ||
1027 | |||
1028 | nv_wo32(dev, chan->vm_pd, pde++, | ||
1029 | chan->vm_vram_pt[i]->instance | 0x61); | ||
1030 | nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); | ||
1031 | } | ||
1032 | |||
1033 | instmem->finish_access(dev); | ||
1034 | } | ||
1035 | |||
1036 | /* RAMHT */ | ||
1037 | if (dev_priv->card_type < NV_50) { | ||
1038 | ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht, | ||
1039 | &chan->ramht); | ||
1040 | if (ret) | ||
1041 | return ret; | ||
1042 | } else { | ||
1043 | ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, | ||
1044 | 0x8000, 16, | ||
1045 | NVOBJ_FLAG_ZERO_ALLOC, | ||
1046 | &chan->ramht); | ||
1047 | if (ret) | ||
1048 | return ret; | ||
1049 | } | ||
1050 | |||
1051 | /* VRAM ctxdma */ | ||
1052 | if (dev_priv->card_type >= NV_50) { | ||
1053 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | ||
1054 | 0, dev_priv->vm_end, | ||
1055 | NV_DMA_ACCESS_RW, | ||
1056 | NV_DMA_TARGET_AGP, &vram); | ||
1057 | if (ret) { | ||
1058 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | ||
1059 | return ret; | ||
1060 | } | ||
1061 | } else { | ||
1062 | ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, | ||
1063 | 0, dev_priv->fb_available_size, | ||
1064 | NV_DMA_ACCESS_RW, | ||
1065 | NV_DMA_TARGET_VIDMEM, &vram); | ||
1066 | if (ret) { | ||
1067 | NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); | ||
1068 | return ret; | ||
1069 | } | ||
1070 | } | ||
1071 | |||
1072 | ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL); | ||
1073 | if (ret) { | ||
1074 | NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret); | ||
1075 | return ret; | ||
1076 | } | ||
1077 | |||
1078 | /* TT memory ctxdma */ | ||
1079 | if (dev_priv->card_type >= NV_50) { | ||
1080 | tt = vram; | ||
1081 | } else | ||
1082 | if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { | ||
1083 | ret = nouveau_gpuobj_gart_dma_new(chan, 0, | ||
1084 | dev_priv->gart_info.aper_size, | ||
1085 | NV_DMA_ACCESS_RW, &tt, NULL); | ||
1086 | } else { | ||
1087 | NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); | ||
1088 | ret = -EINVAL; | ||
1089 | } | ||
1090 | |||
1091 | if (ret) { | ||
1092 | NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret); | ||
1093 | return ret; | ||
1094 | } | ||
1095 | |||
1096 | ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL); | ||
1097 | if (ret) { | ||
1098 | NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret); | ||
1099 | return ret; | ||
1100 | } | ||
1101 | |||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | void | ||
1106 | nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) | ||
1107 | { | ||
1108 | struct drm_nouveau_private *dev_priv = chan->dev->dev_private; | ||
1109 | struct drm_device *dev = chan->dev; | ||
1110 | struct list_head *entry, *tmp; | ||
1111 | struct nouveau_gpuobj_ref *ref; | ||
1112 | int i; | ||
1113 | |||
1114 | NV_DEBUG(dev, "ch%d\n", chan->id); | ||
1115 | |||
1116 | if (!chan->ramht_refs.next) | ||
1117 | return; | ||
1118 | |||
1119 | list_for_each_safe(entry, tmp, &chan->ramht_refs) { | ||
1120 | ref = list_entry(entry, struct nouveau_gpuobj_ref, list); | ||
1121 | |||
1122 | nouveau_gpuobj_ref_del(dev, &ref); | ||
1123 | } | ||
1124 | |||
1125 | nouveau_gpuobj_ref_del(dev, &chan->ramht); | ||
1126 | |||
1127 | nouveau_gpuobj_del(dev, &chan->vm_pd); | ||
1128 | nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt); | ||
1129 | for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) | ||
1130 | nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); | ||
1131 | |||
1132 | if (chan->ramin_heap) | ||
1133 | nouveau_mem_takedown(&chan->ramin_heap); | ||
1134 | if (chan->ramin) | ||
1135 | nouveau_gpuobj_ref_del(dev, &chan->ramin); | ||
1136 | |||
1137 | } | ||
1138 | |||
1139 | int | ||
1140 | nouveau_gpuobj_suspend(struct drm_device *dev) | ||
1141 | { | ||
1142 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1143 | struct nouveau_gpuobj *gpuobj; | ||
1144 | int i; | ||
1145 | |||
1146 | if (dev_priv->card_type < NV_50) { | ||
1147 | dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); | ||
1148 | if (!dev_priv->susres.ramin_copy) | ||
1149 | return -ENOMEM; | ||
1150 | |||
1151 | for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) | ||
1152 | dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); | ||
1153 | return 0; | ||
1154 | } | ||
1155 | |||
1156 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | ||
1157 | if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE)) | ||
1158 | continue; | ||
1159 | |||
1160 | gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size); | ||
1161 | if (!gpuobj->im_backing_suspend) { | ||
1162 | nouveau_gpuobj_resume(dev); | ||
1163 | return -ENOMEM; | ||
1164 | } | ||
1165 | |||
1166 | dev_priv->engine.instmem.prepare_access(dev, false); | ||
1167 | for (i = 0; i < gpuobj->im_pramin->size / 4; i++) | ||
1168 | gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); | ||
1169 | dev_priv->engine.instmem.finish_access(dev); | ||
1170 | } | ||
1171 | |||
1172 | return 0; | ||
1173 | } | ||
1174 | |||
1175 | void | ||
1176 | nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) | ||
1177 | { | ||
1178 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1179 | struct nouveau_gpuobj *gpuobj; | ||
1180 | |||
1181 | if (dev_priv->card_type < NV_50) { | ||
1182 | vfree(dev_priv->susres.ramin_copy); | ||
1183 | dev_priv->susres.ramin_copy = NULL; | ||
1184 | return; | ||
1185 | } | ||
1186 | |||
1187 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | ||
1188 | if (!gpuobj->im_backing_suspend) | ||
1189 | continue; | ||
1190 | |||
1191 | vfree(gpuobj->im_backing_suspend); | ||
1192 | gpuobj->im_backing_suspend = NULL; | ||
1193 | } | ||
1194 | } | ||
1195 | |||
1196 | void | ||
1197 | nouveau_gpuobj_resume(struct drm_device *dev) | ||
1198 | { | ||
1199 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1200 | struct nouveau_gpuobj *gpuobj; | ||
1201 | int i; | ||
1202 | |||
1203 | if (dev_priv->card_type < NV_50) { | ||
1204 | for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) | ||
1205 | nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); | ||
1206 | nouveau_gpuobj_suspend_cleanup(dev); | ||
1207 | return; | ||
1208 | } | ||
1209 | |||
1210 | list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { | ||
1211 | if (!gpuobj->im_backing_suspend) | ||
1212 | continue; | ||
1213 | |||
1214 | dev_priv->engine.instmem.prepare_access(dev, true); | ||
1215 | for (i = 0; i < gpuobj->im_pramin->size / 4; i++) | ||
1216 | nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); | ||
1217 | dev_priv->engine.instmem.finish_access(dev); | ||
1218 | } | ||
1219 | |||
1220 | nouveau_gpuobj_suspend_cleanup(dev); | ||
1221 | } | ||
1222 | |||
1223 | int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, | ||
1224 | struct drm_file *file_priv) | ||
1225 | { | ||
1226 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
1227 | struct drm_nouveau_grobj_alloc *init = data; | ||
1228 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
1229 | struct nouveau_pgraph_object_class *grc; | ||
1230 | struct nouveau_gpuobj *gr = NULL; | ||
1231 | struct nouveau_channel *chan; | ||
1232 | int ret; | ||
1233 | |||
1234 | NOUVEAU_CHECK_INITIALISED_WITH_RETURN; | ||
1235 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); | ||
1236 | |||
1237 | if (init->handle == ~0) | ||
1238 | return -EINVAL; | ||
1239 | |||
1240 | grc = pgraph->grclass; | ||
1241 | while (grc->id) { | ||
1242 | if (grc->id == init->class) | ||
1243 | break; | ||
1244 | grc++; | ||
1245 | } | ||
1246 | |||
1247 | if (!grc->id) { | ||
1248 | NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); | ||
1249 | return -EPERM; | ||
1250 | } | ||
1251 | |||
1252 | if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0) | ||
1253 | return -EEXIST; | ||
1254 | |||
1255 | if (!grc->software) | ||
1256 | ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); | ||
1257 | else | ||
1258 | ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); | ||
1259 | |||
1260 | if (ret) { | ||
1261 | NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", | ||
1262 | ret, init->channel, init->handle); | ||
1263 | return ret; | ||
1264 | } | ||
1265 | |||
1266 | ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL); | ||
1267 | if (ret) { | ||
1268 | NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", | ||
1269 | ret, init->channel, init->handle); | ||
1270 | nouveau_gpuobj_del(dev, &gr); | ||
1271 | return ret; | ||
1272 | } | ||
1273 | |||
1274 | return 0; | ||
1275 | } | ||
1276 | |||
1277 | int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, | ||
1278 | struct drm_file *file_priv) | ||
1279 | { | ||
1280 | struct drm_nouveau_gpuobj_free *objfree = data; | ||
1281 | struct nouveau_gpuobj_ref *ref; | ||
1282 | struct nouveau_channel *chan; | ||
1283 | int ret; | ||
1284 | |||
1285 | NOUVEAU_CHECK_INITIALISED_WITH_RETURN; | ||
1286 | NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); | ||
1287 | |||
1288 | ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); | ||
1289 | if (ret) | ||
1290 | return ret; | ||
1291 | nouveau_gpuobj_ref_del(dev, &ref); | ||
1292 | |||
1293 | return 0; | ||
1294 | } | ||