path: root/drivers/gpu/drm/radeon/radeon_object.c
author		Jerome Glisse <jglisse@redhat.com>	2009-06-05 08:42:42 -0400
committer	Dave Airlie <airlied@redhat.com>	2009-06-14 22:01:53 -0400
commit		771fe6b912fca54f03e8a72eb63058b582775362
tree		58aa5469ba8058c2b564d50807395ad6cd7bd7e4 /drivers/gpu/drm/radeon/radeon_object.c
parent		ba4e7d973dd09b66912ac4c0856add8b0703a997
drm/radeon: introduce kernel modesetting for radeon hardware
Add kernel modesetting support to the radeon driver: use the TTM memory manager to manage memory, and DRM/GEM to provide the userspace API. To avoid backward-compatibility issues and to allow a clean design and code base, radeon kernel modesetting uses a different code path than the old radeon/drm driver. When kernel modesetting is enabled, the IOCTLs of the old radeon/drm driver are considered invalid: an error message is printed to the log and they return failure. KMS-enabled userspace uses the new API to talk to the radeon/drm driver.

The new API provides functions to create/destroy/share/mmap buffer objects, which are then managed by the kernel memory manager (here TTM). To submit commands to the GPU, userspace provides a buffer holding the command stream; along with this buffer, userspace has to provide a list of the buffer objects used by the command stream. The kernel radeon driver then places those buffers in GPU-accessible memory and updates the command stream to reflect the positions of the different buffers. The kernel also performs security checks on the command stream provided by the user: we want to catch and forbid any illegal use of the GPU, such as DMA into random system memory or into memory not owned by the process supplying the command stream. This part of the code is still incomplete, which is why we propose this patch as a staging driver addition; future security checks might forbid the current experimental userspace from running.

This code supports the following hardware: R1XX, R2XX, R3XX, R4XX, R5XX (radeons up to the X1950). Work is underway to provide support for R6XX, R7XX and newer hardware (radeons from the HD2XXX to the HD4XXX).

Authors:
    Jerome Glisse <jglisse@redhat.com>
    Dave Airlie <airlied@redhat.com>
    Alex Deucher <alexdeucher@gmail.com>

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
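To make the new API concrete, here is a minimal, illustrative sketch of how an in-kernel caller might allocate, pin and map a buffer object with the helpers this patch adds in radeon_object.c. The function name radeon_object_demo is hypothetical and not part of the patch; rdev is assumed to be a valid struct radeon_device.

	static int radeon_object_demo(struct radeon_device *rdev)
	{
		struct radeon_object *robj;
		uint64_t gpu_addr;
		void *cpu_ptr;
		int r;

		/* Create a 64 KiB kernel-owned BO in VRAM (no GEM object attached). */
		r = radeon_object_create(rdev, NULL, 64 * 1024, true,
					 RADEON_GEM_DOMAIN_VRAM, true, &robj);
		if (r)
			return r;
		/* Pin it so it cannot be evicted and learn its GPU address. */
		r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
		if (r)
			goto out_unref;
		/* Map it into the kernel address space for CPU access. */
		r = radeon_object_kmap(robj, &cpu_ptr);
		if (r)
			goto out_unpin;

		/* ... fill cpu_ptr, point the hardware at gpu_addr ... */

		radeon_object_kunmap(robj);
	out_unpin:
		radeon_object_unpin(robj);
	out_unref:
		radeon_object_unref(&robj);
		return r;
	}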
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_object.c	511
1 file changed, 511 insertions, 0 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
new file mode 100644
index 000000000000..983e8df5e000
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -0,0 +1,511 @@
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

struct radeon_object {
	struct ttm_buffer_object tobj;	/* underlying TTM buffer object */
	struct list_head list;		/* entry on rdev->gem.objects */
	struct radeon_device *rdev;
	struct drm_gem_object *gobj;	/* associated GEM object, may be NULL */
	struct ttm_bo_kmap_obj kmap;	/* kernel mapping state */
	unsigned pin_count;
	uint64_t gpu_addr;		/* GPU address, valid once placed */
	void *kptr;			/* kernel virtual address when mapped */
	bool is_iomem;
};

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

52/*
53 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
54 * function are calling it.
55 */
56
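/*
 * A minimal sketch of that reserve/unreserve pattern, as the functions
 * below apply it (illustrative only; radeon_object_wait() is a real
 * in-file user):
 *
 *	r = radeon_object_reserve(robj, true);
 *	if (unlikely(r != 0))
 *		return r;
 *	... exclusive access to robj here ...
 *	radeon_object_unreserve(robj);
 */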
static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
	ttm_bo_unreserve(&robj->tobj);
}

static void radeon_ttm_object_destroy(struct ttm_buffer_object *tobj)
{
	struct radeon_object *robj;

	robj = container_of(tobj, struct radeon_object, tobj);
	list_del_init(&robj->list);
	kfree(robj);
}

static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
	/* Default gpu address */
	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
	if (robj->tobj.mem.mm_node == NULL) {
		return;
	}
	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
	switch (robj->tobj.mem.mem_type) {
	case TTM_PL_VRAM:
		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
		return;
	}
}

static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		flags |= TTM_PL_FLAG_VRAM;
	}
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		flags |= TTM_PL_FLAG_TT;
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		flags |= TTM_PL_FLAG_SYSTEM;
	}
	if (!flags) {
		flags |= TTM_PL_FLAG_SYSTEM;
	}
	return flags;
}

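/*
 * Worked example (illustrative): RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT
 * maps to TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT, and a domain of 0 falls back to
 * TTM_PL_FLAG_SYSTEM, so a buffer always has at least one valid placement.
 */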
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr)
{
	struct radeon_object *robj;
	enum ttm_bo_type type;
	uint32_t flags;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*robj_ptr = NULL;
	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
	if (robj == NULL) {
		return -ENOMEM;
	}
	robj->rdev = rdev;
	robj->gobj = gobj;
	INIT_LIST_HEAD(&robj->list);

	flags = radeon_object_flags_from_domain(domain);
	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
				   0, 0, false, NULL, size,
				   &radeon_ttm_object_destroy);
	if (unlikely(r != 0)) {
		/* TTM calls radeon_ttm_object_destroy() if an error happens */
		DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
			  size, flags, 0);
		return r;
	}
	*robj_ptr = robj;
	if (gobj) {
		list_add_tail(&robj->list, &rdev->gem.objects);
	}
	return 0;
}

int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
	int r;

	spin_lock(&robj->tobj.lock);
	if (robj->kptr) {
		if (ptr) {
			*ptr = robj->kptr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
	if (r) {
		return r;
	}
	spin_lock(&robj->tobj.lock);
	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
	spin_unlock(&robj->tobj.lock);
	if (ptr) {
		*ptr = robj->kptr;
	}
	return 0;
}

void radeon_object_kunmap(struct radeon_object *robj)
{
	spin_lock(&robj->tobj.lock);
	if (robj->kptr == NULL) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	robj->kptr = NULL;
	spin_unlock(&robj->tobj.lock);
	ttm_bo_kunmap(&robj->kmap);
}

void radeon_object_unref(struct radeon_object **robj)
{
	struct ttm_buffer_object *tobj;

	if ((*robj) == NULL) {
		return;
	}
	tobj = &((*robj)->tobj);
	ttm_bo_unref(&tobj);
	if (tobj == NULL) {
		*robj = NULL;
	}
}

int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
	*offset = robj->tobj.addr_space_offset;
	return 0;
}

int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr)
{
	uint32_t flags;
	uint32_t tmp;
	void *fbptr;
	int r;

	flags = radeon_object_flags_from_domain(domain);
	spin_lock(&robj->tobj.lock);
	if (robj->pin_count) {
		robj->pin_count++;
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
		spin_unlock(&robj->tobj.lock);
		return 0;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for pinning.\n");
		return r;
	}
	if (robj->rdev->fbdev_robj == robj) {
		mutex_lock(&robj->rdev->fbdev_info->lock);
		radeon_object_kunmap(robj);
	}
	tmp = robj->tobj.mem.placement;
	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to pin object.\n");
	} else {
		radeon_object_gpu_addr(robj);
		if (gpu_addr != NULL) {
			*gpu_addr = robj->gpu_addr;
		}
		robj->pin_count = 1;
	}
	radeon_object_unreserve(robj);
	if (robj->rdev->fbdev_robj == robj) {
		if (!r) {
			r = radeon_object_kmap(robj, &fbptr);
		}
		if (!r) {
			robj->rdev->fbdev_info->screen_base = fbptr;
			robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
		}
		mutex_unlock(&robj->rdev->fbdev_info->lock);
	}
	return r;
}

void radeon_object_unpin(struct radeon_object *robj)
{
	uint32_t flags;
	void *fbptr;
	int r;

	spin_lock(&robj->tobj.lock);
	if (!robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
		return;
	}
	robj->pin_count--;
	if (robj->pin_count) {
		spin_unlock(&robj->tobj.lock);
		return;
	}
	spin_unlock(&robj->tobj.lock);
	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
		return;
	}
	if (robj->rdev->fbdev_robj == robj) {
		mutex_lock(&robj->rdev->fbdev_info->lock);
		radeon_object_kunmap(robj);
	}
	flags = robj->tobj.mem.placement;
	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_buffer_object_validate(&robj->tobj,
				       robj->tobj.proposed_placement,
				       false, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to unpin buffer.\n");
	}
	radeon_object_unreserve(robj);
	if (robj->rdev->fbdev_robj == robj) {
		if (!r) {
			r = radeon_object_kmap(robj, &fbptr);
		}
		if (!r) {
			robj->rdev->fbdev_info->screen_base = fbptr;
			robj->rdev->fbdev_info->fix.smem_start = (unsigned long)fbptr;
		}
		mutex_unlock(&robj->rdev->fbdev_info->lock);
	}
}

int radeon_object_wait(struct radeon_object *robj)
{
	int r = 0;

	/* FIXME: should use block reservation instead */
	r = radeon_object_reserve(robj, true);
	if (unlikely(r != 0)) {
		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
		return r;
	}
	spin_lock(&robj->tobj.lock);
	if (robj->tobj.sync_obj) {
		r = ttm_bo_wait(&robj->tobj, true, false, false);
	}
	spin_unlock(&robj->tobj.lock);
	radeon_object_unreserve(robj);
	return r;
}

int radeon_object_evict_vram(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_IGP) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_object_force_delete(struct radeon_device *rdev)
{
	struct radeon_object *robj, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	DRM_ERROR("Userspace still has active objects!\n");
	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = robj->gobj;
		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
			  gobj, robj, (unsigned long)gobj->size,
			  *((unsigned long *)&gobj->refcount));
		list_del_init(&robj->list);
		radeon_object_unref(&robj);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_object_init(struct radeon_device *rdev)
{
	return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_object_list_reserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;
	int r;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			r = radeon_object_reserve(lobj->robj, true);
			if (unlikely(r != 0)) {
				DRM_ERROR("radeon: failed to reserve object.\n");
				return r;
			}
		}
	}
	return 0;
}

void radeon_object_list_unreserve(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		if (!lobj->robj->pin_count) {
			radeon_object_unreserve(lobj->robj);
		}
	}
}

int radeon_object_list_validate(struct list_head *head, void *fence)
{
	struct radeon_object_list *lobj;
	struct radeon_object *robj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;
	uint32_t flags;
	int r;

	r = radeon_object_list_reserve(head);
	if (unlikely(r != 0)) {
		radeon_object_list_unreserve(head);
		return r;
	}
	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		robj = lobj->robj;
		if (lobj->wdomain) {
			flags = radeon_object_flags_from_domain(lobj->wdomain);
			flags |= TTM_PL_FLAG_TT;
		} else {
			flags = radeon_object_flags_from_domain(lobj->rdomain);
			flags |= TTM_PL_FLAG_TT;
			flags |= TTM_PL_FLAG_VRAM;
		}
		if (!robj->pin_count) {
			robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
			r = ttm_buffer_object_validate(&robj->tobj,
						       robj->tobj.proposed_placement,
						       true, false);
			if (unlikely(r)) {
				radeon_object_list_unreserve(head);
				DRM_ERROR("radeon: failed to validate.\n");
				return r;
			}
			radeon_object_gpu_addr(robj);
		}
		lobj->gpu_offset = robj->gpu_addr;
		if (fence) {
			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
			robj->tobj.sync_obj = radeon_fence_ref(fence);
			robj->tobj.sync_obj_arg = NULL;
		}
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	return 0;
}

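/*
 * A minimal sketch (illustrative only) of how command submission is
 * expected to drive the list helpers above, assuming each lobj has been
 * filled in from the userspace relocation list:
 *
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD(&head);
 *	radeon_object_list_add_object(lobj, &head);	(once per BO)
 *	r = radeon_object_list_validate(&head, fence);
 *	... patch the command stream using lobj->gpu_offset ...
 *	radeon_object_list_unreserve(&head);
 */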
void radeon_object_list_unvalidate(struct list_head *head)
{
	struct radeon_object_list *lobj;
	struct radeon_fence *old_fence = NULL;
	struct list_head *i;

	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
		lobj->robj->tobj.sync_obj = NULL;
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
	radeon_object_list_unreserve(head);
}

void radeon_object_list_clean(struct list_head *head)
{
	radeon_object_list_unreserve(head);
}

int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &robj->tobj);
}

unsigned long radeon_object_size(struct radeon_object *robj)
{
	return robj->tobj.num_pages << PAGE_SHIFT;
}