Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c  582
1 file changed, 232 insertions(+), 350 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c2..122774742bd5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -30,104 +30,67 @@
30 | * Dave Airlie | 30 | * Dave Airlie |
31 | */ | 31 | */ |
32 | #include <linux/list.h> | 32 | #include <linux/list.h> |
33 | #include <linux/slab.h> | ||
33 | #include <drm/drmP.h> | 34 | #include <drm/drmP.h> |
34 | #include "radeon_drm.h" | 35 | #include "radeon_drm.h" |
35 | #include "radeon.h" | 36 | #include "radeon.h" |
36 | 37 | ||
37 | struct radeon_object { | ||
38 | struct ttm_buffer_object tobj; | ||
39 | struct list_head list; | ||
40 | struct radeon_device *rdev; | ||
41 | struct drm_gem_object *gobj; | ||
42 | struct ttm_bo_kmap_obj kmap; | ||
43 | unsigned pin_count; | ||
44 | uint64_t gpu_addr; | ||
45 | void *kptr; | ||
46 | bool is_iomem; | ||
47 | uint32_t tiling_flags; | ||
48 | uint32_t pitch; | ||
49 | int surface_reg; | ||
50 | }; | ||
51 | 38 | ||
52 | int radeon_ttm_init(struct radeon_device *rdev); | 39 | int radeon_ttm_init(struct radeon_device *rdev); |
53 | void radeon_ttm_fini(struct radeon_device *rdev); | 40 | void radeon_ttm_fini(struct radeon_device *rdev); |
41 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); | ||
54 | 42 | ||
55 | /* | 43 | /* |
56 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all | 44 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all |
57 | * function are calling it. | 45 | * function are calling it. |
58 | */ | 46 | */ |
59 | 47 | ||
60 | static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) | 48 | static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
61 | { | 49 | { |
62 | return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); | 50 | struct radeon_bo *bo; |
63 | } | ||
64 | 51 | ||
65 | static void radeon_object_unreserve(struct radeon_object *robj) | 52 | bo = container_of(tbo, struct radeon_bo, tbo); |
66 | { | 53 | mutex_lock(&bo->rdev->gem.mutex); |
67 | ttm_bo_unreserve(&robj->tobj); | 54 | list_del_init(&bo->list); |
55 | mutex_unlock(&bo->rdev->gem.mutex); | ||
56 | radeon_bo_clear_surface_reg(bo); | ||
57 | kfree(bo); | ||
68 | } | 58 | } |
69 | 59 | ||
70 | static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | 60 | bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo) |
71 | { | 61 | { |
72 | struct radeon_object *robj; | 62 | if (bo->destroy == &radeon_ttm_bo_destroy) |
73 | 63 | return true; | |
74 | robj = container_of(tobj, struct radeon_object, tobj); | 64 | return false; |
75 | list_del_init(&robj->list); | ||
76 | radeon_object_clear_surface_reg(robj); | ||
77 | kfree(robj); | ||
78 | } | 65 | } |
79 | 66 | ||
80 | static inline void radeon_object_gpu_addr(struct radeon_object *robj) | 67 | void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) |
81 | { | 68 | { |
82 | /* Default gpu address */ | 69 | u32 c = 0; |
83 | robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; | ||
84 | if (robj->tobj.mem.mm_node == NULL) { | ||
85 | return; | ||
86 | } | ||
87 | robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT; | ||
88 | switch (robj->tobj.mem.mem_type) { | ||
89 | case TTM_PL_VRAM: | ||
90 | robj->gpu_addr += (u64)robj->rdev->mc.vram_location; | ||
91 | break; | ||
92 | case TTM_PL_TT: | ||
93 | robj->gpu_addr += (u64)robj->rdev->mc.gtt_location; | ||
94 | break; | ||
95 | default: | ||
96 | DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type); | ||
97 | robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; | ||
98 | return; | ||
99 | } | ||
100 | } | ||
101 | 70 | ||
102 | static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | 71 | rbo->placement.fpfn = 0; |
103 | { | 72 | rbo->placement.lpfn = 0; |
104 | uint32_t flags = 0; | 73 | rbo->placement.placement = rbo->placements; |
105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | 74 | rbo->placement.busy_placement = rbo->placements; |
106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; | 75 | if (domain & RADEON_GEM_DOMAIN_VRAM) |
107 | } | 76 | rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | |
108 | if (domain & RADEON_GEM_DOMAIN_GTT) { | 77 | TTM_PL_FLAG_VRAM; |
109 | flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; | 78 | if (domain & RADEON_GEM_DOMAIN_GTT) |
110 | } | 79 | rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
111 | if (domain & RADEON_GEM_DOMAIN_CPU) { | 80 | if (domain & RADEON_GEM_DOMAIN_CPU) |
112 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; | 81 | rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
113 | } | 82 | if (!c) |
114 | if (!flags) { | 83 | rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; |
115 | flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; | 84 | rbo->placement.num_placement = c; |
116 | } | 85 | rbo->placement.num_busy_placement = c; |
117 | return flags; | ||
118 | } | 86 | } |
119 | 87 | ||
120 | int radeon_object_create(struct radeon_device *rdev, | 88 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, |
121 | struct drm_gem_object *gobj, | 89 | unsigned long size, bool kernel, u32 domain, |
122 | unsigned long size, | 90 | struct radeon_bo **bo_ptr) |
123 | bool kernel, | ||
124 | uint32_t domain, | ||
125 | bool interruptible, | ||
126 | struct radeon_object **robj_ptr) | ||
127 | { | 91 | { |
128 | struct radeon_object *robj; | 92 | struct radeon_bo *bo; |
129 | enum ttm_bo_type type; | 93 | enum ttm_bo_type type; |
130 | uint32_t flags; | ||
131 | int r; | 94 | int r; |
132 | 95 | ||
133 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 96 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
@@ -138,238 +101,164 @@ int radeon_object_create(struct radeon_device *rdev,
138 | } else { | 101 | } else { |
139 | type = ttm_bo_type_device; | 102 | type = ttm_bo_type_device; |
140 | } | 103 | } |
141 | *robj_ptr = NULL; | 104 | *bo_ptr = NULL; |
142 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); | 105 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
143 | if (robj == NULL) { | 106 | if (bo == NULL) |
144 | return -ENOMEM; | 107 | return -ENOMEM; |
145 | } | 108 | bo->rdev = rdev; |
146 | robj->rdev = rdev; | 109 | bo->gobj = gobj; |
147 | robj->gobj = gobj; | 110 | bo->surface_reg = -1; |
148 | robj->surface_reg = -1; | 111 | INIT_LIST_HEAD(&bo->list); |
149 | INIT_LIST_HEAD(&robj->list); | 112 | |
150 | 113 | radeon_ttm_placement_from_domain(bo, domain); | |
151 | flags = radeon_object_flags_from_domain(domain); | 114 | /* Kernel allocation are uninterruptible */ |
152 | r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, | 115 | r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
153 | 0, 0, false, NULL, size, | 116 | &bo->placement, 0, 0, !kernel, NULL, size, |
154 | &radeon_ttm_object_object_destroy); | 117 | &radeon_ttm_bo_destroy); |
155 | if (unlikely(r != 0)) { | 118 | if (unlikely(r != 0)) { |
156 | /* ttm call radeon_ttm_object_object_destroy if error happen */ | 119 | if (r != -ERESTARTSYS) |
157 | DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", | 120 | dev_err(rdev->dev, |
158 | size, flags, 0); | 121 | "object_init failed for (%lu, 0x%08X)\n", |
122 | size, domain); | ||
159 | return r; | 123 | return r; |
160 | } | 124 | } |
161 | *robj_ptr = robj; | 125 | *bo_ptr = bo; |
162 | if (gobj) { | 126 | if (gobj) { |
163 | list_add_tail(&robj->list, &rdev->gem.objects); | 127 | mutex_lock(&bo->rdev->gem.mutex); |
128 | list_add_tail(&bo->list, &rdev->gem.objects); | ||
129 | mutex_unlock(&bo->rdev->gem.mutex); | ||
164 | } | 130 | } |
165 | return 0; | 131 | return 0; |
166 | } | 132 | } |
167 | 133 | ||
168 | int radeon_object_kmap(struct radeon_object *robj, void **ptr) | 134 | int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) |
169 | { | 135 | { |
136 | bool is_iomem; | ||
170 | int r; | 137 | int r; |
171 | 138 | ||
172 | spin_lock(&robj->tobj.lock); | 139 | if (bo->kptr) { |
173 | if (robj->kptr) { | ||
174 | if (ptr) { | 140 | if (ptr) { |
175 | *ptr = robj->kptr; | 141 | *ptr = bo->kptr; |
176 | } | 142 | } |
177 | spin_unlock(&robj->tobj.lock); | ||
178 | return 0; | 143 | return 0; |
179 | } | 144 | } |
180 | spin_unlock(&robj->tobj.lock); | 145 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); |
181 | r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap); | ||
182 | if (r) { | 146 | if (r) { |
183 | return r; | 147 | return r; |
184 | } | 148 | } |
185 | spin_lock(&robj->tobj.lock); | 149 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); |
186 | robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem); | ||
187 | spin_unlock(&robj->tobj.lock); | ||
188 | if (ptr) { | 150 | if (ptr) { |
189 | *ptr = robj->kptr; | 151 | *ptr = bo->kptr; |
190 | } | 152 | } |
191 | radeon_object_check_tiling(robj, 0, 0); | 153 | radeon_bo_check_tiling(bo, 0, 0); |
192 | return 0; | 154 | return 0; |
193 | } | 155 | } |
194 | 156 | ||
195 | void radeon_object_kunmap(struct radeon_object *robj) | 157 | void radeon_bo_kunmap(struct radeon_bo *bo) |
196 | { | 158 | { |
197 | spin_lock(&robj->tobj.lock); | 159 | if (bo->kptr == NULL) |
198 | if (robj->kptr == NULL) { | ||
199 | spin_unlock(&robj->tobj.lock); | ||
200 | return; | 160 | return; |
201 | } | 161 | bo->kptr = NULL; |
202 | robj->kptr = NULL; | 162 | radeon_bo_check_tiling(bo, 0, 0); |
203 | spin_unlock(&robj->tobj.lock); | 163 | ttm_bo_kunmap(&bo->kmap); |
204 | radeon_object_check_tiling(robj, 0, 0); | ||
205 | ttm_bo_kunmap(&robj->kmap); | ||
206 | } | 164 | } |
207 | 165 | ||
208 | void radeon_object_unref(struct radeon_object **robj) | 166 | void radeon_bo_unref(struct radeon_bo **bo) |
209 | { | 167 | { |
210 | struct ttm_buffer_object *tobj; | 168 | struct ttm_buffer_object *tbo; |
211 | 169 | ||
212 | if ((*robj) == NULL) { | 170 | if ((*bo) == NULL) |
213 | return; | 171 | return; |
214 | } | 172 | tbo = &((*bo)->tbo); |
215 | tobj = &((*robj)->tobj); | 173 | ttm_bo_unref(&tbo); |
216 | ttm_bo_unref(&tobj); | 174 | if (tbo == NULL) |
217 | if (tobj == NULL) { | 175 | *bo = NULL; |
218 | *robj = NULL; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset) | ||
223 | { | ||
224 | *offset = robj->tobj.addr_space_offset; | ||
225 | return 0; | ||
226 | } | 176 | } |
227 | 177 | ||
228 | int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | 178 | int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
229 | uint64_t *gpu_addr) | ||
230 | { | 179 | { |
231 | uint32_t flags; | 180 | int r, i; |
232 | uint32_t tmp; | ||
233 | int r; | ||
234 | 181 | ||
235 | flags = radeon_object_flags_from_domain(domain); | 182 | if (bo->pin_count) { |
236 | spin_lock(&robj->tobj.lock); | 183 | bo->pin_count++; |
237 | if (robj->pin_count) { | 184 | if (gpu_addr) |
238 | robj->pin_count++; | 185 | *gpu_addr = radeon_bo_gpu_offset(bo); |
239 | if (gpu_addr != NULL) { | ||
240 | *gpu_addr = robj->gpu_addr; | ||
241 | } | ||
242 | spin_unlock(&robj->tobj.lock); | ||
243 | return 0; | 186 | return 0; |
244 | } | 187 | } |
245 | spin_unlock(&robj->tobj.lock); | 188 | radeon_ttm_placement_from_domain(bo, domain); |
246 | r = radeon_object_reserve(robj, false); | 189 | if (domain == RADEON_GEM_DOMAIN_VRAM) { |
247 | if (unlikely(r != 0)) { | 190 | /* force to pin into visible video ram */ |
248 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); | 191 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
249 | return r; | 192 | } |
250 | } | 193 | for (i = 0; i < bo->placement.num_placement; i++) |
251 | tmp = robj->tobj.mem.placement; | 194 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; |
252 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); | 195 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
253 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; | 196 | if (likely(r == 0)) { |
254 | r = ttm_buffer_object_validate(&robj->tobj, | 197 | bo->pin_count = 1; |
255 | robj->tobj.proposed_placement, | 198 | if (gpu_addr != NULL) |
256 | false, false); | 199 | *gpu_addr = radeon_bo_gpu_offset(bo); |
257 | radeon_object_gpu_addr(robj); | 200 | } |
258 | if (gpu_addr != NULL) { | 201 | if (unlikely(r != 0)) |
259 | *gpu_addr = robj->gpu_addr; | 202 | dev_err(bo->rdev->dev, "%p pin failed\n", bo); |
260 | } | ||
261 | robj->pin_count = 1; | ||
262 | if (unlikely(r != 0)) { | ||
263 | DRM_ERROR("radeon: failed to pin object.\n"); | ||
264 | } | ||
265 | radeon_object_unreserve(robj); | ||
266 | return r; | 203 | return r; |
267 | } | 204 | } |
268 | 205 | ||
269 | void radeon_object_unpin(struct radeon_object *robj) | 206 | int radeon_bo_unpin(struct radeon_bo *bo) |
270 | { | 207 | { |
271 | uint32_t flags; | 208 | int r, i; |
272 | int r; | ||
273 | 209 | ||
274 | spin_lock(&robj->tobj.lock); | 210 | if (!bo->pin_count) { |
275 | if (!robj->pin_count) { | 211 | dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); |
276 | spin_unlock(&robj->tobj.lock); | 212 | return 0; |
277 | printk(KERN_WARNING "Unpin not necessary for %p !\n", robj); | ||
278 | return; | ||
279 | } | ||
280 | robj->pin_count--; | ||
281 | if (robj->pin_count) { | ||
282 | spin_unlock(&robj->tobj.lock); | ||
283 | return; | ||
284 | } | ||
285 | spin_unlock(&robj->tobj.lock); | ||
286 | r = radeon_object_reserve(robj, false); | ||
287 | if (unlikely(r != 0)) { | ||
288 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); | ||
289 | return; | ||
290 | } | ||
291 | flags = robj->tobj.mem.placement; | ||
292 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; | ||
293 | r = ttm_buffer_object_validate(&robj->tobj, | ||
294 | robj->tobj.proposed_placement, | ||
295 | false, false); | ||
296 | if (unlikely(r != 0)) { | ||
297 | DRM_ERROR("radeon: failed to unpin buffer.\n"); | ||
298 | } | ||
299 | radeon_object_unreserve(robj); | ||
300 | } | ||
301 | |||
302 | int radeon_object_wait(struct radeon_object *robj) | ||
303 | { | ||
304 | int r = 0; | ||
305 | |||
306 | /* FIXME: should use block reservation instead */ | ||
307 | r = radeon_object_reserve(robj, true); | ||
308 | if (unlikely(r != 0)) { | ||
309 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | ||
310 | return r; | ||
311 | } | ||
312 | spin_lock(&robj->tobj.lock); | ||
313 | if (robj->tobj.sync_obj) { | ||
314 | r = ttm_bo_wait(&robj->tobj, true, true, false); | ||
315 | } | ||
316 | spin_unlock(&robj->tobj.lock); | ||
317 | radeon_object_unreserve(robj); | ||
318 | return r; | ||
319 | } | ||
320 | |||
321 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) | ||
322 | { | ||
323 | int r = 0; | ||
324 | |||
325 | r = radeon_object_reserve(robj, true); | ||
326 | if (unlikely(r != 0)) { | ||
327 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | ||
328 | return r; | ||
329 | } | ||
330 | spin_lock(&robj->tobj.lock); | ||
331 | *cur_placement = robj->tobj.mem.mem_type; | ||
332 | if (robj->tobj.sync_obj) { | ||
333 | r = ttm_bo_wait(&robj->tobj, true, true, true); | ||
334 | } | 213 | } |
335 | spin_unlock(&robj->tobj.lock); | 214 | bo->pin_count--; |
336 | radeon_object_unreserve(robj); | 215 | if (bo->pin_count) |
216 | return 0; | ||
217 | for (i = 0; i < bo->placement.num_placement; i++) | ||
218 | bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; | ||
219 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
220 | if (unlikely(r != 0)) | ||
221 | dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); | ||
337 | return r; | 222 | return r; |
338 | } | 223 | } |
339 | 224 | ||
340 | int radeon_object_evict_vram(struct radeon_device *rdev) | 225 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
341 | { | 226 | { |
342 | if (rdev->flags & RADEON_IS_IGP) { | 227 | /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ |
343 | /* Useless to evict on IGP chips */ | 228 | if (0 && (rdev->flags & RADEON_IS_IGP)) { |
344 | return 0; | 229 | if (rdev->mc.igp_sideport_enabled == false) |
230 | /* Useless to evict on IGP chips */ | ||
231 | return 0; | ||
345 | } | 232 | } |
346 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 233 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
347 | } | 234 | } |
348 | 235 | ||
349 | void radeon_object_force_delete(struct radeon_device *rdev) | 236 | void radeon_bo_force_delete(struct radeon_device *rdev) |
350 | { | 237 | { |
351 | struct radeon_object *robj, *n; | 238 | struct radeon_bo *bo, *n; |
352 | struct drm_gem_object *gobj; | 239 | struct drm_gem_object *gobj; |
353 | 240 | ||
354 | if (list_empty(&rdev->gem.objects)) { | 241 | if (list_empty(&rdev->gem.objects)) { |
355 | return; | 242 | return; |
356 | } | 243 | } |
357 | DRM_ERROR("Userspace still has active objects !\n"); | 244 | dev_err(rdev->dev, "Userspace still has active objects !\n"); |
358 | list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { | 245 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { |
359 | mutex_lock(&rdev->ddev->struct_mutex); | 246 | mutex_lock(&rdev->ddev->struct_mutex); |
360 | gobj = robj->gobj; | 247 | gobj = bo->gobj; |
361 | DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", | 248 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", |
362 | gobj, robj, (unsigned long)gobj->size, | 249 | gobj, bo, (unsigned long)gobj->size, |
363 | *((unsigned long *)&gobj->refcount)); | 250 | *((unsigned long *)&gobj->refcount)); |
364 | list_del_init(&robj->list); | 251 | mutex_lock(&bo->rdev->gem.mutex); |
365 | radeon_object_unref(&robj); | 252 | list_del_init(&bo->list); |
253 | mutex_unlock(&bo->rdev->gem.mutex); | ||
254 | radeon_bo_unref(&bo); | ||
366 | gobj->driver_private = NULL; | 255 | gobj->driver_private = NULL; |
367 | drm_gem_object_unreference(gobj); | 256 | drm_gem_object_unreference(gobj); |
368 | mutex_unlock(&rdev->ddev->struct_mutex); | 257 | mutex_unlock(&rdev->ddev->struct_mutex); |
369 | } | 258 | } |
370 | } | 259 | } |
371 | 260 | ||
372 | int radeon_object_init(struct radeon_device *rdev) | 261 | int radeon_bo_init(struct radeon_device *rdev) |
373 | { | 262 | { |
374 | /* Add an MTRR for the VRAM */ | 263 | /* Add an MTRR for the VRAM */ |
375 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, | 264 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
@@ -382,13 +271,13 @@ int radeon_object_init(struct radeon_device *rdev)
382 | return radeon_ttm_init(rdev); | 271 | return radeon_ttm_init(rdev); |
383 | } | 272 | } |
384 | 273 | ||
385 | void radeon_object_fini(struct radeon_device *rdev) | 274 | void radeon_bo_fini(struct radeon_device *rdev) |
386 | { | 275 | { |
387 | radeon_ttm_fini(rdev); | 276 | radeon_ttm_fini(rdev); |
388 | } | 277 | } |
389 | 278 | ||
390 | void radeon_object_list_add_object(struct radeon_object_list *lobj, | 279 | void radeon_bo_list_add_object(struct radeon_bo_list *lobj, |
391 | struct list_head *head) | 280 | struct list_head *head) |
392 | { | 281 | { |
393 | if (lobj->wdomain) { | 282 | if (lobj->wdomain) { |
394 | list_add(&lobj->list, head); | 283 | list_add(&lobj->list, head); |
@@ -397,125 +286,102 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
397 | } | 286 | } |
398 | } | 287 | } |
399 | 288 | ||
400 | int radeon_object_list_reserve(struct list_head *head) | 289 | int radeon_bo_list_reserve(struct list_head *head) |
401 | { | 290 | { |
402 | struct radeon_object_list *lobj; | 291 | struct radeon_bo_list *lobj; |
403 | int r; | 292 | int r; |
404 | 293 | ||
405 | list_for_each_entry(lobj, head, list){ | 294 | list_for_each_entry(lobj, head, list){ |
406 | if (!lobj->robj->pin_count) { | 295 | r = radeon_bo_reserve(lobj->bo, false); |
407 | r = radeon_object_reserve(lobj->robj, true); | 296 | if (unlikely(r != 0)) |
408 | if (unlikely(r != 0)) { | 297 | return r; |
409 | DRM_ERROR("radeon: failed to reserve object.\n"); | ||
410 | return r; | ||
411 | } | ||
412 | } else { | ||
413 | } | ||
414 | } | 298 | } |
415 | return 0; | 299 | return 0; |
416 | } | 300 | } |
417 | 301 | ||
418 | void radeon_object_list_unreserve(struct list_head *head) | 302 | void radeon_bo_list_unreserve(struct list_head *head) |
419 | { | 303 | { |
420 | struct radeon_object_list *lobj; | 304 | struct radeon_bo_list *lobj; |
421 | 305 | ||
422 | list_for_each_entry(lobj, head, list) { | 306 | list_for_each_entry(lobj, head, list) { |
423 | if (!lobj->robj->pin_count) { | 307 | /* only unreserve object we successfully reserved */ |
424 | radeon_object_unreserve(lobj->robj); | 308 | if (radeon_bo_is_reserved(lobj->bo)) |
425 | } | 309 | radeon_bo_unreserve(lobj->bo); |
426 | } | 310 | } |
427 | } | 311 | } |
428 | 312 | ||
429 | int radeon_object_list_validate(struct list_head *head, void *fence) | 313 | int radeon_bo_list_validate(struct list_head *head) |
430 | { | 314 | { |
431 | struct radeon_object_list *lobj; | 315 | struct radeon_bo_list *lobj; |
432 | struct radeon_object *robj; | 316 | struct radeon_bo *bo; |
433 | struct radeon_fence *old_fence = NULL; | ||
434 | int r; | 317 | int r; |
435 | 318 | ||
436 | r = radeon_object_list_reserve(head); | 319 | r = radeon_bo_list_reserve(head); |
437 | if (unlikely(r != 0)) { | 320 | if (unlikely(r != 0)) { |
438 | radeon_object_list_unreserve(head); | ||
439 | return r; | 321 | return r; |
440 | } | 322 | } |
441 | list_for_each_entry(lobj, head, list) { | 323 | list_for_each_entry(lobj, head, list) { |
442 | robj = lobj->robj; | 324 | bo = lobj->bo; |
443 | if (!robj->pin_count) { | 325 | if (!bo->pin_count) { |
444 | if (lobj->wdomain) { | 326 | if (lobj->wdomain) { |
445 | robj->tobj.proposed_placement = | 327 | radeon_ttm_placement_from_domain(bo, |
446 | radeon_object_flags_from_domain(lobj->wdomain); | 328 | lobj->wdomain); |
447 | } else { | 329 | } else { |
448 | robj->tobj.proposed_placement = | 330 | radeon_ttm_placement_from_domain(bo, |
449 | radeon_object_flags_from_domain(lobj->rdomain); | 331 | lobj->rdomain); |
450 | } | 332 | } |
451 | r = ttm_buffer_object_validate(&robj->tobj, | 333 | r = ttm_bo_validate(&bo->tbo, &bo->placement, |
452 | robj->tobj.proposed_placement, | 334 | true, false); |
453 | true, false); | 335 | if (unlikely(r)) |
454 | if (unlikely(r)) { | ||
455 | DRM_ERROR("radeon: failed to validate.\n"); | ||
456 | return r; | 336 | return r; |
457 | } | ||
458 | radeon_object_gpu_addr(robj); | ||
459 | } | ||
460 | lobj->gpu_offset = robj->gpu_addr; | ||
461 | lobj->tiling_flags = robj->tiling_flags; | ||
462 | if (fence) { | ||
463 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | ||
464 | robj->tobj.sync_obj = radeon_fence_ref(fence); | ||
465 | robj->tobj.sync_obj_arg = NULL; | ||
466 | } | ||
467 | if (old_fence) { | ||
468 | radeon_fence_unref(&old_fence); | ||
469 | } | 337 | } |
338 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); | ||
339 | lobj->tiling_flags = bo->tiling_flags; | ||
470 | } | 340 | } |
471 | return 0; | 341 | return 0; |
472 | } | 342 | } |
473 | 343 | ||
474 | void radeon_object_list_unvalidate(struct list_head *head) | 344 | void radeon_bo_list_fence(struct list_head *head, void *fence) |
475 | { | 345 | { |
476 | struct radeon_object_list *lobj; | 346 | struct radeon_bo_list *lobj; |
347 | struct radeon_bo *bo; | ||
477 | struct radeon_fence *old_fence = NULL; | 348 | struct radeon_fence *old_fence = NULL; |
478 | 349 | ||
479 | list_for_each_entry(lobj, head, list) { | 350 | list_for_each_entry(lobj, head, list) { |
480 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | 351 | bo = lobj->bo; |
481 | lobj->robj->tobj.sync_obj = NULL; | 352 | spin_lock(&bo->tbo.lock); |
353 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; | ||
354 | bo->tbo.sync_obj = radeon_fence_ref(fence); | ||
355 | bo->tbo.sync_obj_arg = NULL; | ||
356 | spin_unlock(&bo->tbo.lock); | ||
482 | if (old_fence) { | 357 | if (old_fence) { |
483 | radeon_fence_unref(&old_fence); | 358 | radeon_fence_unref(&old_fence); |
484 | } | 359 | } |
485 | } | 360 | } |
486 | radeon_object_list_unreserve(head); | ||
487 | } | 361 | } |
488 | 362 | ||
489 | void radeon_object_list_clean(struct list_head *head) | 363 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
490 | { | ||
491 | radeon_object_list_unreserve(head); | ||
492 | } | ||
493 | |||
494 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | ||
495 | struct vm_area_struct *vma) | 364 | struct vm_area_struct *vma) |
496 | { | 365 | { |
497 | return ttm_fbdev_mmap(vma, &robj->tobj); | 366 | return ttm_fbdev_mmap(vma, &bo->tbo); |
498 | } | ||
499 | |||
500 | unsigned long radeon_object_size(struct radeon_object *robj) | ||
501 | { | ||
502 | return robj->tobj.num_pages << PAGE_SHIFT; | ||
503 | } | 367 | } |
504 | 368 | ||
505 | int radeon_object_get_surface_reg(struct radeon_object *robj) | 369 | int radeon_bo_get_surface_reg(struct radeon_bo *bo) |
506 | { | 370 | { |
507 | struct radeon_device *rdev = robj->rdev; | 371 | struct radeon_device *rdev = bo->rdev; |
508 | struct radeon_surface_reg *reg; | 372 | struct radeon_surface_reg *reg; |
509 | struct radeon_object *old_object; | 373 | struct radeon_bo *old_object; |
510 | int steal; | 374 | int steal; |
511 | int i; | 375 | int i; |
512 | 376 | ||
513 | if (!robj->tiling_flags) | 377 | BUG_ON(!atomic_read(&bo->tbo.reserved)); |
378 | |||
379 | if (!bo->tiling_flags) | ||
514 | return 0; | 380 | return 0; |
515 | 381 | ||
516 | if (robj->surface_reg >= 0) { | 382 | if (bo->surface_reg >= 0) { |
517 | reg = &rdev->surface_regs[robj->surface_reg]; | 383 | reg = &rdev->surface_regs[bo->surface_reg]; |
518 | i = robj->surface_reg; | 384 | i = bo->surface_reg; |
519 | goto out; | 385 | goto out; |
520 | } | 386 | } |
521 | 387 | ||
@@ -523,10 +389,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
523 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { | 389 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { |
524 | 390 | ||
525 | reg = &rdev->surface_regs[i]; | 391 | reg = &rdev->surface_regs[i]; |
526 | if (!reg->robj) | 392 | if (!reg->bo) |
527 | break; | 393 | break; |
528 | 394 | ||
529 | old_object = reg->robj; | 395 | old_object = reg->bo; |
530 | if (old_object->pin_count == 0) | 396 | if (old_object->pin_count == 0) |
531 | steal = i; | 397 | steal = i; |
532 | } | 398 | } |
@@ -537,91 +403,107 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
537 | return -ENOMEM; | 403 | return -ENOMEM; |
538 | /* find someone with a surface reg and nuke their BO */ | 404 | /* find someone with a surface reg and nuke their BO */ |
539 | reg = &rdev->surface_regs[steal]; | 405 | reg = &rdev->surface_regs[steal]; |
540 | old_object = reg->robj; | 406 | old_object = reg->bo; |
541 | /* blow away the mapping */ | 407 | /* blow away the mapping */ |
542 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); | 408 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); |
543 | ttm_bo_unmap_virtual(&old_object->tobj); | 409 | ttm_bo_unmap_virtual(&old_object->tbo); |
544 | old_object->surface_reg = -1; | 410 | old_object->surface_reg = -1; |
545 | i = steal; | 411 | i = steal; |
546 | } | 412 | } |
547 | 413 | ||
548 | robj->surface_reg = i; | 414 | bo->surface_reg = i; |
549 | reg->robj = robj; | 415 | reg->bo = bo; |
550 | 416 | ||
551 | out: | 417 | out: |
552 | radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, | 418 | radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, |
553 | robj->tobj.mem.mm_node->start << PAGE_SHIFT, | 419 | bo->tbo.mem.mm_node->start << PAGE_SHIFT, |
554 | robj->tobj.num_pages << PAGE_SHIFT); | 420 | bo->tbo.num_pages << PAGE_SHIFT); |
555 | return 0; | 421 | return 0; |
556 | } | 422 | } |
557 | 423 | ||
558 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | 424 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) |
559 | { | 425 | { |
560 | struct radeon_device *rdev = robj->rdev; | 426 | struct radeon_device *rdev = bo->rdev; |
561 | struct radeon_surface_reg *reg; | 427 | struct radeon_surface_reg *reg; |
562 | 428 | ||
563 | if (robj->surface_reg == -1) | 429 | if (bo->surface_reg == -1) |
564 | return; | 430 | return; |
565 | 431 | ||
566 | reg = &rdev->surface_regs[robj->surface_reg]; | 432 | reg = &rdev->surface_regs[bo->surface_reg]; |
567 | radeon_clear_surface_reg(rdev, robj->surface_reg); | 433 | radeon_clear_surface_reg(rdev, bo->surface_reg); |
568 | 434 | ||
569 | reg->robj = NULL; | 435 | reg->bo = NULL; |
570 | robj->surface_reg = -1; | 436 | bo->surface_reg = -1; |
571 | } | 437 | } |
572 | 438 | ||
573 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | 439 | int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
574 | uint32_t tiling_flags, uint32_t pitch) | 440 | uint32_t tiling_flags, uint32_t pitch) |
575 | { | 441 | { |
576 | robj->tiling_flags = tiling_flags; | 442 | int r; |
577 | robj->pitch = pitch; | 443 | |
444 | r = radeon_bo_reserve(bo, false); | ||
445 | if (unlikely(r != 0)) | ||
446 | return r; | ||
447 | bo->tiling_flags = tiling_flags; | ||
448 | bo->pitch = pitch; | ||
449 | radeon_bo_unreserve(bo); | ||
450 | return 0; | ||
578 | } | 451 | } |
579 | 452 | ||
580 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | 453 | void radeon_bo_get_tiling_flags(struct radeon_bo *bo, |
581 | uint32_t *tiling_flags, | 454 | uint32_t *tiling_flags, |
582 | uint32_t *pitch) | 455 | uint32_t *pitch) |
583 | { | 456 | { |
457 | BUG_ON(!atomic_read(&bo->tbo.reserved)); | ||
584 | if (tiling_flags) | 458 | if (tiling_flags) |
585 | *tiling_flags = robj->tiling_flags; | 459 | *tiling_flags = bo->tiling_flags; |
586 | if (pitch) | 460 | if (pitch) |
587 | *pitch = robj->pitch; | 461 | *pitch = bo->pitch; |
588 | } | 462 | } |
589 | 463 | ||
590 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | 464 | int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, |
591 | bool force_drop) | 465 | bool force_drop) |
592 | { | 466 | { |
593 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | 467 | BUG_ON(!atomic_read(&bo->tbo.reserved)); |
468 | |||
469 | if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) | ||
594 | return 0; | 470 | return 0; |
595 | 471 | ||
596 | if (force_drop) { | 472 | if (force_drop) { |
597 | radeon_object_clear_surface_reg(robj); | 473 | radeon_bo_clear_surface_reg(bo); |
598 | return 0; | 474 | return 0; |
599 | } | 475 | } |
600 | 476 | ||
601 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | 477 | if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { |
602 | if (!has_moved) | 478 | if (!has_moved) |
603 | return 0; | 479 | return 0; |
604 | 480 | ||
605 | if (robj->surface_reg >= 0) | 481 | if (bo->surface_reg >= 0) |
606 | radeon_object_clear_surface_reg(robj); | 482 | radeon_bo_clear_surface_reg(bo); |
607 | return 0; | 483 | return 0; |
608 | } | 484 | } |
609 | 485 | ||
610 | if ((robj->surface_reg >= 0) && !has_moved) | 486 | if ((bo->surface_reg >= 0) && !has_moved) |
611 | return 0; | 487 | return 0; |
612 | 488 | ||
613 | return radeon_object_get_surface_reg(robj); | 489 | return radeon_bo_get_surface_reg(bo); |
614 | } | 490 | } |
615 | 491 | ||
616 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | 492 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, |
617 | struct ttm_mem_reg *mem) | 493 | struct ttm_mem_reg *mem) |
618 | { | 494 | { |
619 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | 495 | struct radeon_bo *rbo; |
620 | radeon_object_check_tiling(robj, 0, 1); | 496 | if (!radeon_ttm_bo_is_radeon_bo(bo)) |
497 | return; | ||
498 | rbo = container_of(bo, struct radeon_bo, tbo); | ||
499 | radeon_bo_check_tiling(rbo, 0, 1); | ||
621 | } | 500 | } |
622 | 501 | ||
623 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | 502 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
624 | { | 503 | { |
625 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | 504 | struct radeon_bo *rbo; |
626 | radeon_object_check_tiling(robj, 0, 0); | 505 | if (!radeon_ttm_bo_is_radeon_bo(bo)) |
506 | return; | ||
507 | rbo = container_of(bo, struct radeon_bo, tbo); | ||
508 | radeon_bo_check_tiling(rbo, 0, 0); | ||
627 | } | 509 | } |
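
Usage note (not part of the diff above): besides renaming radeon_object to radeon_bo, this conversion changes the locking model visible in this file. The old radeon_object_pin()/radeon_object_unpin() took the TTM reservation themselves, while the new radeon_bo_pin(), radeon_bo_unpin() and radeon_bo_kmap() paths expect the caller to already hold it (see the BUG_ON(!atomic_read(&bo->tbo.reserved)) checks added above). The sketch below shows how a kernel-internal caller might allocate, reserve, pin and map a buffer with the new API. It is an illustrative example, not code from this commit; radeon_bo_reserve()/radeon_bo_unreserve() are the wrappers referenced by the diff (they are defined elsewhere, presumably next to these helpers), the helper name is hypothetical, and error handling is abbreviated.

#include "radeon.h"	/* struct radeon_device, radeon_bo_* prototypes (assumed) */

/* Hypothetical helper, for illustration only. */
static int example_alloc_pin_map(struct radeon_device *rdev, unsigned long size,
				 u64 *gpu_addr, void **cpu_ptr,
				 struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	int r;

	/* kernel == true: driver-owned BO, no GEM object backing it */
	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, &bo);
	if (r)
		return r;

	/* pin and kmap now expect the caller to hold the reservation */
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	if (r == 0) {
		r = radeon_bo_kmap(bo, cpu_ptr);
		if (r)
			radeon_bo_unpin(bo);
	}
	radeon_bo_unreserve(bo);
	if (r)
		goto out_unref;

	*bo_ptr = bo;
	return 0;

out_unref:
	radeon_bo_unref(&bo);
	return r;
}

Note also that the new radeon_bo_pin() clamps placement.lpfn to mc.visible_vram_size when pinning into RADEON_GEM_DOMAIN_VRAM, so buffers pinned this way stay within the CPU-visible part of VRAM.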