aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2016-02-16 05:24:58 -0500
committerAlex Deucher <alexander.deucher@amd.com>2016-03-08 11:01:47 -0500
commit257bf15a4b9795f8b352beb6e72a7e3e5aab8d27 (patch)
tree56acfd26f3fc367ad831829001ab5540de6b8942 /drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
parent336d1f5efe93db3d997a6d105760dd613d7ecdce (diff)
drm/amdgpu: add slab cache for sync objects as well
We need them all the time. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c36
1 file changed, 32 insertions, 4 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index e3673422aac8..c48b4fce5e57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -37,6 +37,8 @@ struct amdgpu_sync_entry {
37 struct fence *fence; 37 struct fence *fence;
38}; 38};
39 39
40static struct kmem_cache *amdgpu_sync_slab;
41
40/** 42/**
41 * amdgpu_sync_create - zero init sync object 43 * amdgpu_sync_create - zero init sync object
42 * 44 *
@@ -133,7 +135,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
133 return 0; 135 return 0;
134 } 136 }
135 137
136 e = kmalloc(sizeof(struct amdgpu_sync_entry), GFP_KERNEL); 138 e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
137 if (!e) 139 if (!e)
138 return -ENOMEM; 140 return -ENOMEM;
139 141
@@ -214,7 +216,7 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
214 f = e->fence; 216 f = e->fence;
215 217
216 hash_del(&e->node); 218 hash_del(&e->node);
217 kfree(e); 219 kmem_cache_free(amdgpu_sync_slab, e);
218 220
219 if (!fence_is_signaled(f)) 221 if (!fence_is_signaled(f))
220 return f; 222 return f;
@@ -237,7 +239,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
237 239
238 hash_del(&e->node); 240 hash_del(&e->node);
239 fence_put(e->fence); 241 fence_put(e->fence);
240 kfree(e); 242 kmem_cache_free(amdgpu_sync_slab, e);
241 } 243 }
242 244
243 return 0; 245 return 0;
@@ -259,8 +261,34 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
259 hash_for_each_safe(sync->fences, i, tmp, e, node) { 261 hash_for_each_safe(sync->fences, i, tmp, e, node) {
260 hash_del(&e->node); 262 hash_del(&e->node);
261 fence_put(e->fence); 263 fence_put(e->fence);
262 kfree(e); 264 kmem_cache_free(amdgpu_sync_slab, e);
263 } 265 }
264 266
265 fence_put(sync->last_vm_update); 267 fence_put(sync->last_vm_update);
266} 268}
269
270/**
271 * amdgpu_sync_init - init sync object subsystem
272 *
273 * Allocate the slab allocator.
274 */
275int amdgpu_sync_init(void)
276{
277 amdgpu_sync_slab = kmem_cache_create(
278 "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
279 SLAB_HWCACHE_ALIGN, NULL);
280 if (!amdgpu_sync_slab)
281 return -ENOMEM;
282
283 return 0;
284}
285
286/**
287 * amdgpu_sync_fini - fini sync object subsystem
288 *
289 * Free the slab allocator.
290 */
291void amdgpu_sync_fini(void)
292{
293 kmem_cache_destroy(amdgpu_sync_slab);
294}