author	Christian König <christian.koenig@amd.com>	2016-06-01 09:10:02 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2016-06-02 02:27:41 -0400
commit	76bf0db5543976ef50362db7071da367cb118532 (patch)
tree	c8f94084239df5d095c80d87d423d44d95081379
parent	3377900791ea48a638fb9b70869258332951271d (diff)
dma-buf/fence: make fence context 64 bit v2
Fence contexts are created on the fly (for example) by the GPU scheduler
used in the amdgpu driver as a result of a userspace request. Because of
this, userspace could in theory force a wrap-around of the 32-bit context
number if it doesn't behave well.

Avoid this by widening the context number to 64 bits. This way, even if
userspace manages to allocate a billion contexts per second, it takes more
than 500 years for the context number to wrap around.

v2: fix printf formats as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1464786612-5010-2-git-send-email-deathsimple@vodafone.de
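
For context, a quick back-of-the-envelope check of the wrap-around claim
above (a standalone userspace sketch, not part of the patch; the one
billion per second rate is the figure assumed in the message):

    #include <stdio.h>

    int main(void)
    {
        const double rate = 1e9;                       /* contexts/sec */
        double secs32 = 4294967296.0 / rate;           /* 2^32 values */
        double secs64 = 18446744073709551616.0 / rate; /* 2^64 values */

        printf("32-bit counter wraps after %.1f seconds\n", secs32);
        printf("64-bit counter wraps after %.0f years\n",
               secs64 / (3600.0 * 24.0 * 365.0));
        return 0;
    }

At that rate a 32-bit counter wraps in roughly 4.3 seconds, while a
64-bit counter lasts about 585 years, matching the "more than 500 years"
figure in the message.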
-rw-r--r--  drivers/dma-buf/fence.c                     8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h         2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c      2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h       2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h     3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c           2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h             2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c       2
-rw-r--r--  drivers/staging/android/sync.h              3
-rw-r--r--  include/linux/fence.h                      13
10 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
 
 /**
  * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &fence_context_counter) - num;
 }
 EXPORT_SYMBOL(fence_context_alloc);
 
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
  */
 void
 fence_init(struct fence *fence, const struct fence_ops *ops,
-	   spinlock_t *lock, unsigned context, unsigned seqno)
+	   spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
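
For illustration, the allocation scheme in fence_context_alloc() above can
be mocked in userspace C11 (a hedged sketch, not the kernel code): it
atomically reserves a block of num consecutive context numbers and returns
the first one. atomic_fetch_add() returns the pre-add value, which matches
atomic64_add_return(num, ...) - num.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t fence_context_counter;

    /* Hypothetical userspace mock of the kernel allocator. */
    static uint64_t fence_context_alloc_mock(unsigned num)
    {
        /* Reserve num consecutive contexts; return the first of the block. */
        return atomic_fetch_add(&fence_context_counter, num);
    }

    int main(void)
    {
        uint64_t a = fence_context_alloc_mock(4); /* gets contexts 0..3 */
        uint64_t b = fence_context_alloc_mock(2); /* gets contexts 4..5 */

        printf("blocks start at %llu and %llu\n",
               (unsigned long long)a, (unsigned long long)b);
        return 0;
    }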
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 992f00b65be4..da3d02154fa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2032,7 +2032,7 @@ struct amdgpu_device {
 	struct amdgpu_irq_src hpd_irq;
 
 	/* rings */
-	unsigned fence_context;
+	u64 fence_context;
 	unsigned num_rings;
 	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
 	bool ib_pool_ready;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8bf84efafb04..b16366c2b4a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -427,7 +427,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 			   soffset, eoffset, eoffset - soffset);
 
 		if (i->fence)
-			seq_printf(m, " protected by 0x%08x on context %d",
+			seq_printf(m, " protected by 0x%08x on context %llu",
 				   i->fence->seqno, i->fence->context);
 
 		seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f25ff..a69cdd526bf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
 	u32 completed_fence;
 	u32 retired_fence;
 	wait_queue_head_t fence_event;
-	unsigned int fence_context;
+	u64 fence_context;
 	spinlock_t fence_spinlock;
 
 	/* worker for handling active-list retiring: */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38fe9..64c4ce7115ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
 	int  (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	u32 contexts, context_base;
+	u32 contexts;
+	u64 context_base;
 	bool uevent;
 };
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261baf..f599cd073b72 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
 		return 0;
 
 	if (have_drawable_releases && sc > 300) {
-		FENCE_WARN(fence, "failed to wait on release %d "
+		FENCE_WARN(fence, "failed to wait on release %llu "
 			   "after spincount %d\n",
 			   fence->context & ~0xf0000000, sc);
 		goto signaled;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495d6c..5633ee3eb46e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
 	struct radeon_mman mman;
 	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
 	wait_queue_head_t fence_queue;
-	unsigned fence_context;
+	u64 fence_context;
 	struct mutex ring_lock;
 	struct radeon_ring ring[RADEON_NUM_RINGS];
 	bool ib_pool_ready;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ede83..26ac8e80a478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
 	bool goal_irq_on; /* Protected by @goal_irq_mutex */
 	bool seqno_valid; /* Protected by @lock, and may not be set to true
 			     without the @goal_irq_mutex held. */
-	unsigned ctx;
+	u64 ctx;
 };
 
 struct vmw_user_fence {
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index b56885c14839..ebb34dca60df 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -68,7 +68,8 @@ struct sync_timeline {
 
 	/* protected by child_list_lock */
 	bool destroyed;
-	int context, value;
+	u64 context;
+	int value;
 
 	struct list_head child_list_head;
 	spinlock_t child_list_lock;
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 2b17698b60b8..18a97c6b79db 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -75,7 +75,8 @@ struct fence {
 	struct rcu_head rcu;
 	struct list_head cb_list;
 	spinlock_t *lock;
-	unsigned context, seqno;
+	u64 context;
+	unsigned seqno;
 	unsigned long flags;
 	ktime_t timestamp;
 	int status;
@@ -178,7 +179,7 @@ struct fence_ops {
 };
 
 void fence_init(struct fence *fence, const struct fence_ops *ops,
-		spinlock_t *lock, unsigned context, unsigned seqno);
+		spinlock_t *lock, u64 context, unsigned seqno);
 
 void fence_release(struct kref *kref);
 void fence_free(struct fence *fence);
@@ -352,27 +353,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
 	return ret < 0 ? ret : 0;
 }
 
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
 
 #define FENCE_TRACE(f, fmt, args...) \
 	do { \
 		struct fence *__ff = (f); \
 		if (config_enabled(CONFIG_FENCE_TRACE)) \
-			pr_info("f %u#%u: " fmt, \
+			pr_info("f %llu#%u: " fmt, \
 				__ff->context, __ff->seqno, ##args); \
 	} while (0)
 
 #define FENCE_WARN(f, fmt, args...) \
 	do { \
 		struct fence *__ff = (f); \
-		pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+		pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
 			##args); \
 	} while (0)
 
#define FENCE_ERR(f, fmt, args...) \
 	do { \
 		struct fence *__ff = (f); \
-		pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+		pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
 			##args); \
 	} while (0)
 