From e7a0c0ae8b6791c6b8ee30270ebdbe6e95fbbc71 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 12 Jan 2017 19:18:21 -0800
Subject: gpu: nvgpu: Move from gk20a_ to nvgpu_ in semaphore code

Change the prefix in the semaphore code to 'nvgpu_' since this code is
global to all chips.

Bug 1799159

Change-Id: Ic1f3e13428882019e5d1f547acfe95271cc10da5
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1284628
Reviewed-by: Varun Colbert
Tested-by: Varun Colbert
---
 drivers/gpu/nvgpu/include/nvgpu/semaphore.h | 86 ++++++++++++++--------------
 1 file changed, 43 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index 07a27584..cc4921d3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -37,19 +37,19 @@
 #define SEMAPHORE_SIZE			16
 #define SEMAPHORE_SEA_GROWTH_RATE	32
 
-struct gk20a_semaphore_sea;
+struct nvgpu_semaphore_sea;
 
 /*
  * Underlying semaphore data structure. This semaphore can be shared amongst
  * other semaphore instances.
  */
-struct gk20a_semaphore_int {
+struct nvgpu_semaphore_int {
 	int idx;			/* Semaphore index. */
 	u32 offset;			/* Offset into the pool. */
 	atomic_t next_value;		/* Next available value. */
 	u32 *value;			/* Current value (access w/ readl()). */
 	u32 nr_incrs;			/* Number of increments programmed. */
-	struct gk20a_semaphore_pool *p;	/* Pool that owns this sema. */
+	struct nvgpu_semaphore_pool *p;	/* Pool that owns this sema. */
 	struct channel_gk20a *ch;	/* Channel that owns this sema. */
 	struct list_head hw_sema_list;	/* List of HW semaphores. */
 };
@@ -59,8 +59,8 @@ struct gk20a_semaphore_int {
  * pointer to a real semaphore and a value to wait for. This allows one physical
  * semaphore to be shared among an essentially infinite number of submits.
  */
-struct gk20a_semaphore {
-	struct gk20a_semaphore_int *hw_sema;
+struct nvgpu_semaphore {
+	struct nvgpu_semaphore_int *hw_sema;
 
 	atomic_t value;
 	int incremented;
@@ -71,7 +71,7 @@ struct gk20a_semaphore {
 /*
  * A semaphore pool. Each address space will own exactly one of these.
  */
-struct gk20a_semaphore_pool {
+struct nvgpu_semaphore_pool {
 	struct page *page;			/* This pool's page of memory */
 	struct list_head pool_list_entry;	/* Node for list of pools. */
 	void *cpu_va;				/* CPU access to the pool. */
@@ -82,7 +82,7 @@ struct gk20a_semaphore_pool {
 	struct list_head hw_semas;		/* List of HW semas. */
 	DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE);
 
-	struct gk20a_semaphore_sea *sema_sea;	/* Sea that owns this pool. */
+	struct nvgpu_semaphore_sea *sema_sea;	/* Sea that owns this pool. */
 
 	struct mutex pool_lock;
 
@@ -114,7 +114,7 @@ struct gk20a_semaphore_pool {
 * pool. Channels then allocate regular semaphores - basically just a value that
  * signifies when a particular job is done.
  */
-struct gk20a_semaphore_sea {
+struct nvgpu_semaphore_sea {
 	struct list_head pool_list;	/* List of pools in this sea. */
 	struct gk20a *gk20a;
 
@@ -149,33 +149,33 @@ struct gk20a_semaphore_sea {
 /*
  * Semaphore sea functions.
  */
-struct gk20a_semaphore_sea *gk20a_semaphore_sea_create(struct gk20a *gk20a);
-int gk20a_semaphore_sea_map(struct gk20a_semaphore_pool *sea,
+struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *gk20a);
+int nvgpu_semaphore_sea_map(struct nvgpu_semaphore_pool *sea,
			    struct vm_gk20a *vm);
-void gk20a_semaphore_sea_unmap(struct gk20a_semaphore_pool *sea,
+void nvgpu_semaphore_sea_unmap(struct nvgpu_semaphore_pool *sea,
			       struct vm_gk20a *vm);
-struct gk20a_semaphore_sea *gk20a_semaphore_get_sea(struct gk20a *g);
+struct nvgpu_semaphore_sea *nvgpu_semaphore_get_sea(struct gk20a *g);
 
 /*
  * Semaphore pool functions.
  */
-struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(
-	struct gk20a_semaphore_sea *sea);
-int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *pool,
+struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
+	struct nvgpu_semaphore_sea *sea);
+int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *pool,
			     struct vm_gk20a *vm);
-void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *pool,
+void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *pool,
				struct vm_gk20a *vm);
-u64 __gk20a_semaphore_pool_gpu_va(struct gk20a_semaphore_pool *p, bool global);
-void gk20a_semaphore_pool_get(struct gk20a_semaphore_pool *p);
-void gk20a_semaphore_pool_put(struct gk20a_semaphore_pool *p);
+u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global);
+void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p);
+void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p);
 
 /*
  * Semaphore functions.
  */
-struct gk20a_semaphore *gk20a_semaphore_alloc(struct channel_gk20a *ch);
-void gk20a_semaphore_put(struct gk20a_semaphore *s);
-void gk20a_semaphore_get(struct gk20a_semaphore *s);
-void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch);
+struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch);
+void nvgpu_semaphore_put(struct nvgpu_semaphore *s);
+void nvgpu_semaphore_get(struct nvgpu_semaphore *s);
+void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch);
 
 /*
  * Return the address of a specific semaphore.
@@ -183,9 +183,9 @@ void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch);
  * Don't call this on a semaphore you don't own - the VA returned will make no
  * sense in your specific channel's VM.
  */
-static inline u64 gk20a_semaphore_gpu_rw_va(struct gk20a_semaphore *s)
+static inline u64 nvgpu_semaphore_gpu_rw_va(struct nvgpu_semaphore *s)
 {
-	return __gk20a_semaphore_pool_gpu_va(s->hw_sema->p, false) +
+	return __nvgpu_semaphore_pool_gpu_va(s->hw_sema->p, false) +
		s->hw_sema->offset;
 }
 
@@ -193,22 +193,22 @@ static inline u64 gk20a_semaphore_gpu_rw_va(struct gk20a_semaphore *s)
  * Get the global RO address for the semaphore. Can be called on any semaphore
  * regardless of whether you own it.
  */
-static inline u64 gk20a_semaphore_gpu_ro_va(struct gk20a_semaphore *s)
+static inline u64 nvgpu_semaphore_gpu_ro_va(struct nvgpu_semaphore *s)
 {
-	return __gk20a_semaphore_pool_gpu_va(s->hw_sema->p, true) +
+	return __nvgpu_semaphore_pool_gpu_va(s->hw_sema->p, true) +
		s->hw_sema->offset;
 }
 
-static inline u64 gk20a_hw_sema_addr(struct gk20a_semaphore_int *hw_sema)
+static inline u64 nvgpu_hw_sema_addr(struct nvgpu_semaphore_int *hw_sema)
 {
-	return __gk20a_semaphore_pool_gpu_va(hw_sema->p, true) +
+	return __nvgpu_semaphore_pool_gpu_va(hw_sema->p, true) +
		hw_sema->offset;
 }
 
 /*
  * TODO: handle wrap around... Hmm, how to do this?
  */
-static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
+static inline bool nvgpu_semaphore_is_released(struct nvgpu_semaphore *s)
 {
	u32 sema_val = readl(s->hw_sema->value);
 
@@ -220,25 +220,25 @@ static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
	return (int)sema_val >= atomic_read(&s->value);
 }
 
-static inline bool gk20a_semaphore_is_acquired(struct gk20a_semaphore *s)
+static inline bool nvgpu_semaphore_is_acquired(struct nvgpu_semaphore *s)
 {
-	return !gk20a_semaphore_is_released(s);
+	return !nvgpu_semaphore_is_released(s);
 }
 
 /*
  * Read the underlying value from a semaphore.
  */
-static inline u32 gk20a_semaphore_read(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_read(struct nvgpu_semaphore *s)
 {
	return readl(s->hw_sema->value);
 }
 
-static inline u32 gk20a_semaphore_get_value(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_get_value(struct nvgpu_semaphore *s)
 {
	return (u32)atomic_read(&s->value);
 }
 
-static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_next_value(struct nvgpu_semaphore *s)
 {
	return (u32)atomic_read(&s->hw_sema->next_value);
 }
@@ -247,11 +247,11 @@ static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
  * If @force is set then this will not wait for the underlying semaphore to
  * catch up to the passed semaphore.
  */
-static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
+static inline void __nvgpu_semaphore_release(struct nvgpu_semaphore *s,
					     bool force)
 {
	u32 current_val;
-	u32 val = gk20a_semaphore_get_value(s);
+	u32 val = nvgpu_semaphore_get_value(s);
	int attempts = 0;
 
	/*
@@ -260,7 +260,7 @@ static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
	 *
	 * TODO: tune the wait a little better.
	 */
-	while ((current_val = gk20a_semaphore_read(s)) < (val - 1)) {
+	while ((current_val = nvgpu_semaphore_read(s)) < (val - 1)) {
		if (force)
			break;
		msleep(100);
@@ -284,21 +284,21 @@ static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
			     s->hw_sema->ch->hw_chid, val);
 }
 
-static inline void gk20a_semaphore_release(struct gk20a_semaphore *s)
+static inline void nvgpu_semaphore_release(struct nvgpu_semaphore *s)
 {
-	__gk20a_semaphore_release(s, false);
+	__nvgpu_semaphore_release(s, false);
 }
 
 /*
  * Configure a software based increment on this semaphore. This is useful for
  * when we want the GPU to wait on a SW event before processing a channel.
  * Another way to describe this is when the GPU needs to wait on a SW pre-fence.
- * The pre-fence signals SW which in turn calls gk20a_semaphore_release() which
+ * The pre-fence signals SW which in turn calls nvgpu_semaphore_release() which
  * then allows the GPU to continue.
  *
  * Also used to prep a semaphore for an INCR by the GPU.
  */
-static inline void gk20a_semaphore_incr(struct gk20a_semaphore *s)
+static inline void nvgpu_semaphore_incr(struct nvgpu_semaphore *s)
 {
	BUG_ON(s->incremented);
 
@@ -307,6 +307,6 @@ static inline void gk20a_semaphore_incr(struct gk20a_semaphore *s)
 
	gpu_sema_verbose_dbg("INCR sema for c=%d (%u)",
			     s->hw_sema->ch->hw_chid,
-			     gk20a_semaphore_next_value(s));
+			     nvgpu_semaphore_next_value(s));
 }
 #endif
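
As a sanity check on the rename, here is a minimal usage sketch written against
the renamed declarations above. It is illustrative only and not part of this
patch: the function name example_sw_prefence is hypothetical, and it assumes a
valid struct channel_gk20a *ch whose address space already has its semaphore
pool mapped.

/*
 * Illustrative sketch only -- not part of this change. Exercises the
 * renamed API in the SW pre-fence flow described in the header comments.
 */
#include <nvgpu/semaphore.h>

static int example_sw_prefence(struct channel_gk20a *ch)
{
	struct nvgpu_semaphore *s;
	u64 wait_va;

	s = nvgpu_semaphore_alloc(ch);
	if (!s)
		return -ENOMEM;

	/* Reserve the next value for the GPU to wait on. */
	nvgpu_semaphore_incr(s);

	/* Global RO address, usable from any channel's VM. */
	wait_va = nvgpu_semaphore_gpu_ro_va(s);

	/* ... program a semaphore ACQUIRE method against wait_va ... */
	(void)wait_va;

	/* The SW event fires: release the semaphore so the GPU proceeds. */
	nvgpu_semaphore_release(s);
	WARN_ON(!nvgpu_semaphore_is_released(s));

	/* Drop this code's reference. */
	nvgpu_semaphore_put(s);

	return 0;
}

The flow mirrors the SW pre-fence case in the nvgpu_semaphore_incr() comment:
SW reserves the next value, the GPU waits on it, and the later
nvgpu_semaphore_release() unblocks the GPU.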