author    Terje Bergstrom <tbergstrom@nvidia.com>  2017-04-14 16:36:05 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-24 16:24:50 -0400
commit    8478f6356a94651dca1aed8c65df0ca0f0a50016 (patch)
tree      882047337ffebd68dcd373a1c357406ceae82a1c /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent    b3e1ce04b963e91b9b425b3c35cc4eff11db7543 (diff)
gpu: nvgpu: Move aggressive_sync_destroy to gk20a
Copy aggressive_sync_destroy* to struct gk20a at probe time, and
access it from gk20a instead of platform_gk20a.

JIRA NVGPU-16

Change-Id: I6a1c04d85580cb62ab9a52bae08a0f99fe7cfef9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1463542
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
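The probe-time copy itself lands outside this file, so it does not appear
in the diff below. A minimal sketch of what that copy might look like,
assuming struct gk20a gains matching aggressive_sync_destroy* fields and
the platform data is still reachable at probe; the helper name
nvgpu_copy_sync_settings is hypothetical, not from this change:

/*
 * Hypothetical helper: copy the sync-destroy tuning from the platform
 * data into struct gk20a once at probe, so hot paths can read
 * g->aggressive_sync_destroy* without a gk20a_get_platform() lookup.
 */
static void nvgpu_copy_sync_settings(struct gk20a *g,
				     struct gk20a_platform *platform)
{
	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
	g->aggressive_sync_destroy_thresh =
		platform->aggressive_sync_destroy_thresh;
}

After this, every reader in channel_gk20a.c switches from
platform->aggressive_sync_destroy* to g->aggressive_sync_destroy*, which
is what the hunks below do.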
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  32
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 90efc658..eafa17fb 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -81,9 +81,7 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 {
 	struct channel_gk20a *ch = NULL;
-	struct gk20a_platform *platform;
-
-	platform = gk20a_get_platform(f->g->dev);
+	struct gk20a *g = f->g;
 
 	nvgpu_mutex_acquire(&f->free_chs_mutex);
 	if (!nvgpu_list_empty(&f->free_chs)) {
@@ -96,10 +94,10 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 	}
 	nvgpu_mutex_release(&f->free_chs_mutex);
 
-	if (platform->aggressive_sync_destroy_thresh &&
+	if (g->aggressive_sync_destroy_thresh &&
 			(f->used_channels >
-			 platform->aggressive_sync_destroy_thresh))
-		platform->aggressive_sync_destroy = true;
+			 g->aggressive_sync_destroy_thresh))
+		g->aggressive_sync_destroy = true;
 
 	return ch;
 }
@@ -107,7 +105,6 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 static void free_channel(struct fifo_gk20a *f,
 			 struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform;
 	struct gk20a *g = f->g;
 
 	trace_gk20a_release_used_channel(ch->hw_chid);
@@ -123,12 +120,10 @@ static void free_channel(struct fifo_gk20a *f,
 	 * this is fine then because no new channels would be created.
 	 */
 	if (!g->driver_is_dying) {
-		platform = gk20a_get_platform(g->dev);
-
-		if (platform->aggressive_sync_destroy_thresh &&
+		if (g->aggressive_sync_destroy_thresh &&
 			(f->used_channels <
-			 platform->aggressive_sync_destroy_thresh))
-			platform->aggressive_sync_destroy = false;
+			 g->aggressive_sync_destroy_thresh))
+			g->aggressive_sync_destroy = false;
 	}
 }
 
@@ -1218,8 +1213,6 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct device *d = dev_from_gk20a(g);
-	struct gk20a_platform *platform = gk20a_get_platform(d);
 	struct vm_gk20a *ch_vm;
 	u32 gpfifo_size;
 	int err = 0;
@@ -1273,7 +1266,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	g->ops.fifo.setup_userd(c);
 
-	if (!platform->aggressive_sync_destroy_thresh) {
+	if (!g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		c->sync = gk20a_channel_sync_create(c);
 		if (!c->sync) {
@@ -1968,7 +1961,6 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 {
 	struct vm_gk20a *vm;
 	struct channel_gk20a_job *job;
-	struct gk20a_platform *platform;
 	struct gk20a *g;
 	int job_finished = 0;
 	bool watchdog_on = false;
@@ -1984,7 +1976,6 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 	vm = c->vm;
 	g = c->g;
-	platform = gk20a_get_platform(g->dev);
 
 	/*
 	 * If !clean_all, we're in a condition where watchdog isn't supported
@@ -2037,10 +2028,10 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 	if (c->sync) {
 		c->sync->signal_timeline(c->sync);
 
-		if (platform->aggressive_sync_destroy_thresh) {
+		if (g->aggressive_sync_destroy_thresh) {
 			nvgpu_mutex_acquire(&c->sync_lock);
 			if (atomic_dec_and_test(&c->sync->refcount) &&
-					platform->aggressive_sync_destroy) {
+					g->aggressive_sync_destroy) {
 				gk20a_channel_sync_destroy(c->sync);
 				c->sync = NULL;
 			}
@@ -2243,7 +2234,6 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 		u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	bool need_sync_fence = false;
 	bool new_sync_created = false;
 	int wait_fence_fd = -1;
@@ -2258,7 +2248,7 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 	if (force_need_sync_fence)
 		need_sync_fence = true;
 
-	if (platform->aggressive_sync_destroy_thresh) {
+	if (g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		if (!c->sync) {
 			c->sync = gk20a_channel_sync_create(c);