Diffstat (limited to 'drivers/gpu')
 drivers/gpu/nvgpu/common/linux/driver_common.c |  2 ++
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c        | 32 +++++++++++---------------------
 drivers/gpu/nvgpu/gk20a/gk20a.h                |  3 +++
 drivers/gpu/nvgpu/vgpu/vgpu.c                  |  7 +++++--
 4 files changed, 21 insertions(+), 23 deletions(-)
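
This patch moves the `aggressive_sync_destroy` flag and its threshold from `struct gk20a_platform` into `struct gk20a`: the values are copied once at init time (in `nvgpu_init_pm_vars()` for the native driver and in `vgpu_init_vars()` for the virtualized one), so the channel paths in `channel_gk20a.c` can drop their repeated `gk20a_get_platform()` lookups and read the fields directly off `g`.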
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index 10394b22..31cc879c 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -105,6 +105,8 @@ static void nvgpu_init_pm_vars(struct gk20a *g)
 	g->can_elpg =
 		nvgpu_platform_is_silicon(g) ? platform->can_elpg : false;
 	g->default_pri_timeout = platform->default_pri_timeout;
+	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
+	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
 
 	/* set default values to aelpg parameters */
 	g->pmu.aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 90efc658..eafa17fb 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -81,9 +81,7 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 {
 	struct channel_gk20a *ch = NULL;
-	struct gk20a_platform *platform;
-
-	platform = gk20a_get_platform(f->g->dev);
+	struct gk20a *g = f->g;
 
 	nvgpu_mutex_acquire(&f->free_chs_mutex);
 	if (!nvgpu_list_empty(&f->free_chs)) {
@@ -96,10 +94,10 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 	}
 	nvgpu_mutex_release(&f->free_chs_mutex);
 
-	if (platform->aggressive_sync_destroy_thresh &&
+	if (g->aggressive_sync_destroy_thresh &&
 		(f->used_channels >
-		 platform->aggressive_sync_destroy_thresh))
-		platform->aggressive_sync_destroy = true;
+		 g->aggressive_sync_destroy_thresh))
+		g->aggressive_sync_destroy = true;
 
 	return ch;
 }
@@ -107,7 +105,6 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 static void free_channel(struct fifo_gk20a *f,
 		struct channel_gk20a *ch)
 {
-	struct gk20a_platform *platform;
 	struct gk20a *g = f->g;
 
 	trace_gk20a_release_used_channel(ch->hw_chid);
@@ -123,12 +120,10 @@ static void free_channel(struct fifo_gk20a *f,
 	 * this is fine then because no new channels would be created.
 	 */
 	if (!g->driver_is_dying) {
-		platform = gk20a_get_platform(g->dev);
-
-		if (platform->aggressive_sync_destroy_thresh &&
+		if (g->aggressive_sync_destroy_thresh &&
 			(f->used_channels <
-			platform->aggressive_sync_destroy_thresh))
-			platform->aggressive_sync_destroy = false;
+			 g->aggressive_sync_destroy_thresh))
+			g->aggressive_sync_destroy = false;
 	}
 }
 
@@ -1218,8 +1213,6 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct device *d = dev_from_gk20a(g);
-	struct gk20a_platform *platform = gk20a_get_platform(d);
 	struct vm_gk20a *ch_vm;
 	u32 gpfifo_size;
 	int err = 0;
@@ -1273,7 +1266,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 
 	g->ops.fifo.setup_userd(c);
 
-	if (!platform->aggressive_sync_destroy_thresh) {
+	if (!g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		c->sync = gk20a_channel_sync_create(c);
 		if (!c->sync) {
@@ -1968,7 +1961,6 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 {
 	struct vm_gk20a *vm;
 	struct channel_gk20a_job *job;
-	struct gk20a_platform *platform;
 	struct gk20a *g;
 	int job_finished = 0;
 	bool watchdog_on = false;
@@ -1984,7 +1976,6 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
 	vm = c->vm;
 	g = c->g;
-	platform = gk20a_get_platform(g->dev);
 
 	/*
 	 * If !clean_all, we're in a condition where watchdog isn't supported
@@ -2037,10 +2028,10 @@ static void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 		if (c->sync) {
 			c->sync->signal_timeline(c->sync);
 
-			if (platform->aggressive_sync_destroy_thresh) {
+			if (g->aggressive_sync_destroy_thresh) {
 				nvgpu_mutex_acquire(&c->sync_lock);
 				if (atomic_dec_and_test(&c->sync->refcount) &&
-						platform->aggressive_sync_destroy) {
+						g->aggressive_sync_destroy) {
 					gk20a_channel_sync_destroy(c->sync);
 					c->sync = NULL;
 				}
@@ -2243,7 +2234,6 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 		u32 flags)
 {
 	struct gk20a *g = c->g;
-	struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 	bool need_sync_fence = false;
 	bool new_sync_created = false;
 	int wait_fence_fd = -1;
@@ -2258,7 +2248,7 @@ static int gk20a_submit_prepare_syncs(struct channel_gk20a *c,
 	if (force_need_sync_fence)
 		need_sync_fence = true;
 
-	if (platform->aggressive_sync_destroy_thresh) {
+	if (g->aggressive_sync_destroy_thresh) {
 		nvgpu_mutex_acquire(&c->sync_lock);
 		if (!c->sync) {
 			c->sync = gk20a_channel_sync_create(c);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 7c8b4eae..0d1ae9d7 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -996,6 +996,9 @@ struct gk20a {
 
 	u32 default_pri_timeout;
 
+	unsigned int aggressive_sync_destroy_thresh;
+	bool aggressive_sync_destroy;
+
 	u32 emc3d_ratio;
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c
index cc672b1e..480d80d7 100644
--- a/drivers/gpu/nvgpu/vgpu/vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/vgpu.c
@@ -229,7 +229,7 @@ static void vgpu_remove_support(struct gk20a *g)
 	}
 }
 
-static void vgpu_init_vars(struct gk20a *g)
+static void vgpu_init_vars(struct gk20a *g, struct gk20a_platform *platform)
 {
 	nvgpu_mutex_init(&g->poweroff_lock);
 	g->regs_saved = g->regs;
@@ -237,6 +237,9 @@ static void vgpu_init_vars(struct gk20a *g)
 
 	nvgpu_init_list_node(&g->pending_sema_waits);
 	nvgpu_raw_spinlock_init(&g->pending_sema_waits_lock);
+
+	g->aggressive_sync_destroy = platform->aggressive_sync_destroy;
+	g->aggressive_sync_destroy_thresh = platform->aggressive_sync_destroy_thresh;
 }
 
 static int vgpu_init_support(struct platform_device *pdev)
@@ -595,7 +598,7 @@ int vgpu_probe(struct platform_device *pdev)
 
 	vgpu_init_support(pdev);
 
-	vgpu_init_vars(gk20a);
+	vgpu_init_vars(gk20a, platform);
 
 	init_rwsem(&gk20a->busy_lock);
 
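
For context, the channel code above implements a simple hysteresis on the (now per-device) flags: aggressive sync destruction switches on when the used-channel count rises above the threshold and back off when it falls below it, with a threshold of 0 disabling the feature entirely. The following is a minimal standalone sketch of that toggling behavior, not part of the patch; the struct and function names here are illustrative stand-ins, not driver API, and the real driver does this under f->free_chs_mutex.

	/* Hypothetical, self-contained model of the on/off hysteresis. */
	#include <stdbool.h>
	#include <stdio.h>

	struct dev_state {
		unsigned int aggressive_sync_destroy_thresh;	/* 0 disables the feature */
		bool aggressive_sync_destroy;
		unsigned int used_channels;
	};

	static void channel_get(struct dev_state *g)
	{
		g->used_channels++;
		/* mirrors allocate_channel(): enable once count exceeds thresh */
		if (g->aggressive_sync_destroy_thresh &&
		    g->used_channels > g->aggressive_sync_destroy_thresh)
			g->aggressive_sync_destroy = true;
	}

	static void channel_put(struct dev_state *g)
	{
		g->used_channels--;
		/* mirrors free_channel(): disable once count drops below thresh */
		if (g->aggressive_sync_destroy_thresh &&
		    g->used_channels < g->aggressive_sync_destroy_thresh)
			g->aggressive_sync_destroy = false;
	}

	int main(void)
	{
		struct dev_state g = { .aggressive_sync_destroy_thresh = 2 };

		channel_get(&g);	/* 1 channel: off */
		channel_get(&g);	/* 2 channels: still off (not above thresh) */
		channel_get(&g);	/* 3 channels: on */
		printf("aggressive: %d\n", g.aggressive_sync_destroy);	/* prints 1 */
		channel_put(&g);	/* 2 channels: stays on (not below thresh) */
		channel_put(&g);	/* 1 channel: off */
		printf("aggressive: %d\n", g.aggressive_sync_destroy);	/* prints 0 */
		return 0;
	}

Note the dead band at exactly thresh channels, where neither branch fires: the flag keeps its last value, which avoids flapping when the channel count hovers around the threshold.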