author	Lucas Stach <l.stach@pengutronix.de>	2017-12-04 12:41:58 -0500
committer	Lucas Stach <l.stach@pengutronix.de>	2018-02-12 10:30:59 -0500
commit	e93b6deeb45a781489f4ceaa97f9545a3cbebb81 (patch)
tree	82b760c0285f254b0fa73999f9da3230cdad2266
parent	8bc4d885bd42e9a1d47a53aa4efbb818597ef9a0 (diff)
drm/etnaviv: hook up DRM GPU scheduler
This hooks in the DRM GPU scheduler. No improvement yet, as all the
dependency handling is still done in etnaviv_gem_submit. This just
replaces the actual GPU submit by passing the job through the scheduler,
which allows us to get rid of the retire worker, as retirement is now
driven by the scheduler.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
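In outline, the submit path after this change looks as follows. This is a
condensed sketch pieced together from the hunks below, with error handling
elided; see the actual diff for the complete code:

    /* etnaviv_ioctl_gem_submit() no longer kicks the GPU directly, it
     * pushes the job into the per-file scheduler entity for the pipe: */
    ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);

    /* etnaviv_sched_push_job() publishes the scheduler's "finished" fence
     * as the userspace-visible out_fence and queues the job: */
    submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
    drm_sched_entity_push_job(&submit->sched_job, sched_entity);

    /* once the job's dependencies are met, the scheduler calls
     * etnaviv_sched_run_job(), which does the actual hardware submit and
     * returns the GPU fence for the scheduler to wait on: */
    fence = etnaviv_gpu_submit(submit);

    /* when that fence signals, the scheduler retires the job through
     * etnaviv_sched_free_job(), which unlinks it from the active list and
     * drops the submit reference -- this is what replaces retire_work */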
-rw-r--r--	drivers/gpu/drm/etnaviv/Kconfig	1
-rw-r--r--	drivers/gpu/drm/etnaviv/Makefile	3
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_drv.c	16
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_drv.h	7
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem.h	1
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c	11
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.c	113
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_gpu.h	14
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_sched.c	125
-rw-r--r--	drivers/gpu/drm/etnaviv/etnaviv_sched.h	35
10 files changed, 235 insertions(+), 91 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 3f58b4077767..e5bfeca361bd 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -11,6 +11,7 @@ config DRM_ETNAVIV
 	select WANT_DEV_COREDUMP
 	select CMA if HAVE_DMA_CONTIGUOUS
 	select DMA_CMA if HAVE_DMA_CONTIGUOUS
+	select DRM_SCHED
 	help
 	  DRM driver for Vivante GPUs.
 
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 1281c8d4fae5..9bb780c22501 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -12,6 +12,7 @@ etnaviv-y := \
 	etnaviv_iommu_v2.o \
 	etnaviv_iommu.o \
 	etnaviv_mmu.o \
-	etnaviv_perfmon.o
+	etnaviv_perfmon.o \
+	etnaviv_sched.o
 
 obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6faf4042db23..8a73414682b2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -101,12 +101,25 @@ static void load_gpu(struct drm_device *dev)
 
 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 {
+	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct etnaviv_file_private *ctx;
+	int i;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;
 
+	for (i = 0; i < ETNA_MAX_PIPES; i++) {
+		struct etnaviv_gpu *gpu = priv->gpu[i];
+
+		if (gpu) {
+			drm_sched_entity_init(&gpu->sched,
+				&ctx->sched_entity[i],
+				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
+				32, NULL);
+		}
+	}
+
 	file->driver_priv = ctx;
 
 	return 0;
@@ -126,6 +139,9 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 			if (gpu->lastctx == ctx)
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
+
+			drm_sched_entity_fini(&gpu->sched,
+					      &ctx->sched_entity[i]);
 		}
 	}
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index a54f0b758a5c..1f055d931c6c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -34,6 +34,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_gem.h>
 #include <drm/etnaviv_drm.h>
+#include <drm/gpu_scheduler.h>
 
 struct etnaviv_cmdbuf;
 struct etnaviv_gpu;
@@ -42,11 +43,11 @@ struct etnaviv_gem_object;
 struct etnaviv_gem_submit;
 
 struct etnaviv_file_private {
-	/* currently we don't do anything useful with this.. but when
-	 * per-context address spaces are supported we'd keep track of
+	/*
+	 * When per-context address spaces are supported we'd keep track of
 	 * the context's page-tables here.
 	 */
-	int dummy;
+	struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
 };
 
 struct etnaviv_drm_private {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index c30964152381..ae352f2a77f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -101,6 +101,7 @@ struct etnaviv_gem_submit_bo {
  * make it easier to unwind when things go wrong, etc).
  */
 struct etnaviv_gem_submit {
+	struct drm_sched_job sched_job;
 	struct kref refcount;
 	struct etnaviv_gpu *gpu;
 	struct dma_fence *out_fence, *in_fence;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 919c8dc39f32..0bc89e4daade 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -22,6 +22,7 @@
 #include "etnaviv_gpu.h"
 #include "etnaviv_gem.h"
 #include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
 
 /*
  * Cmdstream submission:
@@ -381,8 +382,13 @@ static void submit_cleanup(struct kref *kref)
 
 	if (submit->in_fence)
 		dma_fence_put(submit->in_fence);
-	if (submit->out_fence)
+	if (submit->out_fence) {
+		/* first remove from IDR, so fence can not be found anymore */
+		mutex_lock(&submit->gpu->fence_idr_lock);
+		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
+		mutex_unlock(&submit->gpu->fence_idr_lock);
 		dma_fence_put(submit->out_fence);
+	}
 	kfree(submit->pmrs);
 	kfree(submit);
 }
@@ -395,6 +401,7 @@ void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
 int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
+	struct etnaviv_file_private *ctx = file->driver_priv;
 	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct drm_etnaviv_gem_submit *args = data;
 	struct drm_etnaviv_gem_submit_reloc *relocs;
@@ -541,7 +548,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
 	submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);
 
-	ret = etnaviv_gpu_submit(gpu, submit);
+	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
 	if (ret)
 		goto err_submit_objects;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index bab6a8286520..fa5063010435 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -26,6 +26,7 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
 #include "common.xml.h"
 #include "state.xml.h"
 #include "state_hi.xml.h"
@@ -961,9 +962,6 @@ static void recover_worker(struct work_struct *work)
 	mutex_unlock(&gpu->lock);
 	pm_runtime_mark_last_busy(gpu->dev);
 	pm_runtime_put_autosuspend(gpu->dev);
-
-	/* Retire the buffer objects in a work */
-	queue_work(gpu->wq, &gpu->retire_work);
 }
 
 static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
@@ -1016,7 +1014,6 @@ static void hangcheck_disable(struct etnaviv_gpu *gpu)
 /* fence object management */
 struct etnaviv_fence {
 	struct etnaviv_gpu *gpu;
-	int id;
 	struct dma_fence base;
 };
 
@@ -1053,11 +1050,6 @@ static void etnaviv_fence_release(struct dma_fence *fence)
 {
 	struct etnaviv_fence *f = to_etnaviv_fence(fence);
 
-	/* first remove from IDR, so fence can not be looked up anymore */
-	mutex_lock(&f->gpu->lock);
-	idr_remove(&f->gpu->fence_idr, f->id);
-	mutex_unlock(&f->gpu->lock);
-
 	kfree_rcu(f, base.rcu);
 }
 
@@ -1084,11 +1076,6 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 	if (!f)
 		return NULL;
 
-	f->id = idr_alloc_cyclic(&gpu->fence_idr, &f->base, 0, INT_MAX, GFP_KERNEL);
-	if (f->id < 0) {
-		kfree(f);
-		return NULL;
-	}
 	f->gpu = gpu;
 
 	dma_fence_init(&f->base, &etnaviv_fence_ops, &gpu->fence_spinlock,
@@ -1211,31 +1198,6 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
 /*
  * Cmdstream submission/retirement:
  */
-
-static void retire_worker(struct work_struct *work)
-{
-	struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
-					       retire_work);
-	u32 fence = gpu->completed_fence;
-	struct etnaviv_gem_submit *submit, *tmp;
-	LIST_HEAD(retire_list);
-
-	mutex_lock(&gpu->lock);
-	list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
-		if (!dma_fence_is_signaled(submit->out_fence))
-			break;
-
-		list_move(&submit->node, &retire_list);
-	}
-
-	gpu->retired_fence = fence;
-
-	mutex_unlock(&gpu->lock);
-
-	list_for_each_entry_safe(submit, tmp, &retire_list, node)
-		etnaviv_submit_put(submit);
-}
-
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 	u32 id, struct timespec *timeout)
 {
@@ -1243,18 +1205,15 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 	int ret;
 
 	/*
-	 * Look up the fence and take a reference. The mutex only synchronizes
-	 * the IDR lookup with the fence release. We might still find a fence
+	 * Look up the fence and take a reference. We might still find a fence
 	 * whose refcount has already dropped to zero. dma_fence_get_rcu
 	 * pretends we didn't find a fence in that case.
 	 */
-	ret = mutex_lock_interruptible(&gpu->lock);
-	if (ret)
-		return ret;
+	rcu_read_lock();
 	fence = idr_find(&gpu->fence_idr, id);
 	if (fence)
 		fence = dma_fence_get_rcu(fence);
-	mutex_unlock(&gpu->lock);
+	rcu_read_unlock();
 
 	if (!fence)
 		return 0;
@@ -1279,7 +1238,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 
 /*
  * Wait for an object to become inactive. This, on it's own, is not race
- * free: the object is moved by the retire worker off the active list, and
+ * free: the object is moved by the scheduler off the active list, and
  * then the iova is put. Moreover, the object could be re-submitted just
  * after we notice that it's become inactive.
  *
@@ -1368,15 +1327,16 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
 
 
 /* add bo's to gpu's ring, and kick gpu: */
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
-	struct etnaviv_gem_submit *submit)
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 {
+	struct etnaviv_gpu *gpu = submit->gpu;
+	struct dma_fence *gpu_fence;
 	unsigned int i, nr_events = 1, event[3];
 	int ret;
 
 	ret = pm_runtime_get_sync(gpu->dev);
 	if (ret < 0)
-		return ret;
+		return NULL;
 	submit->runtime_resumed = true;
 
 	/*
@@ -1392,22 +1352,20 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	ret = event_alloc(gpu, nr_events, event);
 	if (ret) {
 		DRM_ERROR("no free events\n");
-		return ret;
+		return NULL;
 	}
 
 	mutex_lock(&gpu->lock);
 
-	submit->out_fence = etnaviv_gpu_fence_alloc(gpu);
-	if (!submit->out_fence) {
+	gpu_fence = etnaviv_gpu_fence_alloc(gpu);
+	if (!gpu_fence) {
 		for (i = 0; i < nr_events; i++)
 			event_free(gpu, event[i]);
 
-		ret = -ENOMEM;
 		goto out_unlock;
 	}
-	submit->out_fence_id = to_etnaviv_fence(submit->out_fence)->id;
 
-	gpu->active_fence = submit->out_fence->seqno;
+	gpu->active_fence = gpu_fence->seqno;
 
 	if (submit->nr_pmrs) {
 		gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1416,8 +1374,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 		etnaviv_sync_point_queue(gpu, event[1]);
 	}
 
-	kref_get(&submit->refcount);
-	gpu->event[event[0]].fence = submit->out_fence;
+	gpu->event[event[0]].fence = gpu_fence;
 	etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
 			     &submit->cmdbuf);
 
@@ -1428,15 +1385,12 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 		etnaviv_sync_point_queue(gpu, event[2]);
 	}
 
-	list_add_tail(&submit->node, &gpu->active_submit_list);
-
 	hangcheck_timer_reset(gpu);
-	ret = 0;
 
 out_unlock:
 	mutex_unlock(&gpu->lock);
 
-	return ret;
+	return gpu_fence;
 }
 
 static void sync_point_worker(struct work_struct *work)
@@ -1527,9 +1481,6 @@ static irqreturn_t irq_handler(int irq, void *data)
 			event_free(gpu, event);
 		}
 
-		/* Retire the buffer objects in a work */
-		queue_work(gpu->wq, &gpu->retire_work);
-
 		ret = IRQ_HANDLED;
 	}
 
@@ -1701,22 +1652,22 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 
 	gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
 	if (!gpu->wq) {
-		if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
-			thermal_cooling_device_unregister(gpu->cooling);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_thermal;
 	}
 
+	ret = etnaviv_sched_init(gpu);
+	if (ret)
+		goto out_workqueue;
+
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);
 #else
 	ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-	if (ret < 0) {
-		destroy_workqueue(gpu->wq);
-		if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
-			thermal_cooling_device_unregister(gpu->cooling);
-		return ret;
-	}
+	if (ret < 0)
+		goto out_sched;
+
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1724,7 +1675,6 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	spin_lock_init(&gpu->fence_spinlock);
 
 	INIT_LIST_HEAD(&gpu->active_submit_list);
-	INIT_WORK(&gpu->retire_work, retire_worker);
 	INIT_WORK(&gpu->sync_point_work, sync_point_worker);
 	INIT_WORK(&gpu->recover_work, recover_worker);
 	init_waitqueue_head(&gpu->fence_event);
@@ -1737,6 +1687,18 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	pm_runtime_put_autosuspend(gpu->dev);
 
 	return 0;
+
+out_sched:
+	etnaviv_sched_fini(gpu);
+
+out_workqueue:
+	destroy_workqueue(gpu->wq);
+
+out_thermal:
+	if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+		thermal_cooling_device_unregister(gpu->cooling);
+
+	return ret;
 }
 
 static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
@@ -1751,6 +1713,8 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	flush_workqueue(gpu->wq);
 	destroy_workqueue(gpu->wq);
 
+	etnaviv_sched_fini(gpu);
+
 #ifdef CONFIG_PM
 	pm_runtime_get_sync(gpu->dev);
 	pm_runtime_put_sync_suspend(gpu->dev);
@@ -1803,6 +1767,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
+	mutex_init(&gpu->fence_idr_lock);
 
 	/* Map registers: */
 	gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 0170eb0a0923..02f7ffa34f3b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -108,6 +108,7 @@ struct etnaviv_gpu {
 	struct etnaviv_chip_identity identity;
 	struct etnaviv_file_private *lastctx;
 	struct workqueue_struct *wq;
+	struct drm_gpu_scheduler sched;
 
 	/* 'ring'-buffer: */
 	struct etnaviv_cmdbuf buffer;
@@ -128,18 +129,15 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
+	struct mutex fence_idr_lock;
 	struct idr fence_idr;
 	u32 next_fence;
 	u32 active_fence;
 	u32 completed_fence;
-	u32 retired_fence;
 	wait_queue_head_t fence_event;
 	u64 fence_context;
 	spinlock_t fence_spinlock;
 
-	/* worker for handling active-list retiring: */
-	struct work_struct retire_work;
-
 	/* worker for handling 'sync' points: */
 	struct work_struct sync_point_work;
 	int sync_point_event;
@@ -182,11 +180,6 @@ static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
 	return fence_after_eq(gpu->completed_fence, fence);
 }
 
-static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
-{
-	return fence_after_eq(gpu->retired_fence, fence);
-}
-
 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
@@ -203,8 +196,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 		u32 fence, struct timespec *timeout);
 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
 	struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
-	struct etnaviv_gem_submit *submit);
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
new file mode 100644
index 000000000000..143c3eca80b0
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <drm/gpu_scheduler.h>
+#include <linux/kthread.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+
+static int etnaviv_job_hang_limit = 0;
+module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
+static int etnaviv_hw_jobs_limit = 2;
+module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);
+
+static inline
+struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
+}
+
+struct dma_fence *etnaviv_sched_dependency(struct drm_sched_job *sched_job,
+					   struct drm_sched_entity *entity)
+{
+	return NULL;
+}
+
+struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
+{
+	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+	struct dma_fence *fence;
+
+	mutex_lock(&submit->gpu->lock);
+	list_add_tail(&submit->node, &submit->gpu->active_submit_list);
+	mutex_unlock(&submit->gpu->lock);
+
+	fence = etnaviv_gpu_submit(submit);
+	if (!fence) {
+		etnaviv_submit_put(submit);
+		return NULL;
+	}
+
+	return fence;
+}
+
+static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+	/* this replaces the hangcheck */
+}
+
+static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
+{
+	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+
+	mutex_lock(&submit->gpu->lock);
+	list_del(&submit->node);
+	mutex_unlock(&submit->gpu->lock);
+
+	etnaviv_submit_put(submit);
+}
+
+static const struct drm_sched_backend_ops etnaviv_sched_ops = {
+	.dependency = etnaviv_sched_dependency,
+	.run_job = etnaviv_sched_run_job,
+	.timedout_job = etnaviv_sched_timedout_job,
+	.free_job = etnaviv_sched_free_job,
+};
+
+int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
+			   struct etnaviv_gem_submit *submit)
+{
+	int ret;
+
+	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
+				 sched_entity, submit->cmdbuf.ctx);
+	if (ret)
+		return ret;
+
+	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
+	mutex_lock(&submit->gpu->fence_idr_lock);
+	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+						submit->out_fence, 0,
+						INT_MAX, GFP_KERNEL);
+	mutex_unlock(&submit->gpu->fence_idr_lock);
+	if (submit->out_fence_id < 0)
+		return -ENOMEM;
+
+	/* the scheduler holds on to the job now */
+	kref_get(&submit->refcount);
+
+	drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+
+	return 0;
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu)
+{
+	int ret;
+
+	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
+			     msecs_to_jiffies(500), dev_name(gpu->dev));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
+{
+	drm_sched_fini(&gpu->sched);
+}
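A detail worth noting about the fence rework visible above: user-visible
fence IDs now map to the scheduler's finished fences in gpu->fence_idr,
with insertion and removal serialized by the new fence_idr_lock, so the
lookup side can drop gpu->lock in favour of plain RCU. Condensed from the
etnaviv_gpu.c hunk:

    rcu_read_lock();
    fence = idr_find(&gpu->fence_idr, id);
    if (fence)
        fence = dma_fence_get_rcu(fence); /* NULL if refcount already hit 0 */
    rcu_read_unlock();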
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
new file mode 100644
index 000000000000..097635fa78ae
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_SCHED_H__
+#define __ETNAVIV_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct etnaviv_gpu;
+
+static inline
+struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
+{
+	return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu);
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
+int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
+			   struct etnaviv_gem_submit *submit);
+
+#endif /* __ETNAVIV_SCHED_H__ */