author     Dave Airlie <airlied@redhat.com>	2017-04-06 15:38:27 -0400
committer  Dave Airlie <airlied@redhat.com>	2017-04-06 15:38:27 -0400
commit     aed93ee7d03eac9b7d21f08aebe8a7d9ea069e20
tree       4d9ac85cf65b42a2896920b62d4d92f720ac5584
parent     0e961332147851d16038d52323fc50e23dcbdfa3
parent     78ec187f64fa5d8f837b8fc5bbbad88a89b63ab4
Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next
Highlights:
- Cooling device support from Russell, to allow GPU throttling on system
thermal overload.
- Explicit fencing support from Philipp, implemented in a similar way to
drm/msm.
* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux:
drm/etnaviv: submit support for out-fences
drm/etnaviv: return GPU fence through the submit structure
drm/etnaviv: submit support for in-fences
drm/etnaviv: add etnaviv cooling device
drm/etnaviv: switch to postclose
drm/etnaviv: add lockdep assert to fence allocation
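
As a usage note on the explicit fencing highlight above: the sketch below shows how userspace might drive the new submit flags. It is a hedged illustration only -- DRM_IOCTL_ETNAVIV_GEM_SUBMIT and drmIoctl() come from the pre-existing etnaviv uAPI and libdrm, and any submit fields not visible in this diff (pipe, stream size, BO/reloc arrays) are assumed to be filled in by the caller beforehand.

/*
 * Hedged userspace sketch of the explicit-fencing flags added by this
 * series. Everything except the flags/fence_fd handling is assumed to
 * be set up by the caller.
 */
#include <xf86drm.h>
#include <etnaviv_drm.h>

static int submit_with_fences(int drm_fd, struct drm_etnaviv_gem_submit *req,
			      int in_fence_fd, int *out_fence_fd)
{
	int ret;

	/* Skip implicit buffer-object sync and ask for an out-fence fd. */
	req->flags = ETNA_SUBMIT_NO_IMPLICIT | ETNA_SUBMIT_FENCE_FD_OUT;
	req->fence_fd = -1;

	if (in_fence_fd >= 0) {
		/* Make the GPU job wait for a fence from another producer. */
		req->flags |= ETNA_SUBMIT_FENCE_FD_IN;
		req->fence_fd = in_fence_fd;
	}

	ret = drmIoctl(drm_fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, req);
	if (ret)
		return ret;

	/* On success the kernel wrote the out-fence fd back into fence_fd. */
	*out_fence_fd = req->fence_fd;
	return 0;
}

The returned out-fence fd is a sync_file, so it can be handed to another driver or process, or waited on directly (see the poll() sketch after the etnaviv_gem_submit.c diff below).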
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig              |  1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c        |  6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 65
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c        | 99
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h        |  4
-rw-r--r--  include/uapi/drm/etnaviv_drm.h               |  8
7 files changed, 162 insertions, 25 deletions
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index cc1731c5289c..71cee4e9fefb 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -5,6 +5,7 @@ config DRM_ETNAVIV
 	depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
 	depends on MMU
 	select SHMEM
+	select SYNC_FILE
 	select TMPFS
 	select IOMMU_API
 	select IOMMU_SUPPORT
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 587e45043542..5255278dde56 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
-static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct etnaviv_file_private *ctx = file->driver_priv;
@@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = {
 				DRIVER_PRIME |
 				DRIVER_RENDER,
 	.open = etnaviv_open,
-	.preclose = etnaviv_preclose,
+	.postclose = etnaviv_postclose,
 	.gem_free_object_unlocked = etnaviv_gem_free_object,
 	.gem_vm_ops = &vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = {
 	.desc = "etnaviv DRM",
 	.date = "20151214",
 	.major = 1,
-	.minor = 0,
+	.minor = 1,
 };
 
 /*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index e63ff116a3b3..c4a091e87426 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -20,6 +20,7 @@
 #include <linux/reservation.h>
 #include "etnaviv_drv.h"
 
+struct dma_fence;
 struct etnaviv_gem_ops;
 struct etnaviv_gem_object;
 
@@ -104,9 +105,10 @@ struct etnaviv_gem_submit {
 	struct drm_device *dev;
 	struct etnaviv_gpu *gpu;
 	struct ww_acquire_ctx ticket;
-	u32 fence;
+	struct dma_fence *fence;
 	unsigned int nr_bos;
 	struct etnaviv_gem_submit_bo bos[0];
+	u32 flags;
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 726090d7a6ac..e1909429837e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -14,7 +14,9 @@
  * this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/dma-fence-array.h>
 #include <linux/reservation.h>
+#include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
@@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
 		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+		bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
-		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+						 explicit);
 		if (ret)
 			break;
 	}
@@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
 	}
 
 	ww_acquire_fini(&submit->ticket);
+	dma_fence_put(submit->fence);
 	kfree(submit);
 }
 
@@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct etnaviv_gem_submit *submit;
 	struct etnaviv_cmdbuf *cmdbuf;
 	struct etnaviv_gpu *gpu;
+	struct dma_fence *in_fence = NULL;
+	struct sync_file *sync_file = NULL;
+	int out_fence_fd = -1;
 	void *stream;
 	int ret;
 
@@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+		return -EINVAL;
+	}
+
 	/*
 	 * Copy the command submission and bo array to kernel space in
 	 * one go, and do this outside of any locks.
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_cmds;
 	}
 
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			ret = out_fence_fd;
+			goto err_submit_cmds;
+		}
+	}
+
 	submit = submit_create(dev, gpu, args->nr_bos);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto err_submit_cmds;
 	}
 
+	submit->flags = args->flags;
+
 	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
 	if (ret)
 		goto err_submit_objects;
@@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_objects;
 	}
 
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+		in_fence = sync_file_get_fence(args->fence_fd);
+		if (!in_fence) {
+			ret = -EINVAL;
+			goto err_submit_objects;
+		}
+
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
+		 */
+		if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+			ret = dma_fence_wait(in_fence, true);
+			if (ret)
+				goto err_submit_objects;
+		}
+	}
+
 	ret = submit_fence_sync(submit);
 	if (ret)
 		goto err_submit_objects;
@@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret == 0)
 		cmdbuf = NULL;
 
-	args->fence = submit->fence;
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+		/*
+		 * This can be improved: ideally we want to allocate the sync
+		 * file before kicking off the GPU job and just attach the
+		 * fence to the sync file here, eliminating the ENOMEM
+		 * possibility at this stage.
+		 */
+		sync_file = sync_file_create(submit->fence);
+		if (!sync_file) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		fd_install(out_fence_fd, sync_file->file);
+	}
+
+	args->fence_fd = out_fence_fd;
+	args->fence = submit->fence->seqno;
 
 out:
 	submit_unpin_objects(submit);
@@ -419,9 +476,13 @@ out:
 	flush_workqueue(priv->wq);
 
 err_submit_objects:
+	if (in_fence)
+		dma_fence_put(in_fence);
 	submit_cleanup(submit);
 
 err_submit_cmds:
+	if (ret && (out_fence_fd >= 0))
+		put_unused_fd(out_fence_fd);
 	/* if we still own the cmdbuf */
 	if (cmdbuf)
 		etnaviv_cmdbuf_free(cmdbuf);
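
Aside on the out-fence path above: the fd installed by fd_install() refers to a sync_file, so userspace can wait on it with ordinary poll(). A minimal hedged sketch, relying only on generic sync_file semantics rather than anything etnaviv-specific:

#include <errno.h>
#include <poll.h>

/* Wait for the sync_file fd returned via ETNA_SUBMIT_FENCE_FD_OUT;
 * a sync_file reports POLLIN once its fence has signalled. */
static int wait_out_fence(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fence_fd,
		.events = POLLIN,
	};
	int ret;

	do {
		ret = poll(&pfd, 1, timeout_ms);
	} while (ret < 0 && (errno == EINTR || errno == EAGAIN));

	if (ret < 0)
		return -errno;	/* poll() itself failed */
	if (ret == 0)
		return -ETIME;	/* fence not signalled within timeout */

	return 0;		/* fence signalled */
}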
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 130d7d517a19..bafbcb463555 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 }
 
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+	unsigned int fscale = 1 << (6 - gpu->freq_scale);
+	u32 clock;
+
+	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+	etnaviv_gpu_load_clock(gpu, clock);
+}
+
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 {
 	u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	timeout = jiffies + msecs_to_jiffies(1000);
 
 	while (time_is_after_jiffies(timeout)) {
-		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
 		/* enable clock */
-		etnaviv_gpu_load_clock(gpu, control);
+		etnaviv_gpu_update_clock(gpu);
+
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 
 		/* Wait for stable clock. Vivante's code waited for 1ms */
 		usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	}
 
 	/* We rely on the GPU running, so program the clock */
-	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	/* enable clock */
-	etnaviv_gpu_load_clock(gpu, control);
+	etnaviv_gpu_update_clock(gpu);
 
 	return 0;
 }
@@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_fence *f;
 
+	/*
+	 * GPU lock must already be held, otherwise fence completion order might
+	 * not match the seqno order assigned here.
+	 */
+	lockdep_assert_held(&gpu->lock);
+
 	f = kzalloc(sizeof(*f), GFP_KERNEL);
 	if (!f)
 		return NULL;
@@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 }
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive)
+	unsigned int context, bool exclusive, bool explicit)
 {
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
@@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 			return ret;
 	}
 
+	if (explicit)
+		return 0;
+
 	/*
 	 * If we have any shared fences, then the exclusive fence
 	 * should be ignored as it will already have been signalled.
@@ -1321,8 +1337,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	mutex_lock(&gpu->lock);
 
 	gpu->event[event].fence = fence;
-	submit->fence = fence->seqno;
-	gpu->active_fence = submit->fence;
+	submit->fence = dma_fence_get(fence);
+	gpu->active_fence = submit->fence->seqno;
 
 	if (gpu->lastctx != cmdbuf->ctx) {
 		gpu->mmu->need_flush = true;
@@ -1526,17 +1542,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 #ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
-	u32 clock;
 	int ret;
 
 	ret = mutex_lock_killable(&gpu->lock);
 	if (ret)
 		return ret;
 
-	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	etnaviv_gpu_load_clock(gpu, clock);
+	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
@@ -1548,6 +1560,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 }
 #endif
 
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	*state = 6;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	*state = gpu->freq_scale;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	mutex_lock(&gpu->lock);
+	gpu->freq_scale = state;
+	if (!pm_runtime_suspended(gpu->dev))
+		etnaviv_gpu_update_clock(gpu);
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+	.get_max_state = etnaviv_gpu_cooling_get_max_state,
+	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	void *data)
 {
@@ -1556,13 +1609,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	int ret;
 
+	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+				(char *)dev_name(dev), gpu, &cooling_ops);
+	if (IS_ERR(gpu->cooling))
+		return PTR_ERR(gpu->cooling);
+
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);
 #else
 	ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-	if (ret < 0)
+	if (ret < 0) {
+		thermal_cooling_device_unregister(gpu->cooling);
 		return ret;
+	}
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1676,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	}
 
 	gpu->drm = NULL;
+
+	thermal_cooling_device_unregister(gpu->cooling);
+	gpu->cooling = NULL;
 }
 
 static const struct component_ops gpu_ops = {
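
A note on the cooling arithmetic introduced above: the cooling device advertises states 0..6, and etnaviv_gpu_update_clock() maps a state to the core clock divider via fscale = 1 << (6 - freq_scale), so state 0 restores the previously hard-coded full-speed value 0x40 and state 6 throttles hardest. The toy program below only illustrates that mapping; it is not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int state;

	for (state = 0; state <= 6; state++) {
		/* Same expression as etnaviv_gpu_update_clock(). */
		unsigned int fscale = 1u << (6 - state);

		/*
		 * state 0 -> FSCALE_VAL 0x40, the full-speed value that used
		 * to be hard-coded; state 6 -> 0x01, maximum throttling.
		 */
		printf("cooling state %u -> FSCALE_VAL 0x%02x\n", state, fscale);
	}

	return 0;
}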
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c0606ea7d5e..9227a9740447 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
 
 struct etnaviv_gpu {
 	struct drm_device *drm;
+	struct thermal_cooling_device *cooling;
 	struct device *dev;
 	struct mutex lock;
 	struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
 	u32 hangcheck_fence;
 	u32 hangcheck_dma_addr;
 	struct work_struct recover_work;
+	unsigned int freq_scale;
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
@@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
 #endif
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive);
+	unsigned int context, bool exclusive, bool implicit);
 
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 2584c1cca42f..76f6f78a352b 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo {
  * one or more cmdstream buffers. This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
+#define ETNA_SUBMIT_NO_IMPLICIT   0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN   0x0002
+#define ETNA_SUBMIT_FENCE_FD_OUT  0x0004
+#define ETNA_SUBMIT_FLAGS         (ETNA_SUBMIT_NO_IMPLICIT | \
+                                   ETNA_SUBMIT_FENCE_FD_IN | \
+                                   ETNA_SUBMIT_FENCE_FD_OUT)
 #define ETNA_PIPE_3D      0x00
 #define ETNA_PIPE_2D      0x01
 #define ETNA_PIPE_VG      0x02
@@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit {
 	__u64 bos;      /* in, ptr to array of submit_bo's */
 	__u64 relocs;   /* in, ptr to array of submit_reloc's */
 	__u64 stream;   /* in, ptr to cmdstream */
+	__u32 flags;    /* in, mask of ETNA_SUBMIT_x */
+	__s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on