 drivers/gpu/drm/vc4/vc4_bo.c        | 16
 drivers/gpu/drm/vc4/vc4_drv.h       | 13
 drivers/gpu/drm/vc4/vc4_gem.c       | 65
 drivers/gpu/drm/vc4/vc4_irq.c       |  2
 drivers/gpu/drm/vc4/vc4_render_cl.c | 22
 drivers/gpu/drm/vc4/vc4_v3d.c       | 48
 drivers/gpu/drm/vc4/vc4_validate.c  |  4
 7 files changed, 118 insertions, 52 deletions
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3ec9a62..22278bcfc60e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 	struct drm_gem_cma_object *cma_obj;

 	if (size == 0)
-		return NULL;
+		return ERR_PTR(-EINVAL);

 	/* First, try to get a vc4_bo from the kernel BO cache. */
 	if (from_cache) {
@@ -237,7 +237,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
 		if (IS_ERR(cma_obj)) {
 			DRM_ERROR("Failed to allocate from CMA:\n");
 			vc4_bo_stats_dump(vc4);
-			return NULL;
+			return ERR_PTR(-ENOMEM);
 		}
 	}

@@ -259,8 +259,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
 	args->size = args->pitch * args->height;

 	bo = vc4_bo_create(dev, args->size, false);
-	if (!bo)
-		return -ENOMEM;
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);

 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 	drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 	 * get zeroed, and that might leak data between users.
 	 */
 	bo = vc4_bo_create(dev, args->size, false);
-	if (!bo)
-		return -ENOMEM;
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);

 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 	drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 	}

 	bo = vc4_bo_create(dev, args->size, true);
-	if (!bo)
-		return -ENOMEM;
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);

 	ret = copy_from_user(bo->base.vaddr,
			     (void __user *)(uintptr_t)args->data,
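
The vc4_bo.c hunks above switch vc4_bo_create() from returning NULL on failure to returning an ERR_PTR-encoded errno. A minimal sketch of the caller-side idiom, assuming kernel context (the function example_alloc is hypothetical, for illustration only):

	#include <linux/err.h>

	/* Hypothetical caller: with the change above, vc4_bo_create() encodes
	 * the reason for failure (-EINVAL for a zero size, -ENOMEM when CMA is
	 * exhausted) in the returned pointer instead of collapsing both to NULL.
	 */
	static int example_alloc(struct drm_device *dev, size_t size)
	{
		struct vc4_bo *bo = vc4_bo_create(dev, size, false);

		if (IS_ERR(bo))
			return PTR_ERR(bo);	/* propagate the exact errno */

		/* ... use bo, then drop the reference ... */
		return 0;
	}
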
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865ec2bae..51a63330d4f8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@ struct vc4_dev {
 	struct vc4_bo *overflow_mem;
 	struct work_struct overflow_mem_work;

+	int power_refcount;
+
+	/* Mutex controlling the power refcount. */
+	struct mutex power_lock;
+
 	struct {
-		uint32_t last_ct0ca, last_ct1ca;
 		struct timer_list timer;
 		struct work_struct reset_work;
 	} hangcheck;
@@ -142,6 +146,7 @@ struct vc4_seqno_cb {
 };

 struct vc4_v3d {
+	struct vc4_dev *vc4;
 	struct platform_device *pdev;
 	void __iomem *regs;
 };
@@ -192,6 +197,11 @@ struct vc4_exec_info {
 	/* Sequence number for this bin/render job. */
 	uint64_t seqno;

+	/* Last current addresses the hardware was processing when the
+	 * hangcheck timer checked on us.
+	 */
+	uint32_t last_ct0ca, last_ct1ca;
+
 	/* Kernel-space copy of the ioctl arguments */
 	struct drm_vc4_submit_cl *args;

@@ -434,7 +444,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
 extern struct platform_driver vc4_v3d_driver;
 int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
 int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
-int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);

 /* vc4_validate.c */
 int
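
The new power_refcount/power_lock pair in struct vc4_dev implements a mutex-protected get/put scheme around runtime PM: the first submitted job powers V3D up, and the last completed job releases it. A sketch of that pattern as standalone helpers (these helpers are hypothetical; the patch open-codes the same logic in vc4_submit_cl_ioctl() and vc4_complete_exec() below):

	/* Hypothetical helpers capturing the refcount pattern used in vc4_gem.c. */
	static int vc4_power_get(struct vc4_dev *vc4)
	{
		int ret = 0;

		mutex_lock(&vc4->power_lock);
		if (vc4->power_refcount++ == 0)
			ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
		mutex_unlock(&vc4->power_lock);

		return ret;
	}

	static void vc4_power_put(struct vc4_dev *vc4)
	{
		mutex_lock(&vc4->power_lock);
		if (--vc4->power_refcount == 0)
			pm_runtime_put(&vc4->v3d->pdev->dev);
		mutex_unlock(&vc4->power_lock);
	}
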
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a6f4b5..202aa1544acc 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@

 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/device.h>
 #include <linux/io.h>

@@ -228,8 +229,16 @@ vc4_reset(struct drm_device *dev)
 	struct vc4_dev *vc4 = to_vc4_dev(dev);

 	DRM_INFO("Resetting GPU.\n");
-	vc4_v3d_set_power(vc4, false);
-	vc4_v3d_set_power(vc4, true);
+
+	mutex_lock(&vc4->power_lock);
+	if (vc4->power_refcount) {
+		/* Power the device off and back on the by dropping the
+		 * reference on runtime PM.
+		 */
+		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
+		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+	}
+	mutex_unlock(&vc4->power_lock);

 	vc4_irq_reset(dev);

@@ -257,10 +266,17 @@ vc4_hangcheck_elapsed(unsigned long data)
 	struct drm_device *dev = (struct drm_device *)data;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	uint32_t ct0ca, ct1ca;
+	unsigned long irqflags;
+	struct vc4_exec_info *exec;
+
+	spin_lock_irqsave(&vc4->job_lock, irqflags);
+	exec = vc4_first_job(vc4);

 	/* If idle, we can stop watching for hangs. */
-	if (list_empty(&vc4->job_list))
+	if (!exec) {
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		return;
+	}

 	ct0ca = V3D_READ(V3D_CTNCA(0));
 	ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@ vc4_hangcheck_elapsed(unsigned long data)
 	/* If we've made any progress in execution, rearm the timer
 	 * and wait.
 	 */
-	if (ct0ca != vc4->hangcheck.last_ct0ca ||
-	    ct1ca != vc4->hangcheck.last_ct1ca) {
-		vc4->hangcheck.last_ct0ca = ct0ca;
-		vc4->hangcheck.last_ct1ca = ct1ca;
+	if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
+		exec->last_ct0ca = ct0ca;
+		exec->last_ct1ca = ct1ca;
+		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 		vc4_queue_hangcheck(dev);
 		return;
 	}

+	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
 	/* We've gone too long with no progress, reset. This has to
 	 * be done from a work struct, since resetting can sleep and
 	 * this timer hook isn't allowed to.
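
For reference, the hangcheck now tracks progress per job via vc4_first_job() under job_lock instead of a global list_empty() check. The helper lives in vc4_drv.h and is not part of this diff; its body below is quoted from memory and should be treated as an assumption, but it explains why the idle test becomes if (!exec):

	/* Assumed shape of the existing helper in vc4_drv.h (not part of this patch). */
	static inline struct vc4_exec_info *
	vc4_first_job(struct vc4_dev *vc4)
	{
		if (list_empty(&vc4->job_list))
			return NULL;
		return list_first_entry(&vc4->job_list, struct vc4_exec_info, head);
	}
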
@@ -340,12 +358,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, | |||
340 | finish_wait(&vc4->job_wait_queue, &wait); | 358 | finish_wait(&vc4->job_wait_queue, &wait); |
341 | trace_vc4_wait_for_seqno_end(dev, seqno); | 359 | trace_vc4_wait_for_seqno_end(dev, seqno); |
342 | 360 | ||
343 | if (ret && ret != -ERESTARTSYS) { | 361 | return ret; |
344 | DRM_ERROR("timeout waiting for render thread idle\n"); | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | return 0; | ||
349 | } | 362 | } |
350 | 363 | ||
351 | static void | 364 | static void |
@@ -578,9 +591,9 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec) | |||
578 | } | 591 | } |
579 | 592 | ||
580 | bo = vc4_bo_create(dev, exec_size, true); | 593 | bo = vc4_bo_create(dev, exec_size, true); |
581 | if (!bo) { | 594 | if (IS_ERR(bo)) { |
582 | DRM_ERROR("Couldn't allocate BO for binning\n"); | 595 | DRM_ERROR("Couldn't allocate BO for binning\n"); |
583 | ret = -ENOMEM; | 596 | ret = PTR_ERR(bo); |
584 | goto fail; | 597 | goto fail; |
585 | } | 598 | } |
586 | exec->exec_bo = &bo->base; | 599 | exec->exec_bo = &bo->base; |
@@ -617,6 +630,7 @@ fail: | |||
617 | static void | 630 | static void |
618 | vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) | 631 | vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) |
619 | { | 632 | { |
633 | struct vc4_dev *vc4 = to_vc4_dev(dev); | ||
620 | unsigned i; | 634 | unsigned i; |
621 | 635 | ||
622 | /* Need the struct lock for drm_gem_object_unreference(). */ | 636 | /* Need the struct lock for drm_gem_object_unreference(). */ |
@@ -635,6 +649,11 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) | |||
635 | } | 649 | } |
636 | mutex_unlock(&dev->struct_mutex); | 650 | mutex_unlock(&dev->struct_mutex); |
637 | 651 | ||
652 | mutex_lock(&vc4->power_lock); | ||
653 | if (--vc4->power_refcount == 0) | ||
654 | pm_runtime_put(&vc4->v3d->pdev->dev); | ||
655 | mutex_unlock(&vc4->power_lock); | ||
656 | |||
638 | kfree(exec); | 657 | kfree(exec); |
639 | } | 658 | } |
640 | 659 | ||
@@ -746,6 +765,9 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, | |||
746 | struct drm_gem_object *gem_obj; | 765 | struct drm_gem_object *gem_obj; |
747 | struct vc4_bo *bo; | 766 | struct vc4_bo *bo; |
748 | 767 | ||
768 | if (args->pad != 0) | ||
769 | return -EINVAL; | ||
770 | |||
749 | gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); | 771 | gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); |
750 | if (!gem_obj) { | 772 | if (!gem_obj) { |
751 | DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); | 773 | DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); |
@@ -772,7 +794,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
772 | struct vc4_dev *vc4 = to_vc4_dev(dev); | 794 | struct vc4_dev *vc4 = to_vc4_dev(dev); |
773 | struct drm_vc4_submit_cl *args = data; | 795 | struct drm_vc4_submit_cl *args = data; |
774 | struct vc4_exec_info *exec; | 796 | struct vc4_exec_info *exec; |
775 | int ret; | 797 | int ret = 0; |
776 | 798 | ||
777 | if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { | 799 | if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { |
778 | DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); | 800 | DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); |
@@ -785,6 +807,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, | |||
785 | return -ENOMEM; | 807 | return -ENOMEM; |
786 | } | 808 | } |
787 | 809 | ||
810 | mutex_lock(&vc4->power_lock); | ||
811 | if (vc4->power_refcount++ == 0) | ||
812 | ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); | ||
813 | mutex_unlock(&vc4->power_lock); | ||
814 | if (ret < 0) { | ||
815 | kfree(exec); | ||
816 | return ret; | ||
817 | } | ||
818 | |||
788 | exec->args = args; | 819 | exec->args = args; |
789 | INIT_LIST_HEAD(&exec->unref_list); | 820 | INIT_LIST_HEAD(&exec->unref_list); |
790 | 821 | ||
@@ -839,6 +870,8 @@ vc4_gem_init(struct drm_device *dev) | |||
839 | (unsigned long)dev); | 870 | (unsigned long)dev); |
840 | 871 | ||
841 | INIT_WORK(&vc4->job_done_work, vc4_job_done_work); | 872 | INIT_WORK(&vc4->job_done_work, vc4_job_done_work); |
873 | |||
874 | mutex_init(&vc4->power_lock); | ||
842 | } | 875 | } |
843 | 876 | ||
844 | void | 877 | void |
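
The new args->pad check in vc4_wait_bo_ioctl() above means userspace must zero the padding field before issuing the ioctl. A sketch of the userspace side, assuming the struct drm_vc4_wait_bo layout from the vc4 UAPI header (the include path may vary with how the UAPI headers are installed; drm_fd and bo_handle are placeholders):

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/vc4_drm.h>

	static int wait_for_bo(int drm_fd, unsigned int bo_handle)
	{
		struct drm_vc4_wait_bo wait;

		memset(&wait, 0, sizeof(wait));	/* keeps .pad == 0, as the kernel now requires */
		wait.handle = bo_handle;
		wait.timeout_ns = 1000000000ull;	/* wait up to one second */

		if (ioctl(drm_fd, DRM_IOCTL_VC4_WAIT_BO, &wait) == -1) {
			perror("DRM_IOCTL_VC4_WAIT_BO");	/* EINVAL if the pad field were nonzero */
			return -1;
		}

		return 0;
	}
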
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e758db..78a21357fb2d 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@ vc4_overflow_mem_work(struct work_struct *work)
 	struct vc4_bo *bo;

 	bo = vc4_bo_create(dev, 256 * 1024, true);
-	if (!bo) {
+	if (IS_ERR(bo)) {
 		DRM_ERROR("Couldn't allocate binner overflow mem\n");
 		return;
 	}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312e2c1b..0f12418725e5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
 	size += xtiles * ytiles * loop_body_size;

 	setup->rcl = &vc4_bo_create(dev, size, true)->base;
-	if (!setup->rcl)
-		return -ENOMEM;
+	if (IS_ERR(setup->rcl))
+		return PTR_ERR(setup->rcl);
 	list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
 		      &exec->unref_list);

-	rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
-	rcl_u32(setup,
-		(setup->color_write ? (setup->color_write->paddr +
-				       args->color_write.offset) :
-		0));
-	rcl_u16(setup, args->width);
-	rcl_u16(setup, args->height);
-	rcl_u16(setup, args->color_write.bits);
-
 	/* The tile buffer gets cleared when the previous tile is stored. If
 	 * the clear values changed between frames, then the tile buffer has
 	 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
 		rcl_u32(setup, 0); /* no address, since we're in None mode */
 	}

+	rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
+	rcl_u32(setup,
+		(setup->color_write ? (setup->color_write->paddr +
+				       args->color_write.offset) :
+		0));
+	rcl_u16(setup, args->width);
+	rcl_u16(setup, args->height);
+	rcl_u16(setup, args->color_write.bits);
+
 	for (y = min_y_tile; y <= max_y_tile; y++) {
 		for (x = min_x_tile; x <= max_x_tile; x++) {
 			bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71db978..31de5d17bc85 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
  */

 #include "linux/component.h"
+#include "linux/pm_runtime.h"
 #include "vc4_drv.h"
 #include "vc4_regs.h"

@@ -144,18 +145,6 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 }
 #endif /* CONFIG_DEBUG_FS */

-int
-vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
-{
-	/* XXX: This interface is needed for GPU reset, and the way to
-	 * do it is to turn our power domain off and back on. We
-	 * can't just reset from within the driver, because the reset
-	 * bits are in the power domain's register area, and get set
-	 * during the poweron process.
-	 */
-	return 0;
-}
-
 static void vc4_v3d_init_hw(struct drm_device *dev)
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
 	V3D_WRITE(V3D_VPMBASE, 0);
 }

+#ifdef CONFIG_PM
+static int vc4_v3d_runtime_suspend(struct device *dev)
+{
+	struct vc4_v3d *v3d = dev_get_drvdata(dev);
+	struct vc4_dev *vc4 = v3d->vc4;
+
+	vc4_irq_uninstall(vc4->dev);
+
+	return 0;
+}
+
+static int vc4_v3d_runtime_resume(struct device *dev)
+{
+	struct vc4_v3d *v3d = dev_get_drvdata(dev);
+	struct vc4_dev *vc4 = v3d->vc4;
+
+	vc4_v3d_init_hw(vc4->dev);
+	vc4_irq_postinstall(vc4->dev);
+
+	return 0;
+}
+#endif
+
 static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 	if (!v3d)
 		return -ENOMEM;

+	dev_set_drvdata(dev, v3d);
+
 	v3d->pdev = pdev;

 	v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 		return PTR_ERR(v3d->regs);

 	vc4->v3d = v3d;
+	v3d->vc4 = vc4;

 	if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
 		DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 	}

+	pm_runtime_enable(dev);
+
 	return 0;
 }

@@ -216,6 +233,8 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
 	struct drm_device *drm = dev_get_drvdata(master);
 	struct vc4_dev *vc4 = to_vc4_dev(drm);

+	pm_runtime_disable(dev);
+
 	drm_irq_uninstall(drm);

 	/* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
 	vc4->v3d = NULL;
 }

+static const struct dev_pm_ops vc4_v3d_pm_ops = {
+	SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
+};
+
 static const struct component_ops vc4_v3d_ops = {
 	.bind = vc4_v3d_bind,
 	.unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@ struct platform_driver vc4_v3d_driver = {
 	.driver = {
 		.name = "vc4_v3d",
 		.of_match_table = vc4_v3d_dt_match,
+		.pm = &vc4_v3d_pm_ops,
 	},
 };
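
With pm_runtime_enable() in bind and .pm pointing at vc4_v3d_pm_ops, V3D power is now driven by runtime PM usage counts on the v3d platform device. A minimal sketch of how a get/put pair exercises the callbacks added above (example_power_cycle is a hypothetical illustration, not part of the driver):

	/* Hypothetical illustration of the runtime PM flow set up above. */
	static int example_power_cycle(struct vc4_dev *vc4)
	{
		struct device *dev = &vc4->v3d->pdev->dev;
		int ret;

		/* 0 -> 1: the PM core powers the domain up and calls
		 * vc4_v3d_runtime_resume(), which reinitializes V3D and its IRQs.
		 */
		ret = pm_runtime_get_sync(dev);
		if (ret < 0)
			return ret;

		/* ... V3D is guaranteed powered here ... */

		/* 1 -> 0: the device goes idle; the PM core may then call
		 * vc4_v3d_runtime_suspend() and cut power to the domain.
		 */
		pm_runtime_put(dev);

		return 0;
	}
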
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6face3..24c2c746e8f3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
 	tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
 				true);
 	exec->tile_bo = &tile_bo->base;
-	if (!exec->tile_bo)
-		return -ENOMEM;
+	if (IS_ERR(exec->tile_bo))
+		return PTR_ERR(exec->tile_bo);
 	list_add_tail(&tile_bo->unref_head, &exec->unref_list);

 	/* tile alloc address. */