diff options
author | Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> | 2016-05-13 18:17:02 -0400 |
---|---|---|
committer | Mauro Carvalho Chehab <mchehab@s-opensource.com> | 2016-06-28 11:13:45 -0400 |
commit | 9489a8ff0a13fc0f62e556a31341d3bbaef9da6b (patch) | |
tree | 0d9ce013d2fe217f3ebcf9d355ef32f85fe93b18 | |
parent | 6a8e07b215a91be310dac358fdccd7709dd2458f (diff) |
[media] v4l: vsp1: dl: Don't free fragments with interrupts disabled
Freeing a fragment requires freeing DMA coherent memory, which cannot be
performed with interrupts disabled as per the DMA mapping API contract.
The fragments can't thus be freed synchronously when a display list is
recycled. Instead, move the fragments to a garbage list and use a work
queue to run the garbage collection.
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
-rw-r--r-- | drivers/media/platform/vsp1/vsp1_dl.c | 72 |
1 files changed, 58 insertions, 14 deletions
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c index e238d9b9376b..37c3518aa2a8 100644 --- a/drivers/media/platform/vsp1/vsp1_dl.c +++ b/drivers/media/platform/vsp1/vsp1_dl.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/workqueue.h> | ||
18 | 19 | ||
19 | #include "vsp1.h" | 20 | #include "vsp1.h" |
20 | #include "vsp1_dl.h" | 21 | #include "vsp1_dl.h" |
@@ -92,11 +93,13 @@ enum vsp1_dl_mode { | |||
92 | * @index: index of the related WPF | 93 | * @index: index of the related WPF |
93 | * @mode: display list operation mode (header or headerless) | 94 | * @mode: display list operation mode (header or headerless) |
94 | * @vsp1: the VSP1 device | 95 | * @vsp1: the VSP1 device |
95 | * @lock: protects the active, queued and pending lists | 96 | * @lock: protects the free, active, queued, pending and gc_fragments lists |
96 | * @free: array of all free display lists | 97 | * @free: array of all free display lists |
97 | * @active: list currently being processed (loaded) by hardware | 98 | * @active: list currently being processed (loaded) by hardware |
98 | * @queued: list queued to the hardware (written to the DL registers) | 99 | * @queued: list queued to the hardware (written to the DL registers) |
99 | * @pending: list waiting to be queued to the hardware | 100 | * @pending: list waiting to be queued to the hardware |
101 | * @gc_work: fragments garbage collector work struct | ||
102 | * @gc_fragments: array of display list fragments waiting to be freed | ||
100 | */ | 103 | */ |
101 | struct vsp1_dl_manager { | 104 | struct vsp1_dl_manager { |
102 | unsigned int index; | 105 | unsigned int index; |
@@ -108,6 +111,9 @@ struct vsp1_dl_manager { | |||
108 | struct vsp1_dl_list *active; | 111 | struct vsp1_dl_list *active; |
109 | struct vsp1_dl_list *queued; | 112 | struct vsp1_dl_list *queued; |
110 | struct vsp1_dl_list *pending; | 113 | struct vsp1_dl_list *pending; |
114 | |||
115 | struct work_struct gc_work; | ||
116 | struct list_head gc_fragments; | ||
111 | }; | 117 | }; |
112 | 118 | ||
113 | /* ----------------------------------------------------------------------------- | 119 | /* ----------------------------------------------------------------------------- |
@@ -262,21 +268,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm) | |||
262 | return dl; | 268 | return dl; |
263 | } | 269 | } |
264 | 270 | ||
265 | static void vsp1_dl_list_free_fragments(struct vsp1_dl_list *dl) | ||
266 | { | ||
267 | struct vsp1_dl_body *dlb, *next; | ||
268 | |||
269 | list_for_each_entry_safe(dlb, next, &dl->fragments, list) { | ||
270 | list_del(&dlb->list); | ||
271 | vsp1_dl_body_cleanup(dlb); | ||
272 | kfree(dlb); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static void vsp1_dl_list_free(struct vsp1_dl_list *dl) | 271 | static void vsp1_dl_list_free(struct vsp1_dl_list *dl) |
277 | { | 272 | { |
278 | vsp1_dl_body_cleanup(&dl->body0); | 273 | vsp1_dl_body_cleanup(&dl->body0); |
279 | vsp1_dl_list_free_fragments(dl); | 274 | list_splice_init(&dl->fragments, &dl->dlm->gc_fragments); |
280 | kfree(dl); | 275 | kfree(dl); |
281 | } | 276 | } |
282 | 277 | ||
@@ -311,7 +306,16 @@ static void __vsp1_dl_list_put(struct vsp1_dl_list *dl) | |||
311 | if (!dl) | 306 | if (!dl) |
312 | return; | 307 | return; |
313 | 308 | ||
314 | vsp1_dl_list_free_fragments(dl); | 309 | /* We can't free fragments here as DMA memory can only be freed in |
310 | * interruptible context. Move all fragments to the display list | ||
311 | * manager's list of fragments to be freed, they will be | ||
312 | * garbage-collected by the work queue. | ||
313 | */ | ||
314 | if (!list_empty(&dl->fragments)) { | ||
315 | list_splice_init(&dl->fragments, &dl->dlm->gc_fragments); | ||
316 | schedule_work(&dl->dlm->gc_work); | ||
317 | } | ||
318 | |||
315 | dl->body0.num_entries = 0; | 319 | dl->body0.num_entries = 0; |
316 | 320 | ||
317 | list_add_tail(&dl->list, &dl->dlm->free); | 321 | list_add_tail(&dl->list, &dl->dlm->free); |
@@ -550,6 +554,40 @@ void vsp1_dlm_reset(struct vsp1_dl_manager *dlm) | |||
550 | dlm->pending = NULL; | 554 | dlm->pending = NULL; |
551 | } | 555 | } |
552 | 556 | ||
557 | /* | ||
558 | * Free all fragments awaiting to be garbage-collected. | ||
559 | * | ||
560 | * This function must be called without the display list manager lock held. | ||
561 | */ | ||
562 | static void vsp1_dlm_fragments_free(struct vsp1_dl_manager *dlm) | ||
563 | { | ||
564 | unsigned long flags; | ||
565 | |||
566 | spin_lock_irqsave(&dlm->lock, flags); | ||
567 | |||
568 | while (!list_empty(&dlm->gc_fragments)) { | ||
569 | struct vsp1_dl_body *dlb; | ||
570 | |||
571 | dlb = list_first_entry(&dlm->gc_fragments, struct vsp1_dl_body, | ||
572 | list); | ||
573 | list_del(&dlb->list); | ||
574 | |||
575 | spin_unlock_irqrestore(&dlm->lock, flags); | ||
576 | vsp1_dl_fragment_free(dlb); | ||
577 | spin_lock_irqsave(&dlm->lock, flags); | ||
578 | } | ||
579 | |||
580 | spin_unlock_irqrestore(&dlm->lock, flags); | ||
581 | } | ||
582 | |||
583 | static void vsp1_dlm_garbage_collect(struct work_struct *work) | ||
584 | { | ||
585 | struct vsp1_dl_manager *dlm = | ||
586 | container_of(work, struct vsp1_dl_manager, gc_work); | ||
587 | |||
588 | vsp1_dlm_fragments_free(dlm); | ||
589 | } | ||
590 | |||
553 | struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1, | 591 | struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1, |
554 | unsigned int index, | 592 | unsigned int index, |
555 | unsigned int prealloc) | 593 | unsigned int prealloc) |
@@ -568,6 +606,8 @@ struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1, | |||
568 | 606 | ||
569 | spin_lock_init(&dlm->lock); | 607 | spin_lock_init(&dlm->lock); |
570 | INIT_LIST_HEAD(&dlm->free); | 608 | INIT_LIST_HEAD(&dlm->free); |
609 | INIT_LIST_HEAD(&dlm->gc_fragments); | ||
610 | INIT_WORK(&dlm->gc_work, vsp1_dlm_garbage_collect); | ||
571 | 611 | ||
572 | for (i = 0; i < prealloc; ++i) { | 612 | for (i = 0; i < prealloc; ++i) { |
573 | struct vsp1_dl_list *dl; | 613 | struct vsp1_dl_list *dl; |
@@ -589,8 +629,12 @@ void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm) | |||
589 | if (!dlm) | 629 | if (!dlm) |
590 | return; | 630 | return; |
591 | 631 | ||
632 | cancel_work_sync(&dlm->gc_work); | ||
633 | |||
592 | list_for_each_entry_safe(dl, next, &dlm->free, list) { | 634 | list_for_each_entry_safe(dl, next, &dlm->free, list) { |
593 | list_del(&dl->list); | 635 | list_del(&dl->list); |
594 | vsp1_dl_list_free(dl); | 636 | vsp1_dl_list_free(dl); |
595 | } | 637 | } |
638 | |||
639 | vsp1_dlm_fragments_free(dlm); | ||
596 | } | 640 | } |