diff options
author | Andrzej Hajda <a.hajda@samsung.com> | 2015-12-02 03:22:31 -0500 |
---|---|---|
committer | Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2015-12-23 10:58:36 -0500 |
commit | 7969b12523668d764746e762b42793fd1aefcf13 (patch) | |
tree | 594852b6d5704899c8b589ea77ff6e3ff130176a | |
parent | 8eceb9a0070138ba6db805b38f46c46a926b37b8 (diff) |
[media] s5p-mfc: use spinlock to protect MFC context
The MFC driver uses the dev->irqlock spinlock to protect only the buffer queues,
but many context fields also require protection - they can be accessed
concurrently from the IOCTL handlers and the IRQ handler. This patch extends the
protection range of irqlock to cover those fields as well.
Signed-off-by: Andrzej Hajda <a.hajda@samsung.com>
Signed-off-by: Kamil Debski <k.debski@samsung.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
-rw-r--r-- | drivers/media/platform/s5p-mfc/s5p_mfc.c | 15 | ||||
-rw-r--r-- | drivers/media/platform/s5p-mfc/s5p_mfc_common.h | 2 | ||||
-rw-r--r-- | drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | 13 | ||||
-rw-r--r-- | drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | 14 | ||||
-rw-r--r-- | drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c | 19 | ||||
-rw-r--r-- | drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c | 18 |
6 files changed, 11 insertions, 70 deletions
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index cff59aa9a615..d5bd1762fbb4 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c | |||
@@ -359,7 +359,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, | |||
359 | unsigned int dst_frame_status; | 359 | unsigned int dst_frame_status; |
360 | unsigned int dec_frame_status; | 360 | unsigned int dec_frame_status; |
361 | struct s5p_mfc_buf *src_buf; | 361 | struct s5p_mfc_buf *src_buf; |
362 | unsigned long flags; | ||
363 | unsigned int res_change; | 362 | unsigned int res_change; |
364 | 363 | ||
365 | dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev) | 364 | dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev) |
@@ -385,7 +384,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, | |||
385 | if (ctx->dpb_flush_flag) | 384 | if (ctx->dpb_flush_flag) |
386 | ctx->dpb_flush_flag = 0; | 385 | ctx->dpb_flush_flag = 0; |
387 | 386 | ||
388 | spin_lock_irqsave(&dev->irqlock, flags); | ||
389 | /* All frames remaining in the buffer have been extracted */ | 387 | /* All frames remaining in the buffer have been extracted */ |
390 | if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) { | 388 | if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) { |
391 | if (ctx->state == MFCINST_RES_CHANGE_FLUSH) { | 389 | if (ctx->state == MFCINST_RES_CHANGE_FLUSH) { |
@@ -445,7 +443,6 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, | |||
445 | } | 443 | } |
446 | } | 444 | } |
447 | leave_handle_frame: | 445 | leave_handle_frame: |
448 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
449 | if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) | 446 | if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) |
450 | || ctx->dst_queue_cnt < ctx->pb_count) | 447 | || ctx->dst_queue_cnt < ctx->pb_count) |
451 | clear_work_bit(ctx); | 448 | clear_work_bit(ctx); |
@@ -464,8 +461,6 @@ leave_handle_frame: | |||
464 | static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, | 461 | static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, |
465 | struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) | 462 | struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) |
466 | { | 463 | { |
467 | unsigned long flags; | ||
468 | |||
469 | mfc_err("Interrupt Error: %08x\n", err); | 464 | mfc_err("Interrupt Error: %08x\n", err); |
470 | 465 | ||
471 | if (ctx != NULL) { | 466 | if (ctx != NULL) { |
@@ -482,11 +477,9 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev, | |||
482 | clear_work_bit(ctx); | 477 | clear_work_bit(ctx); |
483 | ctx->state = MFCINST_ERROR; | 478 | ctx->state = MFCINST_ERROR; |
484 | /* Mark all dst buffers as having an error */ | 479 | /* Mark all dst buffers as having an error */ |
485 | spin_lock_irqsave(&dev->irqlock, flags); | ||
486 | s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); | 480 | s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); |
487 | /* Mark all src buffers as having an error */ | 481 | /* Mark all src buffers as having an error */ |
488 | s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); | 482 | s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); |
489 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
490 | wake_up_ctx(ctx, reason, err); | 483 | wake_up_ctx(ctx, reason, err); |
491 | break; | 484 | break; |
492 | default: | 485 | default: |
@@ -562,7 +555,6 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, | |||
562 | { | 555 | { |
563 | struct s5p_mfc_buf *src_buf; | 556 | struct s5p_mfc_buf *src_buf; |
564 | struct s5p_mfc_dev *dev; | 557 | struct s5p_mfc_dev *dev; |
565 | unsigned long flags; | ||
566 | 558 | ||
567 | if (ctx == NULL) | 559 | if (ctx == NULL) |
568 | return; | 560 | return; |
@@ -575,7 +567,6 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, | |||
575 | if (err == 0) { | 567 | if (err == 0) { |
576 | ctx->state = MFCINST_RUNNING; | 568 | ctx->state = MFCINST_RUNNING; |
577 | if (!ctx->dpb_flush_flag && ctx->head_processed) { | 569 | if (!ctx->dpb_flush_flag && ctx->head_processed) { |
578 | spin_lock_irqsave(&dev->irqlock, flags); | ||
579 | if (!list_empty(&ctx->src_queue)) { | 570 | if (!list_empty(&ctx->src_queue)) { |
580 | src_buf = list_entry(ctx->src_queue.next, | 571 | src_buf = list_entry(ctx->src_queue.next, |
581 | struct s5p_mfc_buf, list); | 572 | struct s5p_mfc_buf, list); |
@@ -584,7 +575,6 @@ static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, | |||
584 | vb2_buffer_done(&src_buf->b->vb2_buf, | 575 | vb2_buffer_done(&src_buf->b->vb2_buf, |
585 | VB2_BUF_STATE_DONE); | 576 | VB2_BUF_STATE_DONE); |
586 | } | 577 | } |
587 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
588 | } else { | 578 | } else { |
589 | ctx->dpb_flush_flag = 0; | 579 | ctx->dpb_flush_flag = 0; |
590 | } | 580 | } |
@@ -612,7 +602,6 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx) | |||
612 | 602 | ||
613 | ctx->state = MFCINST_FINISHED; | 603 | ctx->state = MFCINST_FINISHED; |
614 | 604 | ||
615 | spin_lock(&dev->irqlock); | ||
616 | if (!list_empty(&ctx->dst_queue)) { | 605 | if (!list_empty(&ctx->dst_queue)) { |
617 | mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, | 606 | mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, |
618 | list); | 607 | list); |
@@ -621,7 +610,6 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx) | |||
621 | vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0); | 610 | vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, 0); |
622 | vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); | 611 | vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); |
623 | } | 612 | } |
624 | spin_unlock(&dev->irqlock); | ||
625 | 613 | ||
626 | clear_work_bit(ctx); | 614 | clear_work_bit(ctx); |
627 | 615 | ||
@@ -643,6 +631,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) | |||
643 | mfc_debug_enter(); | 631 | mfc_debug_enter(); |
644 | /* Reset the timeout watchdog */ | 632 | /* Reset the timeout watchdog */ |
645 | atomic_set(&dev->watchdog_cnt, 0); | 633 | atomic_set(&dev->watchdog_cnt, 0); |
634 | spin_lock(&dev->irqlock); | ||
646 | ctx = dev->ctx[dev->curr_ctx]; | 635 | ctx = dev->ctx[dev->curr_ctx]; |
647 | /* Get the reason of interrupt and the error code */ | 636 | /* Get the reason of interrupt and the error code */ |
648 | reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev); | 637 | reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev); |
@@ -734,6 +723,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) | |||
734 | mfc_debug(2, "Unknown int reason\n"); | 723 | mfc_debug(2, "Unknown int reason\n"); |
735 | s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); | 724 | s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev); |
736 | } | 725 | } |
726 | spin_unlock(&dev->irqlock); | ||
737 | mfc_debug_leave(); | 727 | mfc_debug_leave(); |
738 | return IRQ_HANDLED; | 728 | return IRQ_HANDLED; |
739 | irq_cleanup_hw: | 729 | irq_cleanup_hw: |
@@ -747,6 +737,7 @@ irq_cleanup_hw: | |||
747 | s5p_mfc_clock_off(); | 737 | s5p_mfc_clock_off(); |
748 | 738 | ||
749 | s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); | 739 | s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); |
740 | spin_unlock(&dev->irqlock); | ||
750 | mfc_debug(2, "Exit via irq_cleanup_hw\n"); | 741 | mfc_debug(2, "Exit via irq_cleanup_hw\n"); |
751 | return IRQ_HANDLED; | 742 | return IRQ_HANDLED; |
752 | } | 743 | } |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h index d80ad84b8d41..8b624e03094f 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h | |||
@@ -308,7 +308,7 @@ struct s5p_mfc_dev { | |||
308 | struct s5p_mfc_pm pm; | 308 | struct s5p_mfc_pm pm; |
309 | struct s5p_mfc_variant *variant; | 309 | struct s5p_mfc_variant *variant; |
310 | int num_inst; | 310 | int num_inst; |
311 | spinlock_t irqlock; /* lock when operating on videobuf2 queues */ | 311 | spinlock_t irqlock; /* lock when operating on context */ |
312 | spinlock_t condlock; /* lock when changing/checking if a context is | 312 | spinlock_t condlock; /* lock when changing/checking if a context is |
313 | ready to be processed */ | 313 | ready to be processed */ |
314 | struct mutex mfc_mutex; /* video_device lock */ | 314 | struct mutex mfc_mutex; /* video_device lock */ |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index b1290ccf5a3d..520fe1576e1d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | |||
@@ -1023,40 +1023,41 @@ static void s5p_mfc_stop_streaming(struct vb2_queue *q) | |||
1023 | struct s5p_mfc_dev *dev = ctx->dev; | 1023 | struct s5p_mfc_dev *dev = ctx->dev; |
1024 | int aborted = 0; | 1024 | int aborted = 0; |
1025 | 1025 | ||
1026 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1026 | if ((ctx->state == MFCINST_FINISHING || | 1027 | if ((ctx->state == MFCINST_FINISHING || |
1027 | ctx->state == MFCINST_RUNNING) && | 1028 | ctx->state == MFCINST_RUNNING) && |
1028 | dev->curr_ctx == ctx->num && dev->hw_lock) { | 1029 | dev->curr_ctx == ctx->num && dev->hw_lock) { |
1029 | ctx->state = MFCINST_ABORT; | 1030 | ctx->state = MFCINST_ABORT; |
1031 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1030 | s5p_mfc_wait_for_done_ctx(ctx, | 1032 | s5p_mfc_wait_for_done_ctx(ctx, |
1031 | S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0); | 1033 | S5P_MFC_R2H_CMD_FRAME_DONE_RET, 0); |
1032 | aborted = 1; | 1034 | aborted = 1; |
1035 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1033 | } | 1036 | } |
1034 | if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 1037 | if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
1035 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1036 | s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); | 1038 | s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst); |
1037 | INIT_LIST_HEAD(&ctx->dst_queue); | 1039 | INIT_LIST_HEAD(&ctx->dst_queue); |
1038 | ctx->dst_queue_cnt = 0; | 1040 | ctx->dst_queue_cnt = 0; |
1039 | ctx->dpb_flush_flag = 1; | 1041 | ctx->dpb_flush_flag = 1; |
1040 | ctx->dec_dst_flag = 0; | 1042 | ctx->dec_dst_flag = 0; |
1041 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1042 | if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) { | 1043 | if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) { |
1043 | ctx->state = MFCINST_FLUSH; | 1044 | ctx->state = MFCINST_FLUSH; |
1044 | set_work_bit_irqsave(ctx); | 1045 | set_work_bit_irqsave(ctx); |
1045 | s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); | 1046 | s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev); |
1047 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1046 | if (s5p_mfc_wait_for_done_ctx(ctx, | 1048 | if (s5p_mfc_wait_for_done_ctx(ctx, |
1047 | S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0)) | 1049 | S5P_MFC_R2H_CMD_DPB_FLUSH_RET, 0)) |
1048 | mfc_err("Err flushing buffers\n"); | 1050 | mfc_err("Err flushing buffers\n"); |
1051 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1049 | } | 1052 | } |
1050 | } | 1053 | } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { |
1051 | if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { | ||
1052 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1053 | s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); | 1054 | s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src); |
1054 | INIT_LIST_HEAD(&ctx->src_queue); | 1055 | INIT_LIST_HEAD(&ctx->src_queue); |
1055 | ctx->src_queue_cnt = 0; | 1056 | ctx->src_queue_cnt = 0; |
1056 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1057 | } | 1057 | } |
1058 | if (aborted) | 1058 | if (aborted) |
1059 | ctx->state = MFCINST_RUNNING; | 1059 | ctx->state = MFCINST_RUNNING; |
1060 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1060 | } | 1061 | } |
1061 | 1062 | ||
1062 | 1063 | ||
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 82d380be49e9..a7ba4c8db7d9 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | |||
@@ -769,15 +769,12 @@ static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx) | |||
769 | struct s5p_mfc_buf *dst_mb; | 769 | struct s5p_mfc_buf *dst_mb; |
770 | unsigned long dst_addr; | 770 | unsigned long dst_addr; |
771 | unsigned int dst_size; | 771 | unsigned int dst_size; |
772 | unsigned long flags; | ||
773 | 772 | ||
774 | spin_lock_irqsave(&dev->irqlock, flags); | ||
775 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); | 773 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); |
776 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); | 774 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); |
777 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); | 775 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); |
778 | s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, | 776 | s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, |
779 | dst_size); | 777 | dst_size); |
780 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
781 | return 0; | 778 | return 0; |
782 | } | 779 | } |
783 | 780 | ||
@@ -786,11 +783,9 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) | |||
786 | struct s5p_mfc_dev *dev = ctx->dev; | 783 | struct s5p_mfc_dev *dev = ctx->dev; |
787 | struct s5p_mfc_enc_params *p = &ctx->enc_params; | 784 | struct s5p_mfc_enc_params *p = &ctx->enc_params; |
788 | struct s5p_mfc_buf *dst_mb; | 785 | struct s5p_mfc_buf *dst_mb; |
789 | unsigned long flags; | ||
790 | unsigned int enc_pb_count; | 786 | unsigned int enc_pb_count; |
791 | 787 | ||
792 | if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { | 788 | if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) { |
793 | spin_lock_irqsave(&dev->irqlock, flags); | ||
794 | if (!list_empty(&ctx->dst_queue)) { | 789 | if (!list_empty(&ctx->dst_queue)) { |
795 | dst_mb = list_entry(ctx->dst_queue.next, | 790 | dst_mb = list_entry(ctx->dst_queue.next, |
796 | struct s5p_mfc_buf, list); | 791 | struct s5p_mfc_buf, list); |
@@ -802,7 +797,6 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) | |||
802 | vb2_buffer_done(&dst_mb->b->vb2_buf, | 797 | vb2_buffer_done(&dst_mb->b->vb2_buf, |
803 | VB2_BUF_STATE_DONE); | 798 | VB2_BUF_STATE_DONE); |
804 | } | 799 | } |
805 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
806 | } | 800 | } |
807 | 801 | ||
808 | if (!IS_MFCV6_PLUS(dev)) { | 802 | if (!IS_MFCV6_PLUS(dev)) { |
@@ -826,25 +820,20 @@ static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx) | |||
826 | struct s5p_mfc_dev *dev = ctx->dev; | 820 | struct s5p_mfc_dev *dev = ctx->dev; |
827 | struct s5p_mfc_buf *dst_mb; | 821 | struct s5p_mfc_buf *dst_mb; |
828 | struct s5p_mfc_buf *src_mb; | 822 | struct s5p_mfc_buf *src_mb; |
829 | unsigned long flags; | ||
830 | unsigned long src_y_addr, src_c_addr, dst_addr; | 823 | unsigned long src_y_addr, src_c_addr, dst_addr; |
831 | unsigned int dst_size; | 824 | unsigned int dst_size; |
832 | 825 | ||
833 | spin_lock_irqsave(&dev->irqlock, flags); | ||
834 | src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); | 826 | src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); |
835 | src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0); | 827 | src_y_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 0); |
836 | src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1); | 828 | src_c_addr = vb2_dma_contig_plane_dma_addr(&src_mb->b->vb2_buf, 1); |
837 | s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx, | 829 | s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx, |
838 | src_y_addr, src_c_addr); | 830 | src_y_addr, src_c_addr); |
839 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
840 | 831 | ||
841 | spin_lock_irqsave(&dev->irqlock, flags); | ||
842 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); | 832 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); |
843 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); | 833 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); |
844 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); | 834 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); |
845 | s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, | 835 | s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, |
846 | dst_size); | 836 | dst_size); |
847 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
848 | 837 | ||
849 | return 0; | 838 | return 0; |
850 | } | 839 | } |
@@ -857,7 +846,6 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) | |||
857 | unsigned long mb_y_addr, mb_c_addr; | 846 | unsigned long mb_y_addr, mb_c_addr; |
858 | int slice_type; | 847 | int slice_type; |
859 | unsigned int strm_size; | 848 | unsigned int strm_size; |
860 | unsigned long flags; | ||
861 | 849 | ||
862 | slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); | 850 | slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev); |
863 | strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); | 851 | strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev); |
@@ -865,7 +853,6 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) | |||
865 | mfc_debug(2, "Encoded stream size: %d\n", strm_size); | 853 | mfc_debug(2, "Encoded stream size: %d\n", strm_size); |
866 | mfc_debug(2, "Display order: %d\n", | 854 | mfc_debug(2, "Display order: %d\n", |
867 | mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); | 855 | mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT)); |
868 | spin_lock_irqsave(&dev->irqlock, flags); | ||
869 | if (slice_type >= 0) { | 856 | if (slice_type >= 0) { |
870 | s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx, | 857 | s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx, |
871 | &enc_y_addr, &enc_c_addr); | 858 | &enc_y_addr, &enc_c_addr); |
@@ -929,7 +916,6 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) | |||
929 | vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size); | 916 | vb2_set_plane_payload(&mb_entry->b->vb2_buf, 0, strm_size); |
930 | vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); | 917 | vb2_buffer_done(&mb_entry->b->vb2_buf, VB2_BUF_STATE_DONE); |
931 | } | 918 | } |
932 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
933 | if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) | 919 | if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) |
934 | clear_work_bit(ctx); | 920 | clear_work_bit(ctx); |
935 | 921 | ||
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c index 8754b7e039e8..81e1e4ce6c24 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c | |||
@@ -1166,7 +1166,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) | |||
1166 | { | 1166 | { |
1167 | struct s5p_mfc_dev *dev = ctx->dev; | 1167 | struct s5p_mfc_dev *dev = ctx->dev; |
1168 | struct s5p_mfc_buf *temp_vb; | 1168 | struct s5p_mfc_buf *temp_vb; |
1169 | unsigned long flags; | ||
1170 | 1169 | ||
1171 | if (ctx->state == MFCINST_FINISHING) { | 1170 | if (ctx->state == MFCINST_FINISHING) { |
1172 | last_frame = MFC_DEC_LAST_FRAME; | 1171 | last_frame = MFC_DEC_LAST_FRAME; |
@@ -1176,11 +1175,9 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) | |||
1176 | return 0; | 1175 | return 0; |
1177 | } | 1176 | } |
1178 | 1177 | ||
1179 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1180 | /* Frames are being decoded */ | 1178 | /* Frames are being decoded */ |
1181 | if (list_empty(&ctx->src_queue)) { | 1179 | if (list_empty(&ctx->src_queue)) { |
1182 | mfc_debug(2, "No src buffers\n"); | 1180 | mfc_debug(2, "No src buffers\n"); |
1183 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1184 | return -EAGAIN; | 1181 | return -EAGAIN; |
1185 | } | 1182 | } |
1186 | /* Get the next source buffer */ | 1183 | /* Get the next source buffer */ |
@@ -1189,7 +1186,6 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) | |||
1189 | s5p_mfc_set_dec_stream_buffer_v5(ctx, | 1186 | s5p_mfc_set_dec_stream_buffer_v5(ctx, |
1190 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), | 1187 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), |
1191 | ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused); | 1188 | ctx->consumed_stream, temp_vb->b->vb2_buf.planes[0].bytesused); |
1192 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1193 | dev->curr_ctx = ctx->num; | 1189 | dev->curr_ctx = ctx->num; |
1194 | if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) { | 1190 | if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) { |
1195 | last_frame = MFC_DEC_LAST_FRAME; | 1191 | last_frame = MFC_DEC_LAST_FRAME; |
@@ -1203,21 +1199,17 @@ static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) | |||
1203 | static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | 1199 | static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) |
1204 | { | 1200 | { |
1205 | struct s5p_mfc_dev *dev = ctx->dev; | 1201 | struct s5p_mfc_dev *dev = ctx->dev; |
1206 | unsigned long flags; | ||
1207 | struct s5p_mfc_buf *dst_mb; | 1202 | struct s5p_mfc_buf *dst_mb; |
1208 | struct s5p_mfc_buf *src_mb; | 1203 | struct s5p_mfc_buf *src_mb; |
1209 | unsigned long src_y_addr, src_c_addr, dst_addr; | 1204 | unsigned long src_y_addr, src_c_addr, dst_addr; |
1210 | unsigned int dst_size; | 1205 | unsigned int dst_size; |
1211 | 1206 | ||
1212 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1213 | if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { | 1207 | if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { |
1214 | mfc_debug(2, "no src buffers\n"); | 1208 | mfc_debug(2, "no src buffers\n"); |
1215 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1216 | return -EAGAIN; | 1209 | return -EAGAIN; |
1217 | } | 1210 | } |
1218 | if (list_empty(&ctx->dst_queue)) { | 1211 | if (list_empty(&ctx->dst_queue)) { |
1219 | mfc_debug(2, "no dst buffers\n"); | 1212 | mfc_debug(2, "no dst buffers\n"); |
1220 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1221 | return -EAGAIN; | 1213 | return -EAGAIN; |
1222 | } | 1214 | } |
1223 | if (list_empty(&ctx->src_queue)) { | 1215 | if (list_empty(&ctx->src_queue)) { |
@@ -1249,7 +1241,6 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | |||
1249 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); | 1241 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); |
1250 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); | 1242 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); |
1251 | s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); | 1243 | s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); |
1252 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1253 | dev->curr_ctx = ctx->num; | 1244 | dev->curr_ctx = ctx->num; |
1254 | mfc_debug(2, "encoding buffer with index=%d state=%d\n", | 1245 | mfc_debug(2, "encoding buffer with index=%d state=%d\n", |
1255 | src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state); | 1246 | src_mb ? src_mb->b->vb2_buf.index : -1, ctx->state); |
@@ -1260,11 +1251,9 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | |||
1260 | static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) | 1251 | static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) |
1261 | { | 1252 | { |
1262 | struct s5p_mfc_dev *dev = ctx->dev; | 1253 | struct s5p_mfc_dev *dev = ctx->dev; |
1263 | unsigned long flags; | ||
1264 | struct s5p_mfc_buf *temp_vb; | 1254 | struct s5p_mfc_buf *temp_vb; |
1265 | 1255 | ||
1266 | /* Initializing decoding - parsing header */ | 1256 | /* Initializing decoding - parsing header */ |
1267 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1268 | mfc_debug(2, "Preparing to init decoding\n"); | 1257 | mfc_debug(2, "Preparing to init decoding\n"); |
1269 | temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); | 1258 | temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); |
1270 | s5p_mfc_set_dec_desc_buffer(ctx); | 1259 | s5p_mfc_set_dec_desc_buffer(ctx); |
@@ -1273,7 +1262,6 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) | |||
1273 | s5p_mfc_set_dec_stream_buffer_v5(ctx, | 1262 | s5p_mfc_set_dec_stream_buffer_v5(ctx, |
1274 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), | 1263 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), |
1275 | 0, temp_vb->b->vb2_buf.planes[0].bytesused); | 1264 | 0, temp_vb->b->vb2_buf.planes[0].bytesused); |
1276 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1277 | dev->curr_ctx = ctx->num; | 1265 | dev->curr_ctx = ctx->num; |
1278 | s5p_mfc_init_decode_v5(ctx); | 1266 | s5p_mfc_init_decode_v5(ctx); |
1279 | } | 1267 | } |
@@ -1281,18 +1269,15 @@ static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) | |||
1281 | static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) | 1269 | static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) |
1282 | { | 1270 | { |
1283 | struct s5p_mfc_dev *dev = ctx->dev; | 1271 | struct s5p_mfc_dev *dev = ctx->dev; |
1284 | unsigned long flags; | ||
1285 | struct s5p_mfc_buf *dst_mb; | 1272 | struct s5p_mfc_buf *dst_mb; |
1286 | unsigned long dst_addr; | 1273 | unsigned long dst_addr; |
1287 | unsigned int dst_size; | 1274 | unsigned int dst_size; |
1288 | 1275 | ||
1289 | s5p_mfc_set_enc_ref_buffer_v5(ctx); | 1276 | s5p_mfc_set_enc_ref_buffer_v5(ctx); |
1290 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1291 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); | 1277 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); |
1292 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); | 1278 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); |
1293 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); | 1279 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); |
1294 | s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); | 1280 | s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); |
1295 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1296 | dev->curr_ctx = ctx->num; | 1281 | dev->curr_ctx = ctx->num; |
1297 | s5p_mfc_init_encode_v5(ctx); | 1282 | s5p_mfc_init_encode_v5(ctx); |
1298 | } | 1283 | } |
@@ -1300,7 +1285,6 @@ static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) | |||
1300 | static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) | 1285 | static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) |
1301 | { | 1286 | { |
1302 | struct s5p_mfc_dev *dev = ctx->dev; | 1287 | struct s5p_mfc_dev *dev = ctx->dev; |
1303 | unsigned long flags; | ||
1304 | struct s5p_mfc_buf *temp_vb; | 1288 | struct s5p_mfc_buf *temp_vb; |
1305 | int ret; | 1289 | int ret; |
1306 | 1290 | ||
@@ -1314,11 +1298,9 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) | |||
1314 | "before starting processing\n"); | 1298 | "before starting processing\n"); |
1315 | return -EAGAIN; | 1299 | return -EAGAIN; |
1316 | } | 1300 | } |
1317 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1318 | if (list_empty(&ctx->src_queue)) { | 1301 | if (list_empty(&ctx->src_queue)) { |
1319 | mfc_err("Header has been deallocated in the middle of" | 1302 | mfc_err("Header has been deallocated in the middle of" |
1320 | " initialization\n"); | 1303 | " initialization\n"); |
1321 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1322 | return -EIO; | 1304 | return -EIO; |
1323 | } | 1305 | } |
1324 | temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); | 1306 | temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); |
@@ -1327,7 +1309,6 @@ static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) | |||
1327 | s5p_mfc_set_dec_stream_buffer_v5(ctx, | 1309 | s5p_mfc_set_dec_stream_buffer_v5(ctx, |
1328 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), | 1310 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), |
1329 | 0, temp_vb->b->vb2_buf.planes[0].bytesused); | 1311 | 0, temp_vb->b->vb2_buf.planes[0].bytesused); |
1330 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1331 | dev->curr_ctx = ctx->num; | 1312 | dev->curr_ctx = ctx->num; |
1332 | ret = s5p_mfc_set_dec_frame_buffer_v5(ctx); | 1313 | ret = s5p_mfc_set_dec_frame_buffer_v5(ctx); |
1333 | if (ret) { | 1314 | if (ret) { |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c index 764a675ec53d..2a9ca428648f 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c | |||
@@ -1520,7 +1520,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) | |||
1520 | { | 1520 | { |
1521 | struct s5p_mfc_dev *dev = ctx->dev; | 1521 | struct s5p_mfc_dev *dev = ctx->dev; |
1522 | struct s5p_mfc_buf *temp_vb; | 1522 | struct s5p_mfc_buf *temp_vb; |
1523 | unsigned long flags; | ||
1524 | int last_frame = 0; | 1523 | int last_frame = 0; |
1525 | 1524 | ||
1526 | if (ctx->state == MFCINST_FINISHING) { | 1525 | if (ctx->state == MFCINST_FINISHING) { |
@@ -1532,11 +1531,9 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) | |||
1532 | return 0; | 1531 | return 0; |
1533 | } | 1532 | } |
1534 | 1533 | ||
1535 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1536 | /* Frames are being decoded */ | 1534 | /* Frames are being decoded */ |
1537 | if (list_empty(&ctx->src_queue)) { | 1535 | if (list_empty(&ctx->src_queue)) { |
1538 | mfc_debug(2, "No src buffers.\n"); | 1536 | mfc_debug(2, "No src buffers.\n"); |
1539 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1540 | return -EAGAIN; | 1537 | return -EAGAIN; |
1541 | } | 1538 | } |
1542 | /* Get the next source buffer */ | 1539 | /* Get the next source buffer */ |
@@ -1546,7 +1543,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) | |||
1546 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), | 1543 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), |
1547 | ctx->consumed_stream, | 1544 | ctx->consumed_stream, |
1548 | temp_vb->b->vb2_buf.planes[0].bytesused); | 1545 | temp_vb->b->vb2_buf.planes[0].bytesused); |
1549 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1550 | 1546 | ||
1551 | dev->curr_ctx = ctx->num; | 1547 | dev->curr_ctx = ctx->num; |
1552 | if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) { | 1548 | if (temp_vb->b->vb2_buf.planes[0].bytesused == 0) { |
@@ -1562,7 +1558,6 @@ static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) | |||
1562 | static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | 1558 | static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) |
1563 | { | 1559 | { |
1564 | struct s5p_mfc_dev *dev = ctx->dev; | 1560 | struct s5p_mfc_dev *dev = ctx->dev; |
1565 | unsigned long flags; | ||
1566 | struct s5p_mfc_buf *dst_mb; | 1561 | struct s5p_mfc_buf *dst_mb; |
1567 | struct s5p_mfc_buf *src_mb; | 1562 | struct s5p_mfc_buf *src_mb; |
1568 | unsigned long src_y_addr, src_c_addr, dst_addr; | 1563 | unsigned long src_y_addr, src_c_addr, dst_addr; |
@@ -1571,17 +1566,13 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | |||
1571 | */ | 1566 | */ |
1572 | unsigned int dst_size; | 1567 | unsigned int dst_size; |
1573 | 1568 | ||
1574 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1575 | |||
1576 | if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { | 1569 | if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { |
1577 | mfc_debug(2, "no src buffers.\n"); | 1570 | mfc_debug(2, "no src buffers.\n"); |
1578 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1579 | return -EAGAIN; | 1571 | return -EAGAIN; |
1580 | } | 1572 | } |
1581 | 1573 | ||
1582 | if (list_empty(&ctx->dst_queue)) { | 1574 | if (list_empty(&ctx->dst_queue)) { |
1583 | mfc_debug(2, "no dst buffers.\n"); | 1575 | mfc_debug(2, "no dst buffers.\n"); |
1584 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1585 | return -EAGAIN; | 1576 | return -EAGAIN; |
1586 | } | 1577 | } |
1587 | 1578 | ||
@@ -1615,8 +1606,6 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | |||
1615 | 1606 | ||
1616 | s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); | 1607 | s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); |
1617 | 1608 | ||
1618 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1619 | |||
1620 | dev->curr_ctx = ctx->num; | 1609 | dev->curr_ctx = ctx->num; |
1621 | s5p_mfc_encode_one_frame_v6(ctx); | 1610 | s5p_mfc_encode_one_frame_v6(ctx); |
1622 | 1611 | ||
@@ -1626,18 +1615,15 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) | |||
1626 | static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) | 1615 | static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) |
1627 | { | 1616 | { |
1628 | struct s5p_mfc_dev *dev = ctx->dev; | 1617 | struct s5p_mfc_dev *dev = ctx->dev; |
1629 | unsigned long flags; | ||
1630 | struct s5p_mfc_buf *temp_vb; | 1618 | struct s5p_mfc_buf *temp_vb; |
1631 | 1619 | ||
1632 | /* Initializing decoding - parsing header */ | 1620 | /* Initializing decoding - parsing header */ |
1633 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1634 | mfc_debug(2, "Preparing to init decoding.\n"); | 1621 | mfc_debug(2, "Preparing to init decoding.\n"); |
1635 | temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); | 1622 | temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); |
1636 | mfc_debug(2, "Header size: %d\n", temp_vb->b->vb2_buf.planes[0].bytesused); | 1623 | mfc_debug(2, "Header size: %d\n", temp_vb->b->vb2_buf.planes[0].bytesused); |
1637 | s5p_mfc_set_dec_stream_buffer_v6(ctx, | 1624 | s5p_mfc_set_dec_stream_buffer_v6(ctx, |
1638 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0, | 1625 | vb2_dma_contig_plane_dma_addr(&temp_vb->b->vb2_buf, 0), 0, |
1639 | temp_vb->b->vb2_buf.planes[0].bytesused); | 1626 | temp_vb->b->vb2_buf.planes[0].bytesused); |
1640 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1641 | dev->curr_ctx = ctx->num; | 1627 | dev->curr_ctx = ctx->num; |
1642 | s5p_mfc_init_decode_v6(ctx); | 1628 | s5p_mfc_init_decode_v6(ctx); |
1643 | } | 1629 | } |
@@ -1645,18 +1631,14 @@ static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) | |||
1645 | static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) | 1631 | static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) |
1646 | { | 1632 | { |
1647 | struct s5p_mfc_dev *dev = ctx->dev; | 1633 | struct s5p_mfc_dev *dev = ctx->dev; |
1648 | unsigned long flags; | ||
1649 | struct s5p_mfc_buf *dst_mb; | 1634 | struct s5p_mfc_buf *dst_mb; |
1650 | unsigned long dst_addr; | 1635 | unsigned long dst_addr; |
1651 | unsigned int dst_size; | 1636 | unsigned int dst_size; |
1652 | 1637 | ||
1653 | spin_lock_irqsave(&dev->irqlock, flags); | ||
1654 | |||
1655 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); | 1638 | dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); |
1656 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); | 1639 | dst_addr = vb2_dma_contig_plane_dma_addr(&dst_mb->b->vb2_buf, 0); |
1657 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); | 1640 | dst_size = vb2_plane_size(&dst_mb->b->vb2_buf, 0); |
1658 | s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); | 1641 | s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); |
1659 | spin_unlock_irqrestore(&dev->irqlock, flags); | ||
1660 | dev->curr_ctx = ctx->num; | 1642 | dev->curr_ctx = ctx->num; |
1661 | s5p_mfc_init_encode_v6(ctx); | 1643 | s5p_mfc_init_encode_v6(ctx); |
1662 | } | 1644 | } |