| author    | Andy Walls <awalls@radix.net>                    | 2009-04-13 21:42:43 -0400 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Mauro Carvalho Chehab <mchehab@redhat.com>       | 2009-06-16 17:20:44 -0400 |
| commit    | 87116159517ecf6b9cf62a136f2935a63833c485 (patch) |                           |
| tree      | 4a52a97e9e740304ed44d4348762836284f4d100         |                           |
| parent    | deed75ed9f7576ada4bca02e6c851833a352a38d (diff)  |                           |
V4L/DVB (11616): cx18: Add a work queue for deferring empty buffer handoffs to the firmware
This change defers sending all CX18_CPU_DE_SET_MDL commands, for a stream with
an ongoing capture, by adding a work queue to handle sending such commands when
needed. This prevents any sleeps, caused by notifying the firmware of new
usable buffers, when a V4L2 application read() is being satisfied or when
an incoming buffer is processed by the cx18-NN-in work queue thread.
Signed-off-by: Andy Walls <awalls@radix.net>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
 -rw-r--r--  drivers/media/video/cx18/cx18-driver.c  | 92
 -rw-r--r--  drivers/media/video/cx18/cx18-driver.h  | 32
 -rw-r--r--  drivers/media/video/cx18/cx18-streams.c | 99
 -rw-r--r--  drivers/media/video/cx18/cx18-streams.h |  5
 4 files changed, 202 insertions(+), 26 deletions(-)
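The pattern the patch introduces can be sketched in ordinary userspace C before reading the diff: hand-offs to the firmware are recorded in a fixed pool of "work order" slots, a slot is claimed by atomically flipping a pending flag from 0 to 1, and when every slot is busy the caller simply does the work immediately. The sketch below is not driver code; all names in it are invented for illustration, and the compare-and-swap merely stands in for the atomic_add_unless() call the patch uses.

/* Userspace sketch of the work-order pool pattern; not cx18 driver code. */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_ORDERS 24			/* mirrors CX18_MAX_OUT_WORK_ORDERS */

struct order {
	atomic_int pending;		/* 0 = free, 1 = claimed/queued */
	int payload;			/* stands in for the buffer to hand back */
};

static struct order pool[MAX_ORDERS];

/* Claim a free order, or return NULL if every slot is in use. */
static struct order *alloc_order(void)
{
	for (int i = 0; i < MAX_ORDERS; i++) {
		int expected = 0;
		/* the 0 -> 1 flip is the claim; only one caller can win it */
		if (atomic_compare_exchange_strong(&pool[i].pending, &expected, 1))
			return &pool[i];
	}
	return NULL;
}

static void do_work_now(int payload)
{
	printf("pool exhausted: handling %d immediately\n", payload);
}

static void defer_work(int payload)
{
	struct order *o = alloc_order();

	if (o == NULL) {		/* fallback, like the driver's non-deferred path */
		do_work_now(payload);
		return;
	}
	o->payload = payload;
	/* a real driver would queue_work() here; the worker clears pending when done */
	printf("deferred %d via order %ld\n", payload, (long)(o - pool));
}

int main(void)
{
	for (int i = 0; i < 30; i++)	/* more requests than slots, to show the fallback */
		defer_work(i);
	return 0;
}

Preallocating the orders keeps the deferral path free of memory allocation, and the fallback means an exhausted pool degrades to the old, non-deferred behaviour rather than failing.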
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 79750208e042..658cfbb1b97e 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -546,6 +546,47 @@ done:
 	cx->card_i2c = cx->card->i2c;
 }
 
+static int __devinit cx18_create_in_workq(struct cx18 *cx)
+{
+	snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
+		 cx->v4l2_dev.name);
+	cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
+	if (cx->in_work_queue == NULL) {
+		CX18_ERR("Unable to create incoming mailbox handler thread\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __devinit cx18_create_out_workq(struct cx18 *cx)
+{
+	snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
+		 cx->v4l2_dev.name);
+	cx->out_work_queue = create_workqueue(cx->out_workq_name);
+	if (cx->out_work_queue == NULL) {
+		CX18_ERR("Unable to create outgoing mailbox handler threads\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
+{
+	int i;
+	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
+		cx->in_work_order[i].cx = cx;
+		cx->in_work_order[i].str = cx->epu_debug_str;
+		INIT_WORK(&cx->in_work_order[i].work, cx18_in_work_handler);
+	}
+}
+
+static void __devinit cx18_init_out_work_orders(struct cx18 *cx)
+{
+	int i;
+	for (i = 0; i < CX18_MAX_OUT_WORK_ORDERS; i++)
+		INIT_WORK(&cx->out_work_order[i].work, cx18_out_work_handler);
+}
+
 /* Precondition: the cx18 structure has been memset to 0. Only
    the dev and instance fields have been filled in.
    No assumptions on the card type may be made here (see cx18_init_struct2
@@ -553,7 +594,7 @@ done:
  */
 static int __devinit cx18_init_struct1(struct cx18 *cx)
 {
-	int i;
+	int ret;
 
 	cx->base_addr = pci_resource_start(cx->pci_dev, 0);
 
@@ -562,20 +603,19 @@ static int __devinit cx18_init_struct1(struct cx18 *cx)
 	mutex_init(&cx->epu2apu_mb_lock);
 	mutex_init(&cx->epu2cpu_mb_lock);
 
-	snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
-		 cx->v4l2_dev.name);
-	cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
-	if (cx->in_work_queue == NULL) {
-		CX18_ERR("Unable to create incoming mailbox handler thread\n");
-		return -ENOMEM;
-	}
+	ret = cx18_create_out_workq(cx);
+	if (ret)
+		return ret;
 
-	for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
-		cx->in_work_order[i].cx = cx;
-		cx->in_work_order[i].str = cx->epu_debug_str;
-		INIT_WORK(&cx->in_work_order[i].work, cx18_in_work_handler);
-	}
+	ret = cx18_create_in_workq(cx);
+	if (ret) {
+		destroy_workqueue(cx->out_work_queue);
+		return ret;
+	}
 
+	cx18_init_out_work_orders(cx);
+	cx18_init_in_work_orders(cx);
+
 	/* start counting open_id at 1 */
 	cx->open_id = 1;
 
@@ -761,17 +801,17 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
 		retval = -ENODEV;
 		goto err;
 	}
-	if (cx18_init_struct1(cx)) {
-		retval = -ENOMEM;
+
+	retval = cx18_init_struct1(cx);
+	if (retval)
 		goto err;
-	}
 
 	CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
 
 	/* PCI Device Setup */
 	retval = cx18_setup_pci(cx, pci_dev, pci_id);
 	if (retval != 0)
-		goto free_workqueue;
+		goto free_workqueues;
 
 	/* map io memory */
 	CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
@@ -945,8 +985,9 @@ free_map:
 	cx18_iounmap(cx);
 free_mem:
 	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
-free_workqueue:
+free_workqueues:
 	destroy_workqueue(cx->in_work_queue);
+	destroy_workqueue(cx->out_work_queue);
 err:
 	if (retval == 0)
 		retval = -ENODEV;
@@ -1075,15 +1116,26 @@ static void cx18_remove(struct pci_dev *pci_dev)
 	if (atomic_read(&cx->tot_capturing) > 0)
 		cx18_stop_all_captures(cx);
 
-	/* Interrupts */
+	/* Stop interrupts that cause incoming work to be queued */
 	cx18_sw1_irq_disable(cx, IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
+
+	/* Incoming work can cause outgoing work, so clean up incoming first */
+	cx18_cancel_in_work_orders(cx);
+
+	/*
+	 * An outgoing work order can have the only pointer to a dynamically
+	 * allocated buffer, so we need to flush outgoing work and not just
+	 * cancel it, so we don't lose the pointer and leak memory.
+	 */
+	flush_workqueue(cx->out_work_queue);
+
+	/* Stop ack interrupts that may have been needed for work to finish */
 	cx18_sw2_irq_disable(cx, IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
 
 	cx18_halt_firmware(cx);
 
-	cx18_cancel_in_work_orders(cx);
-
 	destroy_workqueue(cx->in_work_queue);
+	destroy_workqueue(cx->out_work_queue);
 
 	cx18_streams_cleanup(cx, 1);
 
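The new comment in cx18_remove() above explains why the outgoing queue is flushed rather than cancelled: a queued order may hold the only reference to a buffer, and cancelling discards the order without ever running the handler that would hand that buffer back. A tiny userspace analogy of that difference, with invented names and nothing taken from the driver beyond the idea:

#include <stdio.h>
#include <stdlib.h>

struct pending_order {
	void (*handler)(struct pending_order *);
	void *buf;			/* the only reference to this allocation */
};

static void hand_buf_back(struct pending_order *o)
{
	printf("handler ran; buffer %p handed back and released\n", o->buf);
	free(o->buf);			/* stands in for returning the buffer to a queue */
}

/* "flush": run every queued handler before tearing down -- nothing is lost */
static void flush_order(struct pending_order *o)
{
	o->handler(o);
}

/* "cancel": drop the order without running it -- the buffer would leak */
static void cancel_order(struct pending_order *o)
{
	printf("order cancelled; buffer %p is now unreachable\n", o->buf);
}

int main(void)
{
	struct pending_order o = { hand_buf_back, malloc(4096) };

	flush_order(&o);		/* the safe choice, like flush_workqueue() */
	(void)cancel_order;		/* the leaky alternative, shown but not taken */
	return 0;
}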
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index e6f42d0cb2b3..62dca432fdbb 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -254,6 +254,7 @@ struct cx18_options {
 #define CX18_F_S_INTERNAL_USE	5	/* this stream is used internally (sliced VBI processing) */
 #define CX18_F_S_STREAMOFF	7	/* signal end of stream EOS */
 #define CX18_F_S_APPL_IO	8	/* this stream is used read/written by an application */
+#define CX18_F_S_STOPPING	9	/* telling the fw to stop capturing */
 
 /* per-cx18, i_flags */
 #define CX18_F_I_LOADED_FW	0	/* Loaded firmware 1st time */
@@ -324,6 +325,33 @@ struct cx18_in_work_order {
 	char *str;
 };
 
+/*
+ * There are 2 types of deferrable tasks that send messages out to the firmware:
+ * 1. Sending individual buffers back to the firmware
+ * 2. Sending as many free buffers for a stream from q_free as we can to the fw
+ *
+ * The worst case scenario for multiple simultaneous streams is
+ * TS, YUV, PCM, VBI, MPEG, and IDX all going at once.
+ *
+ * We try to load the firmware queue with as many free buffers as possible,
+ * whenever we get a buffer back for a stream. For the TS we return the single
+ * buffer to the firmware at that time as well. For all other streams, we
+ * return single buffers to the firmware as the application drains them.
+ *
+ * 6 streams * 2 sets of orders * (1 single buf + 1 load fw from q_free)
+ * = 24 work orders should cover our needs, provided the applications read
+ * at a fairly steady rate. If apps don't, we fall back to non-deferred
+ * operation, when no cx18_out_work_orders are available for use.
+ */
+#define CX18_MAX_OUT_WORK_ORDERS (24)
+
+struct cx18_out_work_order {
+	struct work_struct work;
+	atomic_t pending;
+	struct cx18_stream *s;
+	struct cx18_buffer *buf;	/* buf == NULL, means load fw from q_free */
+};
+
 #define CX18_INVALID_TASK_HANDLE 0xffffffff
 
 struct cx18_stream {
@@ -573,6 +601,10 @@ struct cx18 {
 	struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
 	char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
 
+	struct workqueue_struct *out_work_queue;
+	char out_workq_name[12];	/* "cx18-NN-out" */
+	struct cx18_out_work_order out_work_order[CX18_MAX_OUT_WORK_ORDERS];
+
 	/* i2c */
 	struct i2c_adapter i2c_adap[2];
 	struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
index 0932b76b2373..bbeb01c5cf32 100644
--- a/drivers/media/video/cx18/cx18-streams.c
+++ b/drivers/media/video/cx18/cx18-streams.c
@@ -431,14 +431,16 @@ static void cx18_vbi_setup(struct cx18_stream *s)
 	cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
 }
 
-struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
-					  struct cx18_buffer *buf)
+static
+struct cx18_queue *_cx18_stream_put_buf_fw(struct cx18_stream *s,
+					   struct cx18_buffer *buf)
 {
 	struct cx18 *cx = s->cx;
 	struct cx18_queue *q;
 
 	/* Don't give it to the firmware, if we're not running a capture */
 	if (s->handle == CX18_INVALID_TASK_HANDLE ||
+	    test_bit(CX18_F_S_STOPPING, &s->s_flags) ||
 	    !test_bit(CX18_F_S_STREAMING, &s->s_flags))
 		return cx18_enqueue(s, buf, &s->q_free);
 
@@ -453,7 +455,8 @@ struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
 	return q;
 }
 
-void cx18_stream_load_fw_queue(struct cx18_stream *s)
+static
+void _cx18_stream_load_fw_queue(struct cx18_stream *s)
 {
 	struct cx18_queue *q;
 	struct cx18_buffer *buf;
@@ -467,11 +470,93 @@ void cx18_stream_load_fw_queue(struct cx18_stream *s)
 		buf = cx18_dequeue(s, &s->q_free);
 		if (buf == NULL)
 			break;
-		q = cx18_stream_put_buf_fw(s, buf);
+		q = _cx18_stream_put_buf_fw(s, buf);
 	} while (atomic_read(&s->q_busy.buffers) < CX18_MAX_FW_MDLS_PER_STREAM
 		 && q == &s->q_busy);
 }
 
+static inline
+void free_out_work_order(struct cx18_out_work_order *order)
+{
+	atomic_set(&order->pending, 0);
+}
+
+void cx18_out_work_handler(struct work_struct *work)
+{
+	struct cx18_out_work_order *order =
+			container_of(work, struct cx18_out_work_order, work);
+	struct cx18_stream *s = order->s;
+	struct cx18_buffer *buf = order->buf;
+
+	free_out_work_order(order);
+
+	if (buf == NULL)
+		_cx18_stream_load_fw_queue(s);
+	else
+		_cx18_stream_put_buf_fw(s, buf);
+}
+
+static
+struct cx18_out_work_order *alloc_out_work_order(struct cx18 *cx)
+{
+	int i;
+	struct cx18_out_work_order *order = NULL;
+
+	for (i = 0; i < CX18_MAX_OUT_WORK_ORDERS; i++) {
+		/*
+		 * We need "pending" to be atomic to inspect & set its contents
+		 * 1. "pending" is only set to 1 here, but needs multiple access
+		 * protection
+		 * 2. work handler threads only clear "pending" and only
+		 * on one, particular work order at a time, per handler thread.
+		 */
+		if (atomic_add_unless(&cx->out_work_order[i].pending, 1, 1)) {
+			order = &cx->out_work_order[i];
+			break;
+		}
+	}
+	return order;
+}
+
+struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
+					  struct cx18_buffer *buf)
+{
+	struct cx18 *cx = s->cx;
+	struct cx18_out_work_order *order;
+
+	order = alloc_out_work_order(cx);
+	if (order == NULL) {
+		CX18_DEBUG_WARN("No blank, outgoing-mailbox, deferred-work, "
+				"order forms available; sending buffer %u back "
+				"to the firmware immediately for stream %s\n",
+				buf->id, s->name);
+		return _cx18_stream_put_buf_fw(s, buf);
+	}
+	order->s = s;
+	order->buf = buf;
+	queue_work(cx->out_work_queue, &order->work);
+	return NULL;
+}
+
+void cx18_stream_load_fw_queue(struct cx18_stream *s)
+{
+	struct cx18 *cx = s->cx;
+	struct cx18_out_work_order *order;
+
+	order = alloc_out_work_order(cx);
+	if (order == NULL) {
+		CX18_DEBUG_WARN("No blank, outgoing-mailbox, deferred-work, "
+				"order forms available; filling the firmware "
+				"buffer queue immediately for stream %s\n",
+				s->name);
+		_cx18_stream_load_fw_queue(s);
+		return;
+	}
+	order->s = s;
+	order->buf = NULL;	/* Indicates to load the fw queue */
+	queue_work(cx->out_work_queue, &order->work);
+}
+
 int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
 {
 	u32 data[MAX_MB_ARGUMENTS];
@@ -607,12 +692,13 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
 		cx18_writel(cx, s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
 	}
 	mutex_unlock(&s->qlock);
-	cx18_stream_load_fw_queue(s);
+	_cx18_stream_load_fw_queue(s);
 
 	/* begin_capture */
 	if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
 		CX18_DEBUG_WARN("Error starting capture!\n");
 		/* Ensure we're really not capturing before releasing MDLs */
+		set_bit(CX18_F_S_STOPPING, &s->s_flags);
 		if (s->type == CX18_ENC_STREAM_TYPE_MPG)
 			cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, 1);
 		else
@@ -622,6 +708,7 @@ int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
 		cx18_vapi(cx, CX18_CPU_DE_RELEASE_MDL, 1, s->handle);
 		cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
 		s->handle = CX18_INVALID_TASK_HANDLE;
+		clear_bit(CX18_F_S_STOPPING, &s->s_flags);
 		if (atomic_read(&cx->tot_capturing) == 0) {
 			set_bit(CX18_F_I_EOS, &cx->i_flags);
 			cx18_write_reg(cx, 5, CX18_DSP0_INTERRUPT_MASK);
@@ -666,6 +753,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
 	if (atomic_read(&cx->tot_capturing) == 0)
 		return 0;
 
+	set_bit(CX18_F_S_STOPPING, &s->s_flags);
 	if (s->type == CX18_ENC_STREAM_TYPE_MPG)
 		cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
 	else
@@ -689,6 +777,7 @@ int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
 
 	cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
 	s->handle = CX18_INVALID_TASK_HANDLE;
+	clear_bit(CX18_F_S_STOPPING, &s->s_flags);
 
 	if (atomic_read(&cx->tot_capturing) > 0)
 		return 0;
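A short note on the claim in alloc_out_work_order() above: atomic_add_unless(&pending, 1, 1) performs the add only when the counter is not already 1 and returns non-zero when it did, so on a flag that only ever holds 0 or 1 it behaves as an atomic test-and-set. An equivalent way to write that claim with a compare-and-swap would look roughly like the following; cx18_claim_out_work_order is an invented name, not part of the patch:

/* Illustrative equivalent of atomic_add_unless(&order->pending, 1, 1)
 * for a flag that only ever holds 0 (free) or 1 (claimed). */
static inline int cx18_claim_out_work_order(atomic_t *pending)
{
	/* non-zero only for the thread that flips 0 -> 1 */
	return atomic_cmpxchg(pending, 0, 1) == 0;
}

The slot is released again by free_out_work_order(), which simply does atomic_set(&order->pending, 0) once the handler has run, as shown in the hunk above.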
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 420e0a172945..1fdcfffb07ed 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -28,10 +28,13 @@ int cx18_streams_setup(struct cx18 *cx);
 int cx18_streams_register(struct cx18 *cx);
 void cx18_streams_cleanup(struct cx18 *cx, int unregister);
 
-/* Capture related */
+/* Related to submission of buffers to firmware */
 void cx18_stream_load_fw_queue(struct cx18_stream *s);
 struct cx18_queue *cx18_stream_put_buf_fw(struct cx18_stream *s,
 					  struct cx18_buffer *buf);
+void cx18_out_work_handler(struct work_struct *work);
+
+/* Capture related */
 int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
 int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);
 