author    | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-17 21:16:55 -0400
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-17 21:16:55 -0400
commit    | 4b337c5f245b6587ba844ac7bb13c313a2912f7b (patch)
tree      | 999c6a6580b76a083c8efb9dabff709d1c49fcd0 /drivers/media/video/cx18/cx18-mailbox.c
parent    | 492b057c426e4aa747484958e18e9da29003985d (diff)
parent    | 3fe0344faf7fdcb158bd5c1a9aec960a8d70c8e8 (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'drivers/media/video/cx18/cx18-mailbox.c')
-rw-r--r-- | drivers/media/video/cx18/cx18-mailbox.c | 114
1 file changed, 70 insertions(+), 44 deletions(-)
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index 2226e5791e99..afe46c3d4057 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -131,7 +131,7 @@ static void dump_mb(struct cx18 *cx, struct cx18_mailbox *mb, char *name)
  * Functions that run in a work_queue work handling context
  */
 
-static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_dma_done(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         u32 handle, mdl_ack_count, id;
         struct cx18_mailbox *mb;
@@ -191,29 +191,30 @@ static void epu_dma_done(struct cx18 *cx, struct cx18_epu_work_order *order)
                 if (buf == NULL) {
                         CX18_WARN("Could not find buf %d for stream %s\n",
                                   id, s->name);
-                        /* Put as many buffers as possible back into fw use */
-                        cx18_stream_load_fw_queue(s);
                         continue;
                 }
 
-                if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
-                        CX18_DEBUG_HI_DMA("TS recv bytesused = %d\n",
-                                          buf->bytesused);
-                        dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
-                                         buf->bytesused);
+                CX18_DEBUG_HI_DMA("%s recv bytesused = %d\n",
+                                  s->name, buf->bytesused);
+
+                if (s->type != CX18_ENC_STREAM_TYPE_TS)
+                        cx18_enqueue(s, buf, &s->q_full);
+                else {
+                        if (s->dvb.enabled)
+                                dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
+                                                 buf->bytesused);
+                        cx18_enqueue(s, buf, &s->q_free);
                 }
-                /* Put as many buffers as possible back into fw use */
-                cx18_stream_load_fw_queue(s);
-                /* Put back TS buffer, since it was removed from all queues */
-                if (s->type == CX18_ENC_STREAM_TYPE_TS)
-                        cx18_stream_put_buf_fw(s, buf);
         }
+        /* Put as many buffers as possible back into fw use */
+        cx18_stream_load_fw_queue(s);
+
         wake_up(&cx->dma_waitq);
         if (s->id != -1)
                 wake_up(&s->waitq);
 }
 
-static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_debug(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         char *p;
         char *str = order->str;
@@ -224,7 +225,7 @@ static void epu_debug(struct cx18 *cx, struct cx18_epu_work_order *order)
                 CX18_INFO("FW version: %s\n", p - 1);
 }
 
-static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void epu_cmd(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         switch (order->rpu) {
         case CPU:
@@ -253,18 +254,18 @@ static void epu_cmd(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static
-void free_epu_work_order(struct cx18 *cx, struct cx18_epu_work_order *order)
+void free_in_work_order(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         atomic_set(&order->pending, 0);
 }
 
-void cx18_epu_work_handler(struct work_struct *work)
+void cx18_in_work_handler(struct work_struct *work)
 {
-        struct cx18_epu_work_order *order =
-                        container_of(work, struct cx18_epu_work_order, work);
+        struct cx18_in_work_order *order =
+                        container_of(work, struct cx18_in_work_order, work);
         struct cx18 *cx = order->cx;
         epu_cmd(cx, order);
-        free_epu_work_order(cx, order);
+        free_in_work_order(cx, order);
 }
 
 
@@ -272,7 +273,7 @@ void cx18_epu_work_handler(struct work_struct *work)
  * Functions that run in an interrupt handling context
  */
 
-static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+static void mb_ack_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         struct cx18_mailbox __iomem *ack_mb;
         u32 ack_irq, req;
@@ -308,7 +309,7 @@ static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
         return;
 }
 
-static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         u32 handle, mdl_ack_offset, mdl_ack_count;
         struct cx18_mailbox *mb;
@@ -334,7 +335,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static
-int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+int epu_debug_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         u32 str_offset;
         char *str = order->str;
@@ -355,7 +356,7 @@ int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static inline
-int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
+int epu_cmd_irq(struct cx18 *cx, struct cx18_in_work_order *order)
 {
         int ret = -1;
 
@@ -387,12 +388,12 @@ int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 }
 
 static inline
-struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
+struct cx18_in_work_order *alloc_in_work_order_irq(struct cx18 *cx)
 {
         int i;
-        struct cx18_epu_work_order *order = NULL;
+        struct cx18_in_work_order *order = NULL;
 
-        for (i = 0; i < CX18_MAX_EPU_WORK_ORDERS; i++) {
+        for (i = 0; i < CX18_MAX_IN_WORK_ORDERS; i++) {
                 /*
                  * We only need "pending" atomic to inspect its contents,
                  * and need not do a check and set because:
@@ -401,8 +402,8 @@ struct cx18_epu_work_order *alloc_epu_work_order_irq(struct cx18 *cx)
                  * 2. "pending" is only set here, and we're serialized because
                  *    we're called in an IRQ handler context.
                  */
-                if (atomic_read(&cx->epu_work_order[i].pending) == 0) {
-                        order = &cx->epu_work_order[i];
+                if (atomic_read(&cx->in_work_order[i].pending) == 0) {
+                        order = &cx->in_work_order[i];
                         atomic_set(&order->pending, 1);
                         break;
                 }
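
The comment kept in the two hunks above carries the key reasoning: the "pending" flags need no atomic test-and-set because the only setter (the renamed alloc_in_work_order_irq()) runs serialized in IRQ handler context, while the work handler only ever clears a flag. Below is a minimal standalone sketch of that allocation pattern; the type and function names (in_order, order_pool, alloc_order_irq, MAX_IN_ORDERS) are simplified placeholders for illustration, not the driver's actual definitions.

```c
#include <linux/atomic.h>

#define MAX_IN_ORDERS 8		/* illustrative pool size, not the driver's */

struct in_order {
	atomic_t pending;	/* 0 = slot free, 1 = owned until the handler clears it */
	/* ... payload copied out of the incoming mailbox ... */
};

static struct in_order order_pool[MAX_IN_ORDERS];

/*
 * Runs only from the interrupt handler, so at most one allocator executes
 * at a time; the work handler only ever *clears* "pending".  Under those
 * two constraints a plain atomic_read() followed by atomic_set() is
 * sufficient and no test-and-set/cmpxchg is required.
 */
static struct in_order *alloc_order_irq(void)
{
	int i;

	for (i = 0; i < MAX_IN_ORDERS; i++) {
		if (atomic_read(&order_pool[i].pending) == 0) {
			atomic_set(&order_pool[i].pending, 1);
			return &order_pool[i];
		}
	}
	return NULL;	/* every slot is still being processed */
}

/* The consumer side simply releases the slot once the work item is done: */
static void free_order(struct in_order *order)
{
	atomic_set(&order->pending, 0);
}
```

If the allocator could run concurrently on several CPUs, the plain read-then-set would race and a cmpxchg or spinlock would be needed instead; the IRQ-context serialization is what makes the cheaper form safe here.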
@@ -414,7 +415,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 {
         struct cx18_mailbox __iomem *mb;
         struct cx18_mailbox *order_mb;
-        struct cx18_epu_work_order *order;
+        struct cx18_in_work_order *order;
         int submit;
 
         switch (rpu) {
@@ -428,7 +429,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
                 return;
         }
 
-        order = alloc_epu_work_order_irq(cx);
+        order = alloc_in_work_order_irq(cx);
         if (order == NULL) {
                 CX18_WARN("Unable to find blank work order form to schedule "
                           "incoming mailbox command processing\n");
@@ -461,7 +462,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
          */
         submit = epu_cmd_irq(cx, order);
         if (submit > 0) {
-                queue_work(cx->work_queue, &order->work);
+                queue_work(cx->in_work_queue, &order->work);
         }
 }
 
@@ -478,9 +479,10 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
         u32 __iomem *xpu_state;
         wait_queue_head_t *waitq;
         struct mutex *mb_lock;
-        long int timeout, ret;
+        unsigned long int t0, timeout, ret;
         int i;
         char argstr[MAX_MB_ARGUMENTS*11+1];
+        DEFINE_WAIT(w);
 
         if (info == NULL) {
                 CX18_WARN("unknown cmd %x\n", cmd);
@@ -562,25 +564,49 @@ static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
 
         CX18_DEBUG_HI_IRQ("sending interrupt SW1: %x to send %s\n",
                           irq, info->name);
+
+        /* So we don't miss the wakeup, prepare to wait before notifying fw */
+        prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
         cx18_write_reg_expect(cx, irq, SW1_INT_SET, irq, irq);
 
-        ret = wait_event_timeout(
-                       *waitq,
-                       cx18_readl(cx, &mb->ack) == cx18_readl(cx, &mb->request),
-                       timeout);
+        t0 = jiffies;
+        ack = cx18_readl(cx, &mb->ack);
+        if (ack != req) {
+                schedule_timeout(timeout);
+                ret = jiffies - t0;
+                ack = cx18_readl(cx, &mb->ack);
+        } else {
+                ret = jiffies - t0;
+        }
 
-        if (ret == 0) {
-                /* Timed out */
+        finish_wait(waitq, &w);
+
+        if (req != ack) {
                 mutex_unlock(mb_lock);
-                CX18_DEBUG_WARN("sending %s timed out waiting %d msecs for RPU "
-                                "acknowledgement\n",
-                                info->name, jiffies_to_msecs(timeout));
+                if (ret >= timeout) {
+                        /* Timed out */
+                        CX18_DEBUG_WARN("sending %s timed out waiting %d msecs "
+                                        "for RPU acknowledgement\n",
+                                        info->name, jiffies_to_msecs(ret));
+                } else {
+                        CX18_DEBUG_WARN("woken up before mailbox ack was ready "
+                                        "after submitting %s to RPU. only "
+                                        "waited %d msecs on req %u but awakened"
+                                        " with unmatched ack %u\n",
+                                        info->name,
+                                        jiffies_to_msecs(ret),
+                                        req, ack);
+                }
                 return -EINVAL;
         }
 
-        if (ret != timeout)
+        if (ret >= timeout)
+                CX18_DEBUG_WARN("failed to be awakened upon RPU acknowledgment "
+                                "sending %s; timed out waiting %d msecs\n",
+                                info->name, jiffies_to_msecs(ret));
+        else
                 CX18_DEBUG_HI_API("waited %u msecs for %s to be acked\n",
-                                  jiffies_to_msecs(timeout-ret), info->name);
+                                  jiffies_to_msecs(ret), info->name);
 
         /* Collect data returned by the XPU */
         for (i = 0; i < MAX_MB_ARGUMENTS; i++)
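
The last hunk replaces wait_event_timeout() with an explicit prepare_to_wait() / schedule_timeout() / finish_wait() sequence so that the caller is already registered on the wait queue before the firmware is told to process the mailbox; otherwise the acknowledgement interrupt could fire wake_up() in the gap before the driver starts sleeping and the wakeup would be lost. A rough sketch of that ordering follows; read_mb_ack(), read_mb_req() and ring_fw_doorbell() are hypothetical stand-ins for the driver's real register accessors, and the return convention is simplified.

```c
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical mailbox accessors; only the ordering below is the point. */
extern u32 read_mb_ack(void);
extern u32 read_mb_req(void);
extern void ring_fw_doorbell(void);

static int send_and_wait_for_ack(wait_queue_head_t *waitq, unsigned long timeout)
{
	DEFINE_WAIT(w);

	/*
	 * Get onto the wait queue *before* notifying the firmware, so a
	 * wake_up() fired by the ack interrupt cannot arrive in the window
	 * between the doorbell write and the sleep.
	 */
	prepare_to_wait(waitq, &w, TASK_UNINTERRUPTIBLE);
	ring_fw_doorbell();

	if (read_mb_ack() != read_mb_req())
		schedule_timeout(timeout);	/* sleeps until wake_up() or timeout */
	finish_wait(waitq, &w);

	/* Re-check the mailbox itself; the end of the sleep proves nothing. */
	if (read_mb_ack() != read_mb_req())
		return -ETIMEDOUT;
	return 0;
}
```

The post-sleep comparison is also what lets the new code distinguish a genuine timeout (ret >= timeout) from an early or spurious wakeup with a still-unmatched ack, which is exactly the distinction reported by the two new CX18_DEBUG_WARN messages in the hunk above.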