Diffstat (limited to 'drivers/media/video/cx18/cx18-mailbox.c')
-rw-r--r-- drivers/media/video/cx18/cx18-mailbox.c | 34 ++++++++++++++---------
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index d49c7c27c18f..844a62de6535 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -233,7 +233,7 @@ void cx18_epu_work_handler(struct work_struct *work)
  * Functions that run in an interrupt handling context
  */
 
-static void mb_ack_irq(struct cx18 *cx, const struct cx18_epu_work_order *order)
+static void mb_ack_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 {
 	struct cx18_mailbox __iomem *ack_mb;
 	u32 ack_irq, req;
@@ -256,15 +256,20 @@ static void mb_ack_irq(struct cx18 *cx, const struct cx18_epu_work_order *order)
 	req = order->mb.request;
 	/* Don't ack if the RPU has gotten impatient and timed us out */
 	if (req != cx18_readl(cx, &ack_mb->request) ||
-	    req == cx18_readl(cx, &ack_mb->ack))
+	    req == cx18_readl(cx, &ack_mb->ack)) {
+		CX18_WARN("Possibly falling behind: %s self-ack'ed our incoming"
+			  " %s to EPU mailbox (sequence no. %u) while "
+			  "processing\n",
+			  rpu_str[order->rpu], rpu_str[order->rpu], req);
+		order->flags |= CX18_F_EWO_MB_STALE_WHILE_PROC;
 		return;
+	}
 	cx18_writel(cx, req, &ack_mb->ack);
 	cx18_write_reg_expect(cx, ack_irq, SW2_INT_SET, ack_irq, ack_irq);
 	return;
 }
 
-static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order,
-			    int stale)
+static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 {
 	u32 handle, mdl_ack_offset, mdl_ack_count;
 	struct cx18_mailbox *mb;
@@ -276,20 +281,21 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_epu_work_order *order,
 
 	if (handle == CX18_INVALID_TASK_HANDLE ||
 	    mdl_ack_count == 0 || mdl_ack_count > CX18_MAX_MDL_ACKS) {
-		if (!stale)
+		if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
 			mb_ack_irq(cx, order);
 		return -1;
 	}
 
 	cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
 			   sizeof(struct cx18_mdl_ack) * mdl_ack_count);
-	if (!stale)
+
+	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
 		mb_ack_irq(cx, order);
 	return 1;
 }
 
 static
-int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
+int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 {
 	u32 str_offset;
 	char *str = order->str;
@@ -303,14 +309,14 @@ int epu_debug_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
 		cx18_setup_page(cx, SCB_OFFSET);
 	}
 
-	if (!stale)
+	if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
 		mb_ack_irq(cx, order);
 
 	return str_offset ? 1 : 0;
 }
 
 static inline
-int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
+int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order)
 {
 	int ret = -1;
 
@@ -319,10 +325,10 @@ int epu_cmd_irq(struct cx18 *cx, struct cx18_epu_work_order *order, int stale)
 	{
 		switch (order->mb.cmd) {
 		case CX18_EPU_DMA_DONE:
-			ret = epu_dma_done_irq(cx, order, stale);
+			ret = epu_dma_done_irq(cx, order);
 			break;
 		case CX18_EPU_DEBUG:
-			ret = epu_debug_irq(cx, order, stale);
+			ret = epu_debug_irq(cx, order);
 			break;
 		default:
 			CX18_WARN("Unknown CPU to EPU mailbox command %#0x\n",
@@ -370,7 +376,6 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 	struct cx18_mailbox __iomem *mb;
 	struct cx18_mailbox *order_mb;
 	struct cx18_epu_work_order *order;
-	int stale = 0;
 	int submit;
 
 	switch (rpu) {
@@ -391,6 +396,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
 		return;
 	}
 
+	order->flags = 0;
 	order->rpu = rpu;
 	order_mb = &order->mb;
 	cx18_memcpy_fromio(cx, order_mb, mb, sizeof(struct cx18_mailbox));
@@ -400,14 +406,14 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
400 " %s to EPU mailbox (sequence no. %u)\n", 406 " %s to EPU mailbox (sequence no. %u)\n",
401 rpu_str[rpu], rpu_str[rpu], order_mb->request); 407 rpu_str[rpu], rpu_str[rpu], order_mb->request);
402 dump_mb(cx, order_mb, "incoming"); 408 dump_mb(cx, order_mb, "incoming");
403 stale = 1; 409 order->flags = CX18_F_EWO_MB_STALE_UPON_RECEIPT;
404 } 410 }
405 411
406 /* 412 /*
407 * Individual EPU command processing is responsible for ack-ing 413 * Individual EPU command processing is responsible for ack-ing
408 * a non-stale mailbox as soon as possible 414 * a non-stale mailbox as soon as possible
409 */ 415 */
410 submit = epu_cmd_irq(cx, order, stale); 416 submit = epu_cmd_irq(cx, order);
411 if (submit > 0) { 417 if (submit > 0) {
412 queue_work(cx18_work_queue, &order->work); 418 queue_work(cx18_work_queue, &order->work);
413 } 419 }
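
The net effect of this patch is to stop threading a separate int stale argument
through the interrupt-context handlers and instead record staleness in
order->flags, which can distinguish a mailbox that was already stale when the
IRQ handler copied it (CX18_F_EWO_MB_STALE_UPON_RECEIPT) from one the firmware
self-ack'ed while mb_ack_irq() was still working on it
(CX18_F_EWO_MB_STALE_WHILE_PROC). The flag constants themselves live in
cx18-mailbox.h, which this diff does not touch; the following is a minimal
sketch of definitions consistent with the checks above, with the exact values
assumed rather than taken from the header:

/*
 * Assumed definitions (cx18-mailbox.h is not shown in this diff);
 * the bit values are illustrative, only the OR relationship matters.
 */
#define CX18_F_EWO_MB_STALE_UPON_RECEIPT 0x0001 /* stale when first read in the IRQ handler */
#define CX18_F_EWO_MB_STALE_WHILE_PROC   0x0002 /* went stale during mb_ack_irq() processing */
#define CX18_F_EWO_MB_STALE \
	(CX18_F_EWO_MB_STALE_UPON_RECEIPT | CX18_F_EWO_MB_STALE_WHILE_PROC)

With that encoding, the test (order->flags & CX18_F_EWO_MB_STALE) == 0 in
epu_dma_done_irq() and epu_debug_irq() suppresses the ack for either kind of
stale mailbox, and the order->flags = 0 added in cx18_api_epu_cmd_irq() resets
the state each time a work order is reused.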