diff options
author | Daniel Martensson <daniel.martensson@stericsson.com> | 2011-10-13 07:29:25 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-10-19 03:25:41 -0400 |
commit | 687b13e98addc99644002703944ec89e94287cb6 (patch) | |
tree | 79ef989f2c8bc701edd31969751577ad81852193 /drivers/net/caif | |
parent | 73033c987a8bd0b080509063bb7c130b8941ad73 (diff) |
caif-hsi: Making read and writes asynchronous.
Some platforms do not allow putting the HSI block into low-power
mode while the FIFO is not empty. The patch flushes (by reading) the
FIFO during the wake-down sequence. Asynchronous read and write are
implemented for that. As a side effect this will also greatly
improve performance.
Signed-off-by: Sjur Brændeland <sjur.brandeland@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/caif')
-rw-r--r-- | drivers/net/caif/caif_hsi.c | 263 |
1 file changed, 127 insertions, 136 deletions
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 82c4d6ca2d3f..478b025c9f8b 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c | |||
@@ -145,7 +145,7 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi) | |||
145 | } | 145 | } |
146 | 146 | ||
147 | ret = 5 * HZ; | 147 | ret = 5 * HZ; |
148 | wait_event_interruptible_timeout(cfhsi->flush_fifo_wait, | 148 | ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait, |
149 | !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); | 149 | !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); |
150 | 150 | ||
151 | if (ret < 0) { | 151 | if (ret < 0) { |
@@ -272,16 +272,13 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
272 | return CFHSI_DESC_SZ + pld_len; | 272 | return CFHSI_DESC_SZ + pld_len; |
273 | } | 273 | } |
274 | 274 | ||
275 | static void cfhsi_tx_done_work(struct work_struct *work) | 275 | static void cfhsi_tx_done(struct cfhsi *cfhsi) |
276 | { | 276 | { |
277 | struct cfhsi *cfhsi = NULL; | ||
278 | struct cfhsi_desc *desc = NULL; | 277 | struct cfhsi_desc *desc = NULL; |
279 | int len = 0; | 278 | int len = 0; |
280 | int res; | 279 | int res; |
281 | 280 | ||
282 | cfhsi = container_of(work, struct cfhsi, tx_done_work); | 281 | dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); |
283 | dev_dbg(&cfhsi->ndev->dev, "%s.\n", | ||
284 | __func__); | ||
285 | 282 | ||
286 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) | 283 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) |
287 | return; | 284 | return; |
@@ -343,11 +340,11 @@ static void cfhsi_tx_done_cb(struct cfhsi_drv *drv) | |||
343 | 340 | ||
344 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) | 341 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) |
345 | return; | 342 | return; |
346 | 343 | cfhsi_tx_done(cfhsi); | |
347 | queue_work(cfhsi->wq, &cfhsi->tx_done_work); | ||
348 | } | 344 | } |
349 | 345 | ||
350 | static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | 346 | static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi, |
347 | bool *dump) | ||
351 | { | 348 | { |
352 | int xfer_sz = 0; | 349 | int xfer_sz = 0; |
353 | int nfrms = 0; | 350 | int nfrms = 0; |
@@ -358,6 +355,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
358 | (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { | 355 | (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { |
359 | dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", | 356 | dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", |
360 | __func__); | 357 | __func__); |
358 | *dump = true; | ||
361 | return 0; | 359 | return 0; |
362 | } | 360 | } |
363 | 361 | ||
@@ -365,7 +363,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
365 | if (desc->offset) { | 363 | if (desc->offset) { |
366 | struct sk_buff *skb; | 364 | struct sk_buff *skb; |
367 | u8 *dst = NULL; | 365 | u8 *dst = NULL; |
368 | int len = 0, retries = 0; | 366 | int len = 0; |
369 | pfrm = ((u8 *)desc) + desc->offset; | 367 | pfrm = ((u8 *)desc) + desc->offset; |
370 | 368 | ||
371 | /* Remove offset padding. */ | 369 | /* Remove offset padding. */ |
@@ -378,24 +376,11 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
378 | 376 | ||
379 | 377 | ||
380 | /* Allocate SKB (OK even in IRQ context). */ | 378 | /* Allocate SKB (OK even in IRQ context). */ |
381 | skb = alloc_skb(len + 1, GFP_KERNEL); | 379 | skb = alloc_skb(len + 1, GFP_ATOMIC); |
382 | while (!skb) { | 380 | if (!skb) { |
383 | retries++; | 381 | dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", |
384 | schedule_timeout(1); | 382 | __func__); |
385 | skb = alloc_skb(len + 1, GFP_KERNEL); | 383 | return -ENOMEM; |
386 | if (skb) { | ||
387 | printk(KERN_WARNING "%s: slept for %u " | ||
388 | "before getting memory\n", | ||
389 | __func__, retries); | ||
390 | break; | ||
391 | } | ||
392 | if (retries > HZ) { | ||
393 | printk(KERN_ERR "%s: slept for 1HZ and " | ||
394 | "did not get memory\n", | ||
395 | __func__); | ||
396 | cfhsi->ndev->stats.rx_dropped++; | ||
397 | goto drop_frame; | ||
398 | } | ||
399 | } | 384 | } |
400 | caif_assert(skb != NULL); | 385 | caif_assert(skb != NULL); |
401 | 386 | ||
@@ -421,7 +406,6 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
421 | cfhsi->ndev->stats.rx_bytes += len; | 406 | cfhsi->ndev->stats.rx_bytes += len; |
422 | } | 407 | } |
423 | 408 | ||
424 | drop_frame: | ||
425 | /* Calculate transfer length. */ | 409 | /* Calculate transfer length. */ |
426 | plen = desc->cffrm_len; | 410 | plen = desc->cffrm_len; |
427 | while (nfrms < CFHSI_MAX_PKTS && *plen) { | 411 | while (nfrms < CFHSI_MAX_PKTS && *plen) { |
@@ -439,12 +423,13 @@ drop_frame: | |||
439 | "%s: Invalid payload len: %d, ignored.\n", | 423 | "%s: Invalid payload len: %d, ignored.\n", |
440 | __func__, xfer_sz); | 424 | __func__, xfer_sz); |
441 | xfer_sz = 0; | 425 | xfer_sz = 0; |
426 | *dump = true; | ||
442 | } | 427 | } |
443 | |||
444 | return xfer_sz; | 428 | return xfer_sz; |
445 | } | 429 | } |
446 | 430 | ||
447 | static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | 431 | static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi, |
432 | bool *dump) | ||
448 | { | 433 | { |
449 | int rx_sz = 0; | 434 | int rx_sz = 0; |
450 | int nfrms = 0; | 435 | int nfrms = 0; |
@@ -456,21 +441,33 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
456 | (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { | 441 | (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { |
457 | dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", | 442 | dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n", |
458 | __func__); | 443 | __func__); |
444 | *dump = true; | ||
459 | return -EINVAL; | 445 | return -EINVAL; |
460 | } | 446 | } |
461 | 447 | ||
462 | /* Set frame pointer to start of payload. */ | 448 | /* Set frame pointer to start of payload. */ |
463 | pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; | 449 | pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; |
464 | plen = desc->cffrm_len; | 450 | plen = desc->cffrm_len; |
451 | |||
452 | /* Skip already processed frames. */ | ||
453 | while (nfrms < cfhsi->rx_state.nfrms) { | ||
454 | pfrm += *plen; | ||
455 | rx_sz += *plen; | ||
456 | plen++; | ||
457 | nfrms++; | ||
458 | } | ||
459 | |||
460 | /* Parse payload. */ | ||
465 | while (nfrms < CFHSI_MAX_PKTS && *plen) { | 461 | while (nfrms < CFHSI_MAX_PKTS && *plen) { |
466 | struct sk_buff *skb; | 462 | struct sk_buff *skb; |
467 | u8 *dst = NULL; | 463 | u8 *dst = NULL; |
468 | u8 *pcffrm = NULL; | 464 | u8 *pcffrm = NULL; |
469 | int len = 0, retries = 0; | 465 | int len = 0; |
470 | 466 | ||
471 | if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) { | 467 | if (WARN_ON(desc->cffrm_len[nfrms] > CFHSI_MAX_PAYLOAD_SZ)) { |
472 | dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n", | 468 | dev_err(&cfhsi->ndev->dev, "%s: Invalid payload.\n", |
473 | __func__); | 469 | __func__); |
470 | *dump = true; | ||
474 | return -EINVAL; | 471 | return -EINVAL; |
475 | } | 472 | } |
476 | 473 | ||
@@ -483,24 +480,12 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
483 | len += 2; /* Add FCS fields. */ | 480 | len += 2; /* Add FCS fields. */ |
484 | 481 | ||
485 | /* Allocate SKB (OK even in IRQ context). */ | 482 | /* Allocate SKB (OK even in IRQ context). */ |
486 | skb = alloc_skb(len + 1, GFP_KERNEL); | 483 | skb = alloc_skb(len + 1, GFP_ATOMIC); |
487 | while (!skb) { | 484 | if (!skb) { |
488 | retries++; | 485 | dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n", |
489 | schedule_timeout(1); | 486 | __func__); |
490 | skb = alloc_skb(len + 1, GFP_KERNEL); | 487 | cfhsi->rx_state.nfrms = nfrms; |
491 | if (skb) { | 488 | return -ENOMEM; |
492 | printk(KERN_WARNING "%s: slept for %u " | ||
493 | "before getting memory\n", | ||
494 | __func__, retries); | ||
495 | break; | ||
496 | } | ||
497 | if (retries > HZ) { | ||
498 | printk(KERN_ERR "%s: slept for 1HZ " | ||
499 | "and did not get memory\n", | ||
500 | __func__); | ||
501 | cfhsi->ndev->stats.rx_dropped++; | ||
502 | goto drop_frame; | ||
503 | } | ||
504 | } | 489 | } |
505 | caif_assert(skb != NULL); | 490 | caif_assert(skb != NULL); |
506 | 491 | ||
@@ -524,7 +509,6 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) | |||
524 | cfhsi->ndev->stats.rx_packets++; | 509 | cfhsi->ndev->stats.rx_packets++; |
525 | cfhsi->ndev->stats.rx_bytes += len; | 510 | cfhsi->ndev->stats.rx_bytes += len; |
526 | 511 | ||
527 | drop_frame: | ||
528 | pfrm += *plen; | 512 | pfrm += *plen; |
529 | rx_sz += *plen; | 513 | rx_sz += *plen; |
530 | plen++; | 514 | plen++; |
@@ -534,18 +518,16 @@ drop_frame: | |||
534 | return rx_sz; | 518 | return rx_sz; |
535 | } | 519 | } |
536 | 520 | ||
537 | static void cfhsi_rx_done_work(struct work_struct *work) | 521 | static void cfhsi_rx_done(struct cfhsi *cfhsi) |
538 | { | 522 | { |
539 | int res; | 523 | int res; |
540 | int desc_pld_len = 0; | 524 | int desc_pld_len = 0; |
541 | struct cfhsi *cfhsi = NULL; | ||
542 | struct cfhsi_desc *desc = NULL; | 525 | struct cfhsi_desc *desc = NULL; |
526 | bool dump = false; | ||
543 | 527 | ||
544 | cfhsi = container_of(work, struct cfhsi, rx_done_work); | ||
545 | desc = (struct cfhsi_desc *)cfhsi->rx_buf; | 528 | desc = (struct cfhsi_desc *)cfhsi->rx_buf; |
546 | 529 | ||
547 | dev_dbg(&cfhsi->ndev->dev, "%s: Kick timer if pending.\n", | 530 | dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__); |
548 | __func__); | ||
549 | 531 | ||
550 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) | 532 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) |
551 | return; | 533 | return; |
@@ -555,21 +537,33 @@ static void cfhsi_rx_done_work(struct work_struct *work) | |||
555 | mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT); | 537 | mod_timer_pending(&cfhsi->timer, jiffies + CFHSI_INACTIVITY_TOUT); |
556 | spin_unlock_bh(&cfhsi->lock); | 538 | spin_unlock_bh(&cfhsi->lock); |
557 | 539 | ||
558 | if (cfhsi->rx_state == CFHSI_RX_STATE_DESC) { | 540 | if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { |
559 | desc_pld_len = cfhsi_rx_desc(desc, cfhsi); | 541 | desc_pld_len = cfhsi_rx_desc(desc, cfhsi, &dump); |
542 | if (desc_pld_len == -ENOMEM) | ||
543 | goto restart; | ||
560 | } else { | 544 | } else { |
561 | int pld_len; | 545 | int pld_len; |
562 | 546 | ||
563 | pld_len = cfhsi_rx_pld(desc, cfhsi); | 547 | if (!cfhsi->rx_state.piggy_desc) { |
548 | pld_len = cfhsi_rx_pld(desc, cfhsi, &dump); | ||
549 | if (pld_len == -ENOMEM) | ||
550 | goto restart; | ||
551 | cfhsi->rx_state.pld_len = pld_len; | ||
552 | } else { | ||
553 | pld_len = cfhsi->rx_state.pld_len; | ||
554 | } | ||
564 | 555 | ||
565 | if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) { | 556 | if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) { |
566 | struct cfhsi_desc *piggy_desc; | 557 | struct cfhsi_desc *piggy_desc; |
567 | piggy_desc = (struct cfhsi_desc *) | 558 | piggy_desc = (struct cfhsi_desc *) |
568 | (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + | 559 | (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + |
569 | pld_len); | 560 | pld_len); |
561 | cfhsi->rx_state.piggy_desc = true; | ||
570 | 562 | ||
571 | /* Extract piggy-backed descriptor. */ | 563 | /* Extract piggy-backed descriptor. */ |
572 | desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi); | 564 | desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi, &dump); |
565 | if (desc_pld_len == -ENOMEM) | ||
566 | goto restart; | ||
573 | 567 | ||
574 | /* | 568 | /* |
575 | * Copy needed information from the piggy-backed | 569 | * Copy needed information from the piggy-backed |
@@ -580,16 +574,24 @@ static void cfhsi_rx_done_work(struct work_struct *work) | |||
580 | } | 574 | } |
581 | } | 575 | } |
582 | 576 | ||
577 | if (unlikely(dump)) { | ||
578 | size_t rx_offset = cfhsi->rx_ptr - cfhsi->rx_buf; | ||
579 | dev_err(&cfhsi->ndev->dev, "%s: RX offset: %u.\n", | ||
580 | __func__, (unsigned) rx_offset); | ||
581 | print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, | ||
582 | cfhsi->rx_buf, cfhsi->rx_len + rx_offset); | ||
583 | } | ||
584 | |||
585 | memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state)); | ||
583 | if (desc_pld_len) { | 586 | if (desc_pld_len) { |
584 | cfhsi->rx_state = CFHSI_RX_STATE_PAYLOAD; | 587 | cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD; |
585 | cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ; | 588 | cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ; |
586 | cfhsi->rx_len = desc_pld_len; | 589 | cfhsi->rx_len = desc_pld_len; |
587 | } else { | 590 | } else { |
588 | cfhsi->rx_state = CFHSI_RX_STATE_DESC; | 591 | cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; |
589 | cfhsi->rx_ptr = cfhsi->rx_buf; | 592 | cfhsi->rx_ptr = cfhsi->rx_buf; |
590 | cfhsi->rx_len = CFHSI_DESC_SZ; | 593 | cfhsi->rx_len = CFHSI_DESC_SZ; |
591 | } | 594 | } |
592 | clear_bit(CFHSI_PENDING_RX, &cfhsi->bits); | ||
593 | 595 | ||
594 | if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { | 596 | if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { |
595 | /* Set up new transfer. */ | 597 | /* Set up new transfer. */ |
@@ -604,6 +606,26 @@ static void cfhsi_rx_done_work(struct work_struct *work) | |||
604 | cfhsi->ndev->stats.rx_dropped++; | 606 | cfhsi->ndev->stats.rx_dropped++; |
605 | } | 607 | } |
606 | } | 608 | } |
609 | return; | ||
610 | |||
611 | restart: | ||
612 | if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) { | ||
613 | dev_err(&cfhsi->ndev->dev, "%s: No memory available " | ||
614 | "in %d iterations.\n", | ||
615 | __func__, CFHSI_MAX_RX_RETRIES); | ||
616 | BUG(); | ||
617 | } | ||
618 | mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1); | ||
619 | } | ||
620 | |||
621 | static void cfhsi_rx_slowpath(unsigned long arg) | ||
622 | { | ||
623 | struct cfhsi *cfhsi = (struct cfhsi *)arg; | ||
624 | |||
625 | dev_dbg(&cfhsi->ndev->dev, "%s.\n", | ||
626 | __func__); | ||
627 | |||
628 | cfhsi_rx_done(cfhsi); | ||
607 | } | 629 | } |
608 | 630 | ||
609 | static void cfhsi_rx_done_cb(struct cfhsi_drv *drv) | 631 | static void cfhsi_rx_done_cb(struct cfhsi_drv *drv) |
@@ -617,12 +639,10 @@ static void cfhsi_rx_done_cb(struct cfhsi_drv *drv) | |||
617 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) | 639 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) |
618 | return; | 640 | return; |
619 | 641 | ||
620 | set_bit(CFHSI_PENDING_RX, &cfhsi->bits); | ||
621 | |||
622 | if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits)) | 642 | if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits)) |
623 | wake_up_interruptible(&cfhsi->flush_fifo_wait); | 643 | wake_up_interruptible(&cfhsi->flush_fifo_wait); |
624 | else | 644 | else |
625 | queue_work(cfhsi->wq, &cfhsi->rx_done_work); | 645 | cfhsi_rx_done(cfhsi); |
626 | } | 646 | } |
627 | 647 | ||
628 | static void cfhsi_wake_up(struct work_struct *work) | 648 | static void cfhsi_wake_up(struct work_struct *work) |
@@ -651,9 +671,9 @@ static void cfhsi_wake_up(struct work_struct *work) | |||
651 | __func__); | 671 | __func__); |
652 | 672 | ||
653 | /* Wait for acknowledge. */ | 673 | /* Wait for acknowledge. */ |
654 | ret = CFHSI_WAKEUP_TOUT; | 674 | ret = CFHSI_WAKE_TOUT; |
655 | wait_event_interruptible_timeout(cfhsi->wake_up_wait, | 675 | ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait, |
656 | test_bit(CFHSI_WAKE_UP_ACK, | 676 | test_and_clear_bit(CFHSI_WAKE_UP_ACK, |
657 | &cfhsi->bits), ret); | 677 | &cfhsi->bits), ret); |
658 | if (unlikely(ret < 0)) { | 678 | if (unlikely(ret < 0)) { |
659 | /* Interrupted by signal. */ | 679 | /* Interrupted by signal. */ |
@@ -678,16 +698,11 @@ static void cfhsi_wake_up(struct work_struct *work) | |||
678 | clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); | 698 | clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); |
679 | 699 | ||
680 | /* Resume read operation. */ | 700 | /* Resume read operation. */ |
681 | if (!test_bit(CFHSI_PENDING_RX, &cfhsi->bits)) { | 701 | dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__); |
682 | dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", | 702 | res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev); |
683 | __func__); | 703 | |
684 | res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, | 704 | if (WARN_ON(res < 0)) |
685 | cfhsi->rx_len, cfhsi->dev); | 705 | dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res); |
686 | if (WARN_ON(res < 0)) { | ||
687 | dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n", | ||
688 | __func__, res); | ||
689 | } | ||
690 | } | ||
691 | 706 | ||
692 | /* Clear power up acknowledment. */ | 707 | /* Clear power up acknowledment. */ |
693 | clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); | 708 | clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); |
@@ -726,50 +741,29 @@ static void cfhsi_wake_up(struct work_struct *work) | |||
726 | "%s: Failed to create HSI frame: %d.\n", | 741 | "%s: Failed to create HSI frame: %d.\n", |
727 | __func__, len); | 742 | __func__, len); |
728 | } | 743 | } |
729 | |||
730 | } | 744 | } |
731 | 745 | ||
732 | static void cfhsi_wake_down(struct work_struct *work) | 746 | static void cfhsi_wake_down(struct work_struct *work) |
733 | { | 747 | { |
734 | long ret; | 748 | long ret; |
735 | struct cfhsi *cfhsi = NULL; | 749 | struct cfhsi *cfhsi = NULL; |
736 | size_t fifo_occupancy; | 750 | size_t fifo_occupancy = 0; |
751 | int retry = CFHSI_WAKE_TOUT; | ||
737 | 752 | ||
738 | cfhsi = container_of(work, struct cfhsi, wake_down_work); | 753 | cfhsi = container_of(work, struct cfhsi, wake_down_work); |
739 | dev_dbg(&cfhsi->ndev->dev, "%s.\n", | 754 | dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); |
740 | __func__); | ||
741 | 755 | ||
742 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) | 756 | if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) |
743 | return; | 757 | return; |
744 | 758 | ||
745 | /* Check if there is something in FIFO. */ | ||
746 | if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, | ||
747 | &fifo_occupancy))) | ||
748 | fifo_occupancy = 0; | ||
749 | |||
750 | if (fifo_occupancy) { | ||
751 | dev_dbg(&cfhsi->ndev->dev, | ||
752 | "%s: %u words in RX FIFO, restart timer.\n", | ||
753 | __func__, (unsigned) fifo_occupancy); | ||
754 | spin_lock_bh(&cfhsi->lock); | ||
755 | mod_timer(&cfhsi->timer, | ||
756 | jiffies + CFHSI_INACTIVITY_TOUT); | ||
757 | spin_unlock_bh(&cfhsi->lock); | ||
758 | return; | ||
759 | } | ||
760 | |||
761 | /* Cancel pending RX requests */ | ||
762 | cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); | ||
763 | |||
764 | /* Deactivate wake line. */ | 759 | /* Deactivate wake line. */ |
765 | cfhsi->dev->cfhsi_wake_down(cfhsi->dev); | 760 | cfhsi->dev->cfhsi_wake_down(cfhsi->dev); |
766 | 761 | ||
767 | /* Wait for acknowledge. */ | 762 | /* Wait for acknowledge. */ |
768 | ret = CFHSI_WAKEUP_TOUT; | 763 | ret = CFHSI_WAKE_TOUT; |
769 | ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait, | 764 | ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait, |
770 | test_bit(CFHSI_WAKE_DOWN_ACK, | 765 | test_and_clear_bit(CFHSI_WAKE_DOWN_ACK, |
771 | &cfhsi->bits), | 766 | &cfhsi->bits), ret); |
772 | ret); | ||
773 | if (ret < 0) { | 767 | if (ret < 0) { |
774 | /* Interrupted by signal. */ | 768 | /* Interrupted by signal. */ |
775 | dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", | 769 | dev_info(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n", |
@@ -777,28 +771,31 @@ static void cfhsi_wake_down(struct work_struct *work) | |||
777 | return; | 771 | return; |
778 | } else if (!ret) { | 772 | } else if (!ret) { |
779 | /* Timeout */ | 773 | /* Timeout */ |
780 | dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", | 774 | dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__); |
781 | __func__); | ||
782 | } | 775 | } |
783 | 776 | ||
784 | /* Clear power down acknowledment. */ | 777 | /* Check FIFO occupancy. */ |
785 | clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); | 778 | while (retry) { |
779 | WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, | ||
780 | &fifo_occupancy)); | ||
781 | |||
782 | if (!fifo_occupancy) | ||
783 | break; | ||
784 | |||
785 | set_current_state(TASK_INTERRUPTIBLE); | ||
786 | schedule_timeout(1); | ||
787 | retry--; | ||
788 | } | ||
789 | |||
790 | if (!retry) | ||
791 | dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__); | ||
792 | |||
793 | /* Clear AWAKE condition. */ | ||
786 | clear_bit(CFHSI_AWAKE, &cfhsi->bits); | 794 | clear_bit(CFHSI_AWAKE, &cfhsi->bits); |
787 | 795 | ||
788 | /* Check if there is something in FIFO. */ | 796 | /* Cancel pending RX requests. */ |
789 | if (WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, | 797 | cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); |
790 | &fifo_occupancy))) | ||
791 | fifo_occupancy = 0; | ||
792 | 798 | ||
793 | if (fifo_occupancy) { | ||
794 | dev_dbg(&cfhsi->ndev->dev, | ||
795 | "%s: %u words in RX FIFO, wakeup forced.\n", | ||
796 | __func__, (unsigned) fifo_occupancy); | ||
797 | if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) | ||
798 | queue_work(cfhsi->wq, &cfhsi->wake_up_work); | ||
799 | } else | ||
800 | dev_dbg(&cfhsi->ndev->dev, "%s: Done.\n", | ||
801 | __func__); | ||
802 | } | 799 | } |
803 | 800 | ||
804 | static void cfhsi_wake_up_cb(struct cfhsi_drv *drv) | 801 | static void cfhsi_wake_up_cb(struct cfhsi_drv *drv) |
@@ -874,11 +871,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) | |||
874 | } | 871 | } |
875 | 872 | ||
876 | /* Delete inactivity timer if started. */ | 873 | /* Delete inactivity timer if started. */ |
877 | #ifdef CONFIG_SMP | ||
878 | timer_active = del_timer_sync(&cfhsi->timer); | 874 | timer_active = del_timer_sync(&cfhsi->timer); |
879 | #else | ||
880 | timer_active = del_timer(&cfhsi->timer); | ||
881 | #endif /* CONFIG_SMP */ | ||
882 | 875 | ||
883 | spin_unlock_bh(&cfhsi->lock); | 876 | spin_unlock_bh(&cfhsi->lock); |
884 | 877 | ||
@@ -962,7 +955,7 @@ int cfhsi_probe(struct platform_device *pdev) | |||
962 | 955 | ||
963 | /* Initialize state vaiables. */ | 956 | /* Initialize state vaiables. */ |
964 | cfhsi->tx_state = CFHSI_TX_STATE_IDLE; | 957 | cfhsi->tx_state = CFHSI_TX_STATE_IDLE; |
965 | cfhsi->rx_state = CFHSI_RX_STATE_DESC; | 958 | cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; |
966 | 959 | ||
967 | /* Set flow info */ | 960 | /* Set flow info */ |
968 | cfhsi->flow_off_sent = 0; | 961 | cfhsi->flow_off_sent = 0; |
@@ -1012,15 +1005,12 @@ int cfhsi_probe(struct platform_device *pdev) | |||
1012 | /* Initialize the work queues. */ | 1005 | /* Initialize the work queues. */ |
1013 | INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); | 1006 | INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); |
1014 | INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down); | 1007 | INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down); |
1015 | INIT_WORK(&cfhsi->rx_done_work, cfhsi_rx_done_work); | ||
1016 | INIT_WORK(&cfhsi->tx_done_work, cfhsi_tx_done_work); | ||
1017 | 1008 | ||
1018 | /* Clear all bit fields. */ | 1009 | /* Clear all bit fields. */ |
1019 | clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); | 1010 | clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); |
1020 | clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); | 1011 | clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); |
1021 | clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); | 1012 | clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); |
1022 | clear_bit(CFHSI_AWAKE, &cfhsi->bits); | 1013 | clear_bit(CFHSI_AWAKE, &cfhsi->bits); |
1023 | clear_bit(CFHSI_PENDING_RX, &cfhsi->bits); | ||
1024 | 1014 | ||
1025 | /* Create work thread. */ | 1015 | /* Create work thread. */ |
1026 | cfhsi->wq = create_singlethread_workqueue(pdev->name); | 1016 | cfhsi->wq = create_singlethread_workqueue(pdev->name); |
@@ -1040,6 +1030,10 @@ int cfhsi_probe(struct platform_device *pdev) | |||
1040 | init_timer(&cfhsi->timer); | 1030 | init_timer(&cfhsi->timer); |
1041 | cfhsi->timer.data = (unsigned long)cfhsi; | 1031 | cfhsi->timer.data = (unsigned long)cfhsi; |
1042 | cfhsi->timer.function = cfhsi_inactivity_tout; | 1032 | cfhsi->timer.function = cfhsi_inactivity_tout; |
1033 | /* Setup the slowpath RX timer. */ | ||
1034 | init_timer(&cfhsi->rx_slowpath_timer); | ||
1035 | cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; | ||
1036 | cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; | ||
1043 | 1037 | ||
1044 | /* Add CAIF HSI device to list. */ | 1038 | /* Add CAIF HSI device to list. */ |
1045 | spin_lock(&cfhsi_list_lock); | 1039 | spin_lock(&cfhsi_list_lock); |
@@ -1110,12 +1104,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi, bool remove_platform_dev) | |||
1110 | /* Flush workqueue */ | 1104 | /* Flush workqueue */ |
1111 | flush_workqueue(cfhsi->wq); | 1105 | flush_workqueue(cfhsi->wq); |
1112 | 1106 | ||
1113 | /* Delete timer if pending */ | 1107 | /* Delete timers if pending */ |
1114 | #ifdef CONFIG_SMP | ||
1115 | del_timer_sync(&cfhsi->timer); | 1108 | del_timer_sync(&cfhsi->timer); |
1116 | #else | 1109 | del_timer_sync(&cfhsi->rx_slowpath_timer); |
1117 | del_timer(&cfhsi->timer); | ||
1118 | #endif /* CONFIG_SMP */ | ||
1119 | 1110 | ||
1120 | /* Cancel pending RX request (if any) */ | 1111 | /* Cancel pending RX request (if any) */ |
1121 | cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); | 1112 | cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); |