author     Rob Rice <rob.rice@broadcom.com>           2016-11-14 13:25:59 -0500
committer  Jassi Brar <jaswinder.singh@linaro.org>    2016-12-19 09:40:20 -0500
commit     e004c7e7d3b873a671fecf04f197982806e380eb
tree       769acd7c4f6ea8f659dea84f4aa9704dfe893723 /drivers/mailbox/bcm-pdc-mailbox.c
parent     ab8d1b2d564f6649547b97e65806556c42f93a26
mailbox: bcm-pdc: streamline rx code
Remove the unnecessary rmb() from the receive path.
If the rx ring has multiple messages ready, avoid reading
last_rx_curr multiple times from the register.
Signed-off-by: Rob Rice <rob.rice@broadcom.com>
Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
Diffstat (limited to 'drivers/mailbox/bcm-pdc-mailbox.c')
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c  108
1 file changed, 48 insertions, 60 deletions
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index fa3f484d3771..21957609ea91 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -570,27 +570,23 @@ pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
 }
 
 /**
- * pdc_receive() - Receive a response message from a given SPU.
+ * pdc_receive_one() - Receive a response message from a given SPU.
  * @pdcs: PDC state for the SPU to receive from
- * @mssg: mailbox message to be returned to client
  *
  * When the return code indicates success, the response message is available in
  * the receive buffers provided prior to submission of the request.
  *
- * Input:
- *   pdcs - PDC state structure for the SPU to be polled
- *   mssg - mailbox message to be returned to client. This function sets the
- *          context pointer on the message to help the client associate the
- *          response with a request.
- *
  * Return: PDC_SUCCESS if one or more receive descriptors was processed
  *         -EAGAIN indicates that no response message is available
  *         -EIO an error occurred
  */
 static int
-pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
+pdc_receive_one(struct pdc_state *pdcs)
 {
         struct device *dev = &pdcs->pdev->dev;
+        struct mbox_controller *mbc;
+        struct mbox_chan *chan;
+        struct brcm_message mssg;
         u32 len, rx_status;
         u32 num_frags;
         int i;
@@ -599,29 +595,23 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
         u32 rx_idx;   /* ring index of start of receive frame */
         dma_addr_t resp_hdr_daddr;
 
+        mbc = &pdcs->mbc;
+        chan = &mbc->chans[0];
+        mssg.type = BRCM_MESSAGE_SPU;
+
         /*
          * return if a complete response message is not yet ready.
          * rxin_numd[rxin] is the number of fragments in the next msg
          * to read.
          */
         frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
-        if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
-                /* See if the hw has written more fragments than we know */
-                pdcs->last_rx_curr =
-                    (ioread32((void *)&pdcs->rxregs_64->status0) &
-                     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
-                frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
-                                       pdcs->nrxpost);
-                if ((frags_rdy == 0) ||
-                    (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) {
-                        /* No response ready */
-                        return -EAGAIN;
-                }
-                /* can't read descriptors/data until write index is read */
-                rmb();
-        }
+        if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin]))
+                /* No response ready */
+                return -EAGAIN;
 
         num_frags = pdcs->txin_numd[pdcs->txin];
+        WARN_ON(num_frags == 0);
+
         dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
                      sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);
 
@@ -634,7 +624,7 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
         rx_idx = pdcs->rxin;
         num_frags = pdcs->rxin_numd[rx_idx];
         /* Return opaque context with result */
-        mssg->ctx = pdcs->rxp_ctx[rx_idx];
+        mssg.ctx = pdcs->rxp_ctx[rx_idx];
         pdcs->rxp_ctx[rx_idx] = NULL;
         resp_hdr = pdcs->resp_hdr[rx_idx];
         resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx];
@@ -674,12 +664,35 @@ pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg)
 
         dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);
 
+        mbox_chan_received_data(chan, &mssg);
+
         pdcs->pdc_replies++;
-        /* if we read one or more rx descriptors, claim success */
-        if (num_frags > 0)
-                return PDC_SUCCESS;
-        else
-                return -EIO;
+        return PDC_SUCCESS;
+}
+
+/**
+ * pdc_receive() - Process as many responses as are available in the rx ring.
+ * @pdcs: PDC state
+ *
+ * Called within the hard IRQ.
+ * Return:
+ */
+static int
+pdc_receive(struct pdc_state *pdcs)
+{
+        int rx_status;
+
+        /* read last_rx_curr from register once */
+        pdcs->last_rx_curr =
+            (ioread32((void *)&pdcs->rxregs_64->status0) &
+             CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;
+
+        do {
+                /* Could be many frames ready */
+                rx_status = pdc_receive_one(pdcs);
+        } while (rx_status == PDC_SUCCESS);
+
+        return 0;
 }
 
 /**
@@ -946,14 +959,13 @@ static irqreturn_t pdc_irq_handler(int irq, void *cookie)
 }
 
 /**
- * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has
- * completed or data is available to receive.
+ * pdc_irq_thread() - Function invoked on deferred thread when data is available
+ * to receive.
  * @irq: Interrupt number
  * @cookie: PDC state for PDC that generated the interrupt
  *
- * On DMA tx complete, notify the mailbox client. On DMA rx complete, process
- * as many SPU response messages as are available and send each to the mailbox
- * client.
+ * On DMA rx complete, process as many SPU response messages as are available
+ * and send each to the mailbox client.
  *
  * Return: IRQ_HANDLED if we recognized and handled the interrupt
  *         IRQ_NONE otherwise
@@ -961,39 +973,15 @@ static irqreturn_t pdc_irq_handler(int irq, void *cookie)
 static irqreturn_t pdc_irq_thread(int irq, void *cookie)
 {
         struct pdc_state *pdcs = cookie;
-        struct mbox_controller *mbc;
-        struct mbox_chan *chan;
         bool rx_int;
-        int rx_status;
-        struct brcm_message mssg;
 
         rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
-
         if (pdcs && rx_int) {
                 dev_dbg(&pdcs->pdev->dev,
                         "%s() got irq %d with rx_int %s",
                         __func__, irq, rx_int ? "set" : "clear");
 
-                mbc = &pdcs->mbc;
-                chan = &mbc->chans[0];
-
-                while (1) {
-                        /* Could be many frames ready */
-                        memset(&mssg, 0, sizeof(mssg));
-                        mssg.type = BRCM_MESSAGE_SPU;
-                        rx_status = pdc_receive(pdcs, &mssg);
-                        if (rx_status >= 0) {
-                                dev_dbg(&pdcs->pdev->dev,
-                                        "%s(): invoking client rx cb",
-                                        __func__);
-                                mbox_chan_received_data(chan, &mssg);
-                        } else {
-                                dev_dbg(&pdcs->pdev->dev,
-                                        "%s(): no SPU response available",
-                                        __func__);
-                                break;
-                        }
-                }
+                pdc_receive(pdcs);
                 return IRQ_HANDLED;
         }
         return IRQ_NONE;
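
For readers skimming the change, here is a minimal user-space sketch (not the driver code) of the receive pattern the patch moves to: snapshot the hardware completion index once per interrupt, then drain every completed entry before returning. All names below (struct ring, ring_drain, ring_receive_one, hw_idx) are hypothetical stand-ins for pdcs->last_rx_curr, pdc_receive() and pdc_receive_one(); the real driver reads the index from a device register via ioread32() and uses the NRXDACTIVE() macro to handle ring wrap-around, which this sketch omits.

/* Sketch of "snapshot the completion index once, then drain the ring". */
#include <stdio.h>

#define RING_SIZE 8

struct ring {
        unsigned int read_idx;          /* next entry the consumer will process */
        volatile unsigned int hw_idx;   /* stand-in for the device completion register */
        int entries[RING_SIZE];
};

/* Process one completed entry; return 0 on success, -1 if nothing is ready. */
static int ring_receive_one(struct ring *r, unsigned int hw_snapshot)
{
        if (r->read_idx == hw_snapshot)
                return -1;              /* caught up: no response ready */
        printf("processed entry %d\n", r->entries[r->read_idx % RING_SIZE]);
        r->read_idx++;
        return 0;
}

/* Read the completion index once per call, then drain everything ready. */
static void ring_drain(struct ring *r)
{
        unsigned int hw_snapshot = r->hw_idx;   /* one "register" read */

        while (ring_receive_one(r, hw_snapshot) == 0)
                ;                               /* could be many frames ready */
}

int main(void)
{
        struct ring r = { .read_idx = 0, .hw_idx = 3,
                          .entries = { 10, 11, 12 } };

        ring_drain(&r);                         /* processes entries 10, 11, 12 */
        return 0;
}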