author		Rob Rice <rob.rice@broadcom.com>	2016-11-14 13:26:00 -0500
committer	Jassi Brar <jaswinder.singh@linaro.org>	2016-12-19 09:40:20 -0500
commit		7493cde34efc28641c295ee0d52ab9d790853c62
tree		7863def79ef02d42854ed481cedf64ac3f378ae9
parent		e004c7e7d3b873a671fecf04f197982806e380eb
mailbox: bcm-pdc: Try to improve branch prediction
Use likely/unlikely directives to improve branch prediction.

Signed-off-by: Rob Rice <rob.rice@broadcom.com>
Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Jassi Brar <jaswinder.singh@linaro.org>
-rw-r--r--	drivers/mailbox/bcm-pdc-mailbox.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
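For context: likely() and unlikely() are thin wrappers around GCC's __builtin_expect(), which tells the compiler which arm of a branch to lay out as the straight-line (fall-through) path. Below is a minimal standalone sketch of the idea; the macro bodies mirror include/linux/compiler.h, but example_alloc() is a hypothetical helper for illustration only, not code from this driver:

	#include <stdlib.h>

	/* Expansions as used by the kernel (include/linux/compiler.h). */
	#define likely(x)	__builtin_expect(!!(x), 1)
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/*
	 * Hypothetical helper showing the pattern this patch applies:
	 * allocation failure is rare, so the error check is marked
	 * unlikely and the success path stays straight-line code.
	 */
	static int example_alloc(void **buf, size_t len)
	{
		*buf = malloc(len);
		if (unlikely(!*buf))
			return -1;	/* cold path, moved off the hot path */
		return 0;
	}

Note that these annotations are only layout/prediction hints to the compiler; they change no runtime behavior, which is why the patch below touches only the conditions themselves.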
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 21957609ea91..7ed3f0247cb8 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -834,7 +834,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
 
 	/* allocate a buffer for the dma rx status */
 	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
-	if (!vaddr)
+	if (unlikely(!vaddr))
 		return -ENOMEM;
 
 	/*
@@ -945,14 +945,14 @@ static irqreturn_t pdc_irq_handler(int irq, void *cookie)
 	struct pdc_state *pdcs = cookie;
 	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
 
-	if (intstatus & PDC_RCVINTEN_0)
+	if (likely(intstatus & PDC_RCVINTEN_0))
 		set_bit(PDC_RCVINT_0, &pdcs->intstatus);
 
 	/* Clear interrupt flags in device */
 	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
 
 	/* Wakeup IRQ thread */
-	if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK))
+	if (likely(pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK)))
 		return IRQ_WAKE_THREAD;
 
 	return IRQ_NONE;
@@ -976,7 +976,7 @@ static irqreturn_t pdc_irq_thread(int irq, void *cookie)
 	bool rx_int;
 
 	rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus);
-	if (pdcs && rx_int) {
+	if (likely(pdcs && rx_int)) {
 		dev_dbg(&pdcs->pdev->dev,
 			"%s() got irq %d with rx_int %s",
 			__func__, irq, rx_int ? "set" : "clear");
@@ -1007,14 +1007,14 @@ static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
 
 	/* Allocate tx ring */
 	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
-	if (!tx.vbase) {
+	if (unlikely(!tx.vbase)) {
 		err = -ENOMEM;
 		goto done;
 	}
 
 	/* Allocate rx ring */
 	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
-	if (!rx.vbase) {
+	if (unlikely(!rx.vbase)) {
 		err = -ENOMEM;
 		goto fail_dealloc;
 	}
@@ -1219,21 +1219,21 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
 	u32 tx_desc_req;
 	u32 rx_desc_req;
 
-	if (mssg->type != BRCM_MESSAGE_SPU)
+	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
 		return -ENOTSUPP;
 
 	src_nent = sg_nents(mssg->spu.src);
-	if (src_nent) {
+	if (likely(src_nent)) {
 		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
-		if (nent == 0)
+		if (unlikely(nent == 0))
 			return -EIO;
 	}
 
 	dst_nent = sg_nents(mssg->spu.dst);
-	if (dst_nent) {
+	if (likely(dst_nent)) {
 		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
 				  DMA_FROM_DEVICE);
-		if (nent == 0) {
+		if (unlikely(nent == 0)) {
 			dma_unmap_sg(dev, mssg->spu.src, src_nent,
 				     DMA_TO_DEVICE);
 			return -EIO;
@@ -1251,7 +1251,7 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
 	 */
 	tx_desc_req = pdc_desc_count(mssg->spu.src);
 	rx_desc_req = pdc_desc_count(mssg->spu.dst);
-	if (pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1))
+	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
 		return -ENOSPC;
 
 	/* Create rx descriptors to SPU catch response */
@@ -1262,7 +1262,7 @@ static int pdc_send_data(struct mbox_chan *chan, void *data)
 	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
 	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */
 
-	if (err)
+	if (unlikely(err))
 		dev_err(&pdcs->pdev->dev,
 			"%s failed with error %d", __func__, err);
 