diff options
author | Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com> | 2015-02-15 07:02:33 -0500 |
---|---|---|
committer | Kalle Valo <kvalo@codeaurora.org> | 2015-02-27 03:15:18 -0500 |
commit | 33c477fdab257efcad139ac2a5031708aad2a1e7 (patch) | |
tree | 357685d875d83b00bf7137a44c991ef018f33533 /drivers/net/wireless/ath/wil6210 | |
parent | e3351277ac585df77ac2454c518205897c01a184 (diff) |
wil6210: branch prediction hints
Mark expected branches using likely()/unlikely().
Do it on the high-performance routes — the data path and interrupt handlers.
Signed-off-by: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
Diffstat (limited to 'drivers/net/wireless/ath/wil6210')
-rw-r--r-- | drivers/net/wireless/ath/wil6210/interrupt.c | 25 | ||||
-rw-r--r-- | drivers/net/wireless/ath/wil6210/txrx.c | 40 |
2 files changed, 33 insertions, 32 deletions
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c index d5a651bb800e..28ffc18466c4 100644 --- a/drivers/net/wireless/ath/wil6210/interrupt.c +++ b/drivers/net/wireless/ath/wil6210/interrupt.c | |||
@@ -226,7 +226,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) | |||
226 | trace_wil6210_irq_rx(isr); | 226 | trace_wil6210_irq_rx(isr); |
227 | wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); | 227 | wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); |
228 | 228 | ||
229 | if (!isr) { | 229 | if (unlikely(!isr)) { |
230 | wil_err(wil, "spurious IRQ: RX\n"); | 230 | wil_err(wil, "spurious IRQ: RX\n"); |
231 | return IRQ_NONE; | 231 | return IRQ_NONE; |
232 | } | 232 | } |
@@ -239,17 +239,18 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) | |||
239 | * action is always the same - should empty the accumulated | 239 | * action is always the same - should empty the accumulated |
240 | * packets from the RX ring. | 240 | * packets from the RX ring. |
241 | */ | 241 | */ |
242 | if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) { | 242 | if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE | |
243 | BIT_DMA_EP_RX_ICR_RX_HTRSH))) { | ||
243 | wil_dbg_irq(wil, "RX done\n"); | 244 | wil_dbg_irq(wil, "RX done\n"); |
244 | 245 | ||
245 | if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH) | 246 | if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH)) |
246 | wil_err_ratelimited(wil, | 247 | wil_err_ratelimited(wil, |
247 | "Received \"Rx buffer is in risk of overflow\" interrupt\n"); | 248 | "Received \"Rx buffer is in risk of overflow\" interrupt\n"); |
248 | 249 | ||
249 | isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | | 250 | isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | |
250 | BIT_DMA_EP_RX_ICR_RX_HTRSH); | 251 | BIT_DMA_EP_RX_ICR_RX_HTRSH); |
251 | if (test_bit(wil_status_reset_done, wil->status)) { | 252 | if (likely(test_bit(wil_status_reset_done, wil->status))) { |
252 | if (test_bit(wil_status_napi_en, wil->status)) { | 253 | if (likely(test_bit(wil_status_napi_en, wil->status))) { |
253 | wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); | 254 | wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); |
254 | need_unmask = false; | 255 | need_unmask = false; |
255 | napi_schedule(&wil->napi_rx); | 256 | napi_schedule(&wil->napi_rx); |
@@ -262,7 +263,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie) | |||
262 | } | 263 | } |
263 | } | 264 | } |
264 | 265 | ||
265 | if (isr) | 266 | if (unlikely(isr)) |
266 | wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); | 267 | wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); |
267 | 268 | ||
268 | /* Rx IRQ will be enabled when NAPI processing finished */ | 269 | /* Rx IRQ will be enabled when NAPI processing finished */ |
@@ -286,19 +287,19 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) | |||
286 | trace_wil6210_irq_tx(isr); | 287 | trace_wil6210_irq_tx(isr); |
287 | wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); | 288 | wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); |
288 | 289 | ||
289 | if (!isr) { | 290 | if (unlikely(!isr)) { |
290 | wil_err(wil, "spurious IRQ: TX\n"); | 291 | wil_err(wil, "spurious IRQ: TX\n"); |
291 | return IRQ_NONE; | 292 | return IRQ_NONE; |
292 | } | 293 | } |
293 | 294 | ||
294 | wil6210_mask_irq_tx(wil); | 295 | wil6210_mask_irq_tx(wil); |
295 | 296 | ||
296 | if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { | 297 | if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) { |
297 | wil_dbg_irq(wil, "TX done\n"); | 298 | wil_dbg_irq(wil, "TX done\n"); |
298 | isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; | 299 | isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; |
299 | /* clear also all VRING interrupts */ | 300 | /* clear also all VRING interrupts */ |
300 | isr &= ~(BIT(25) - 1UL); | 301 | isr &= ~(BIT(25) - 1UL); |
301 | if (test_bit(wil_status_reset_done, wil->status)) { | 302 | if (likely(test_bit(wil_status_reset_done, wil->status))) { |
302 | wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); | 303 | wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); |
303 | need_unmask = false; | 304 | need_unmask = false; |
304 | napi_schedule(&wil->napi_tx); | 305 | napi_schedule(&wil->napi_tx); |
@@ -307,7 +308,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie) | |||
307 | } | 308 | } |
308 | } | 309 | } |
309 | 310 | ||
310 | if (isr) | 311 | if (unlikely(isr)) |
311 | wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); | 312 | wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); |
312 | 313 | ||
313 | /* Tx IRQ will be enabled when NAPI processing finished */ | 314 | /* Tx IRQ will be enabled when NAPI processing finished */ |
@@ -496,11 +497,11 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie) | |||
496 | /** | 497 | /** |
497 | * pseudo_cause is Clear-On-Read, no need to ACK | 498 | * pseudo_cause is Clear-On-Read, no need to ACK |
498 | */ | 499 | */ |
499 | if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)) | 500 | if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff))) |
500 | return IRQ_NONE; | 501 | return IRQ_NONE; |
501 | 502 | ||
502 | /* FIXME: IRQ mask debug */ | 503 | /* FIXME: IRQ mask debug */ |
503 | if (wil6210_debug_irq_mask(wil, pseudo_cause)) | 504 | if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause))) |
504 | return IRQ_NONE; | 505 | return IRQ_NONE; |
505 | 506 | ||
506 | trace_wil6210_irq_pseudo(pseudo_cause); | 507 | trace_wil6210_irq_pseudo(pseudo_cause); |
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 779d8369f9bc..7e119d0d8454 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c | |||
@@ -370,11 +370,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, | |||
370 | 370 | ||
371 | BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); | 371 | BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); |
372 | 372 | ||
373 | if (wil_vring_is_empty(vring)) | 373 | if (unlikely(wil_vring_is_empty(vring))) |
374 | return NULL; | 374 | return NULL; |
375 | 375 | ||
376 | _d = &vring->va[vring->swhead].rx; | 376 | _d = &vring->va[vring->swhead].rx; |
377 | if (!(_d->dma.status & RX_DMA_STATUS_DU)) { | 377 | if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { |
378 | /* it is not error, we just reached end of Rx done area */ | 378 | /* it is not error, we just reached end of Rx done area */ |
379 | return NULL; | 379 | return NULL; |
380 | } | 380 | } |
@@ -394,7 +394,7 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, | |||
394 | wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, | 394 | wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, |
395 | (const void *)d, sizeof(*d), false); | 395 | (const void *)d, sizeof(*d), false); |
396 | 396 | ||
397 | if (dmalen > sz) { | 397 | if (unlikely(dmalen > sz)) { |
398 | wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); | 398 | wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); |
399 | kfree_skb(skb); | 399 | kfree_skb(skb); |
400 | return NULL; | 400 | return NULL; |
@@ -423,14 +423,14 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, | |||
423 | * in Rx descriptor. If type is not data, it is 802.11 frame as is | 423 | * in Rx descriptor. If type is not data, it is 802.11 frame as is |
424 | */ | 424 | */ |
425 | ftype = wil_rxdesc_ftype(d) << 2; | 425 | ftype = wil_rxdesc_ftype(d) << 2; |
426 | if (ftype != IEEE80211_FTYPE_DATA) { | 426 | if (unlikely(ftype != IEEE80211_FTYPE_DATA)) { |
427 | wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); | 427 | wil_dbg_txrx(wil, "Non-data frame ftype 0x%08x\n", ftype); |
428 | /* TODO: process it */ | 428 | /* TODO: process it */ |
429 | kfree_skb(skb); | 429 | kfree_skb(skb); |
430 | return NULL; | 430 | return NULL; |
431 | } | 431 | } |
432 | 432 | ||
433 | if (skb->len < ETH_HLEN) { | 433 | if (unlikely(skb->len < ETH_HLEN)) { |
434 | wil_err(wil, "Short frame, len = %d\n", skb->len); | 434 | wil_err(wil, "Short frame, len = %d\n", skb->len); |
435 | /* TODO: process it (i.e. BAR) */ | 435 | /* TODO: process it (i.e. BAR) */ |
436 | kfree_skb(skb); | 436 | kfree_skb(skb); |
@@ -441,9 +441,9 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil, | |||
441 | * and in case of error drop the packet | 441 | * and in case of error drop the packet |
442 | * higher stack layers will handle retransmission (if required) | 442 | * higher stack layers will handle retransmission (if required) |
443 | */ | 443 | */ |
444 | if (d->dma.status & RX_DMA_STATUS_L4I) { | 444 | if (likely(d->dma.status & RX_DMA_STATUS_L4I)) { |
445 | /* L4 protocol identified, csum calculated */ | 445 | /* L4 protocol identified, csum calculated */ |
446 | if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) | 446 | if (likely((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)) |
447 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 447 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
448 | /* If HW reports bad checksum, let IP stack re-check it | 448 | /* If HW reports bad checksum, let IP stack re-check it |
449 | * For example, HW don't understand Microsoft IP stack that | 449 | * For example, HW don't understand Microsoft IP stack that |
@@ -472,7 +472,7 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) | |||
472 | (next_tail != v->swhead) && (count-- > 0); | 472 | (next_tail != v->swhead) && (count-- > 0); |
473 | v->swtail = next_tail) { | 473 | v->swtail = next_tail) { |
474 | rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); | 474 | rc = wil_vring_alloc_skb(wil, v, v->swtail, headroom); |
475 | if (rc) { | 475 | if (unlikely(rc)) { |
476 | wil_err(wil, "Error %d in wil_rx_refill[%d]\n", | 476 | wil_err(wil, "Error %d in wil_rx_refill[%d]\n", |
477 | rc, v->swtail); | 477 | rc, v->swtail); |
478 | break; | 478 | break; |
@@ -534,7 +534,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota) | |||
534 | struct vring *v = &wil->vring_rx; | 534 | struct vring *v = &wil->vring_rx; |
535 | struct sk_buff *skb; | 535 | struct sk_buff *skb; |
536 | 536 | ||
537 | if (!v->va) { | 537 | if (unlikely(!v->va)) { |
538 | wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); | 538 | wil_err(wil, "Rx IRQ while Rx not yet initialized\n"); |
539 | return; | 539 | return; |
540 | } | 540 | } |
@@ -927,7 +927,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, | |||
927 | if (unlikely(!txdata->enabled)) | 927 | if (unlikely(!txdata->enabled)) |
928 | return -EINVAL; | 928 | return -EINVAL; |
929 | 929 | ||
930 | if (avail < 1 + nr_frags) { | 930 | if (unlikely(avail < 1 + nr_frags)) { |
931 | wil_err_ratelimited(wil, | 931 | wil_err_ratelimited(wil, |
932 | "Tx ring[%2d] full. No space for %d fragments\n", | 932 | "Tx ring[%2d] full. No space for %d fragments\n", |
933 | vring_index, 1 + nr_frags); | 933 | vring_index, 1 + nr_frags); |
@@ -948,7 +948,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, | |||
948 | /* 1-st segment */ | 948 | /* 1-st segment */ |
949 | wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); | 949 | wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); |
950 | /* Process TCP/UDP checksum offloading */ | 950 | /* Process TCP/UDP checksum offloading */ |
951 | if (wil_tx_desc_offload_cksum_set(wil, d, skb)) { | 951 | if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { |
952 | wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", | 952 | wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", |
953 | vring_index); | 953 | vring_index); |
954 | goto dma_error; | 954 | goto dma_error; |
@@ -1051,18 +1051,18 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1051 | int rc; | 1051 | int rc; |
1052 | 1052 | ||
1053 | wil_dbg_txrx(wil, "%s()\n", __func__); | 1053 | wil_dbg_txrx(wil, "%s()\n", __func__); |
1054 | if (!test_bit(wil_status_fwready, wil->status)) { | 1054 | if (unlikely(!test_bit(wil_status_fwready, wil->status))) { |
1055 | if (!pr_once_fw) { | 1055 | if (!pr_once_fw) { |
1056 | wil_err(wil, "FW not ready\n"); | 1056 | wil_err(wil, "FW not ready\n"); |
1057 | pr_once_fw = true; | 1057 | pr_once_fw = true; |
1058 | } | 1058 | } |
1059 | goto drop; | 1059 | goto drop; |
1060 | } | 1060 | } |
1061 | if (!test_bit(wil_status_fwconnected, wil->status)) { | 1061 | if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) { |
1062 | wil_err(wil, "FW not connected\n"); | 1062 | wil_err(wil, "FW not connected\n"); |
1063 | goto drop; | 1063 | goto drop; |
1064 | } | 1064 | } |
1065 | if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) { | 1065 | if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) { |
1066 | wil_err(wil, "Xmit in monitor mode not supported\n"); | 1066 | wil_err(wil, "Xmit in monitor mode not supported\n"); |
1067 | goto drop; | 1067 | goto drop; |
1068 | } | 1068 | } |
@@ -1078,7 +1078,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1078 | else | 1078 | else |
1079 | vring = wil_tx_bcast(wil, skb); | 1079 | vring = wil_tx_bcast(wil, skb); |
1080 | } | 1080 | } |
1081 | if (!vring) { | 1081 | if (unlikely(!vring)) { |
1082 | wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); | 1082 | wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); |
1083 | goto drop; | 1083 | goto drop; |
1084 | } | 1084 | } |
@@ -1086,7 +1086,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1086 | rc = wil_tx_vring(wil, vring, skb); | 1086 | rc = wil_tx_vring(wil, vring, skb); |
1087 | 1087 | ||
1088 | /* do we still have enough room in the vring? */ | 1088 | /* do we still have enough room in the vring? */ |
1089 | if (wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring)) { | 1089 | if (unlikely(wil_vring_avail_tx(vring) < wil_vring_wmark_low(vring))) { |
1090 | netif_tx_stop_all_queues(wil_to_ndev(wil)); | 1090 | netif_tx_stop_all_queues(wil_to_ndev(wil)); |
1091 | wil_dbg_txrx(wil, "netif_tx_stop : ring full\n"); | 1091 | wil_dbg_txrx(wil, "netif_tx_stop : ring full\n"); |
1092 | } | 1092 | } |
@@ -1142,12 +1142,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) | |||
1142 | struct wil_net_stats *stats = &wil->sta[cid].stats; | 1142 | struct wil_net_stats *stats = &wil->sta[cid].stats; |
1143 | volatile struct vring_tx_desc *_d; | 1143 | volatile struct vring_tx_desc *_d; |
1144 | 1144 | ||
1145 | if (!vring->va) { | 1145 | if (unlikely(!vring->va)) { |
1146 | wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); | 1146 | wil_err(wil, "Tx irq[%d]: vring not initialized\n", ringid); |
1147 | return 0; | 1147 | return 0; |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | if (!txdata->enabled) { | 1150 | if (unlikely(!txdata->enabled)) { |
1151 | wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid); | 1151 | wil_info(wil, "Tx irq[%d]: vring disabled\n", ringid); |
1152 | return 0; | 1152 | return 0; |
1153 | } | 1153 | } |
@@ -1165,7 +1165,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) | |||
1165 | /* TODO: check we are not past head */ | 1165 | /* TODO: check we are not past head */ |
1166 | 1166 | ||
1167 | _d = &vring->va[lf].tx; | 1167 | _d = &vring->va[lf].tx; |
1168 | if (!(_d->dma.status & TX_DMA_STATUS_DU)) | 1168 | if (unlikely(!(_d->dma.status & TX_DMA_STATUS_DU))) |
1169 | break; | 1169 | break; |
1170 | 1170 | ||
1171 | new_swtail = (lf + 1) % vring->size; | 1171 | new_swtail = (lf + 1) % vring->size; |
@@ -1193,7 +1193,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) | |||
1193 | wil_txdesc_unmap(dev, d, ctx); | 1193 | wil_txdesc_unmap(dev, d, ctx); |
1194 | 1194 | ||
1195 | if (skb) { | 1195 | if (skb) { |
1196 | if (d->dma.error == 0) { | 1196 | if (likely(d->dma.error == 0)) { |
1197 | ndev->stats.tx_packets++; | 1197 | ndev->stats.tx_packets++; |
1198 | stats->tx_packets++; | 1198 | stats->tx_packets++; |
1199 | ndev->stats.tx_bytes += skb->len; | 1199 | ndev->stats.tx_bytes += skb->len; |