author     yfw <fengwei.yin@linaro.org>          2015-10-25 22:36:22 -0400
committer  Kalle Valo <kvalo@codeaurora.org>     2015-10-28 14:58:58 -0400
commit     072255241d2e3e9c1e23fc693f0333be72adfe2a (patch)
tree       12ea728f30c78315345bbb3d36009b28cb044c60 /drivers/net/wireless/ath/wcn36xx
parent     8e8e54c490032f15779d7b199548eb0143b70f0f (diff)
wcn36xx: Remove warning message when dev is NULL for arm64 dma_alloc.
arm64 requires that all DMA operations be given an actual device.
Otherwise, the following warning is shown and the DMA allocation fails:
WARNING: CPU: 0 PID: 954 at arch/arm64/mm/dma-mapping.c:106 __dma_alloc+0x24c/0x258()
Use an actual device structure for DMA allocation
Modules linked in: wcn36xx wcn36xx_platform
CPU: 0 PID: 954 Comm: ifconfig Not tainted 4.0.0+ #14
Hardware name: Qualcomm Technologies, Inc. MSM 8916 MTP (DT)
Call trace:
[<ffffffc000089904>] dump_backtrace+0x0/0x124
[<ffffffc000089a38>] show_stack+0x10/0x1c
[<ffffffc000627114>] dump_stack+0x80/0xc4
[<ffffffc0000b2e64>] warn_slowpath_common+0x98/0xd0
[<ffffffc0000b2ee8>] warn_slowpath_fmt+0x4c/0x58
[<ffffffc00009487c>] __dma_alloc+0x248/0x258
[<ffffffbffc009270>] wcn36xx_dxe_allocate_mem_pools+0xc4/0x108 [wcn36xx]
[<ffffffbffc0079c4>] wcn36xx_start+0x38/0x240 [wcn36xx]
[<ffffffc0005f161c>] ieee80211_do_open+0x1b0/0x9a4
[<ffffffc0005f1e68>] ieee80211_open+0x58/0x68
[<ffffffc00051693c>] __dev_open+0xb0/0x120
[<ffffffc000516c10>] __dev_change_flags+0x88/0x150
[<ffffffc000516cf4>] dev_change_flags+0x1c/0x5c
[<ffffffc000570950>] devinet_ioctl+0x644/0x6f0
Signed-off-by: Yin, Fengwei <fengwei.yin@linaro.org>
Acked-by: Bjorn Andersson <bjorn.andersson@sonymobile.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
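As a reference for the pattern applied in the diff below, here is a minimal sketch (illustrative only, not code from dxe.c; demo_alloc_ring() is an invented name): on arm64 the DMA API must be handed the struct device that owns the transfer, so each helper takes a device pointer instead of passing NULL.

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only -- demo_alloc_ring() is a made-up helper,
 * not wcn36xx code.  It shows the pattern this patch threads through
 * dxe.c: every DMA call receives the real struct device.
 */
static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *paddr)
{
	/*
	 * dma_alloc_coherent(NULL, ...) is what triggered the arm64
	 * warning quoted above and made the allocation fail; the
	 * owning device must be passed instead.
	 */
	return dma_alloc_coherent(dev, size, paddr, GFP_KERNEL);
}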
Diffstat (limited to 'drivers/net/wireless/ath/wcn36xx')
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c  34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 26085f72fc6a..f8dfa05b290a 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -170,7 +170,7 @@ void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
 	wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
 }
 
-static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
 {
 	struct wcn36xx_dxe_desc *cur_dxe = NULL;
 	struct wcn36xx_dxe_desc *prev_dxe = NULL;
@@ -179,7 +179,7 @@ static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
 	int i;
 
 	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
-	wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
 					      GFP_KERNEL);
 	if (!wcn_ch->cpu_addr)
 		return -ENOMEM;
@@ -271,7 +271,7 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
 	return 0;
 }
 
-static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
 {
 	struct wcn36xx_dxe_desc *dxe = ctl->desc;
 	struct sk_buff *skb;
@@ -280,7 +280,7 @@ static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
 	if (skb == NULL)
 		return -ENOMEM;
 
-	dxe->dst_addr_l = dma_map_single(NULL,
+	dxe->dst_addr_l = dma_map_single(dev,
 					 skb_tail_pointer(skb),
 					 WCN36XX_PKT_SIZE,
 					 DMA_FROM_DEVICE);
@@ -298,7 +298,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
 	cur_ctl = wcn_ch->head_blk_ctl;
 
 	for (i = 0; i < wcn_ch->desc_num; i++) {
-		wcn36xx_dxe_fill_skb(cur_ctl);
+		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
 		cur_ctl = cur_ctl->next;
 	}
 
@@ -361,7 +361,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
 		if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
 			break;
 		if (ctl->skb) {
-			dma_unmap_single(NULL, ctl->desc->src_addr_l,
+			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
 					 ctl->skb->len, DMA_TO_DEVICE);
 			info = IEEE80211_SKB_CB(ctl->skb);
 			if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
@@ -478,7 +478,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
 	while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
 		skb = ctl->skb;
 		dma_addr = dxe->dst_addr_l;
-		wcn36xx_dxe_fill_skb(ctl);
+		wcn36xx_dxe_fill_skb(wcn->dev, ctl);
 
 		switch (ch->ch_type) {
 		case WCN36XX_DXE_CH_RX_L:
@@ -495,7 +495,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
 			wcn36xx_warn("Unknown channel\n");
 		}
 
-		dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+		dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
 				 DMA_FROM_DEVICE);
 		wcn36xx_rx_skb(wcn, skb);
 		ctl = ctl->next;
@@ -544,7 +544,7 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
-	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
 				      GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
@@ -559,7 +559,7 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
-	cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
 				      GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
@@ -578,13 +578,13 @@ out_err:
 void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
 {
 	if (wcn->mgmt_mem_pool.virt_addr)
-		dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+		dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
 				  WCN36XX_DXE_CH_DESC_NUMB_TX_H,
 				  wcn->mgmt_mem_pool.virt_addr,
 				  wcn->mgmt_mem_pool.phy_addr);
 
 	if (wcn->data_mem_pool.virt_addr) {
-		dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+		dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
 				  WCN36XX_DXE_CH_DESC_NUMB_TX_L,
 				  wcn->data_mem_pool.virt_addr,
 				  wcn->data_mem_pool.phy_addr);
@@ -651,7 +651,7 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 		goto unlock;
 	}
 
-	desc->src_addr_l = dma_map_single(NULL,
+	desc->src_addr_l = dma_map_single(wcn->dev,
 					  ctl->skb->data,
 					  ctl->skb->len,
 					  DMA_TO_DEVICE);
@@ -707,7 +707,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for TX LOW channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
 	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
 
 	/* Write channel head to a NEXT register */
@@ -725,7 +725,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for TX HIGH channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
 	wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
 
 	/* Write channel head to a NEXT register */
@@ -745,7 +745,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for RX LOW channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
 
 	/* For RX we need to preallocated buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -775,7 +775,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
 	/***************************************/
 	/* Init descriptors for RX HIGH channel */
 	/***************************************/
-	wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+	wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
 
 	/* For RX we need to prealocat buffers */
 	wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
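The helpers above now receive the device from wcn->dev. As a closing note, the sketch below is a hypothetical platform driver probe (demo_probe() and struct demo_priv are invented names, not the wcn36xx probe path) showing the usual way such a device pointer is captured once and reused for every DMA call:

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical private state; wcn36xx keeps the equivalent pointer in wcn->dev. */
struct demo_priv {
	struct device *dev;	/* device handed to every DMA API call */
	void *ring;
	dma_addr_t ring_phys;
};

static int demo_probe(struct platform_device *pdev)
{
	struct demo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * Remember the probing device so later allocations and mappings
	 * are attributed to a real device rather than NULL.
	 */
	priv->dev = &pdev->dev;

	priv->ring = dma_alloc_coherent(priv->dev, 4096,
					&priv->ring_phys, GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}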