Diffstat (limited to 'drivers/net/wireless/ath/wcn36xx/dxe.c')
 -rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c | 219 ++++++++++++--------
 1 file changed, 129 insertions(+), 90 deletions(-)
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 2c3b899a88fa..5ab3e31c9ffa 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -78,7 +78,6 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
 		if (!cur_ctl)
 			goto out_fail;
 
-		spin_lock_init(&cur_ctl->skb_lock);
 		cur_ctl->ctl_blk_order = i;
 		if (i == 0) {
 			ch->head_blk_ctl = cur_ctl;
@@ -175,13 +174,12 @@ static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn
 	int i;
 
 	size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
-	wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
-					      GFP_KERNEL);
+	wcn_ch->cpu_addr = dma_zalloc_coherent(dev, size,
+					       &wcn_ch->dma_addr,
+					       GFP_KERNEL);
 	if (!wcn_ch->cpu_addr)
 		return -ENOMEM;
 
-	memset(wcn_ch->cpu_addr, 0, size);
-
 	cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
 	cur_ctl = wcn_ch->head_blk_ctl;
 
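Note: the hunk above folds the open-coded dma_alloc_coherent() + memset() pair into a single dma_zalloc_coherent() call, which hands back coherent DMA memory that is already zeroed. A minimal sketch of the equivalence, assuming a struct device *dev, a size and a dma_addr_t are in scope (variable names here are illustrative, not the driver's):

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	/* Before: allocate, then zero by hand. */
	buf = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

	/* After: one call, the memory comes back zeroed. */
	buf = dma_zalloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;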
@@ -275,12 +273,14 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
 	return 0;
 }
 
-static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev,
+				struct wcn36xx_dxe_ctl *ctl,
+				gfp_t gfp)
 {
 	struct wcn36xx_dxe_desc *dxe = ctl->desc;
 	struct sk_buff *skb;
 
-	skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
 	if (skb == NULL)
 		return -ENOMEM;
 
@@ -307,7 +307,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
 	cur_ctl = wcn_ch->head_blk_ctl;
 
 	for (i = 0; i < wcn_ch->desc_num; i++) {
-		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
+		wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
 		cur_ctl = cur_ctl->next;
 	}
 
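Note: threading a gfp_t through wcn36xx_dxe_fill_skb() lets each caller pick the allocation mode its context allows. The initial ring population above runs in process context at channel setup and may sleep; the RX refill later in this patch runs under a spinlock from the IRQ path and must not. A sketch of the two call sites, contrast spelled out:

	/* Setup path, process context: blocking allocation, very unlikely to fail. */
	wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);

	/* RX refill under ch->lock in the IRQ path: must not sleep, may fail,
	 * in which case the old skb is simply kept and reused for RX DMA.
	 */
	ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);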
@@ -367,9 +367,11 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
 	spin_lock_irqsave(&ch->lock, flags);
 	ctl = ch->tail_blk_ctl;
 	do {
-		if (ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD)
+		if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
 			break;
-		if (ctl->skb) {
+
+		if (ctl->skb &&
+		    READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
 			dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
 					 ctl->skb->len, DMA_TO_DEVICE);
 			info = IEEE80211_SKB_CB(ctl->skb);
@@ -377,18 +379,16 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
 			/* Keep frame until TX status comes */
 			ieee80211_free_txskb(wcn->hw, ctl->skb);
 		}
-		spin_lock(&ctl->skb_lock);
+
 		if (wcn->queues_stopped) {
 			wcn->queues_stopped = false;
 			ieee80211_wake_queues(wcn->hw);
 		}
-		spin_unlock(&ctl->skb_lock);
 
 			ctl->skb = NULL;
 		}
 		ctl = ctl->next;
-	} while (ctl != ch->head_blk_ctl &&
-		 !(ctl->desc->ctrl & WCN36xx_DXE_CTRL_VLD));
+	} while (ctl != ch->head_blk_ctl);
 
 	ch->tail_blk_ctl = ctl;
 	spin_unlock_irqrestore(&ch->lock, flags);
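Note: the descriptors live in coherent DMA memory that the DXE engine updates asynchronously, so the CPU-side reads of desc->ctrl are wrapped in READ_ONCE() to force a fresh load on every iteration instead of letting the compiler reuse a stale value cached in a register. A hedged sketch of the failure mode this guards against (illustrative polling loop, not driver code):

	/* Broken: the compiler may hoist the load out of the loop and
	 * never observe the device flipping the VALID bit.
	 */
	while (desc->ctrl & WCN36xx_DXE_CTRL_VLD)
		cpu_relax();

	/* Correct: READ_ONCE() makes every iteration re-read the
	 * descriptor word from memory.
	 */
	while (READ_ONCE(desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
		cpu_relax();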
@@ -431,8 +431,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
 						   WCN36XX_INT_MASK_CHAN_TX_H);
 		}
 
-		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
-		reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
+			    int_reason);
+
+		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
+				  WCN36XX_CH_STAT_INT_ED_MASK))
+			reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
 	}
 
 	if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
@@ -466,8 +470,12 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
 						   WCN36XX_INT_MASK_CHAN_TX_L);
 		}
 
-		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
-		reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+		wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
+			    int_reason);
+
+		if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
+				  WCN36XX_CH_STAT_INT_ED_MASK))
+			reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
 	}
 
 	return IRQ_HANDLED;
@@ -477,9 +485,8 @@ static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
 {
 	struct wcn36xx *wcn = (struct wcn36xx *)dev;
 
-	disable_irq_nosync(wcn->rx_irq);
 	wcn36xx_dxe_rx_frame(wcn);
-	enable_irq(wcn->rx_irq);
+
 	return IRQ_HANDLED;
 }
 
@@ -513,27 +520,53 @@ out_err:
 }
 
 static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
-				     struct wcn36xx_dxe_ch *ch)
+				     struct wcn36xx_dxe_ch *ch,
+				     u32 ctrl,
+				     u32 en_mask,
+				     u32 int_mask,
+				     u32 status_reg)
 {
-	struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
-	struct wcn36xx_dxe_desc *dxe = ctl->desc;
+	struct wcn36xx_dxe_desc *dxe;
+	struct wcn36xx_dxe_ctl *ctl;
 	dma_addr_t dma_addr;
 	struct sk_buff *skb;
-	int ret = 0, int_mask;
-	u32 value;
+	u32 int_reason;
+	int ret;
 
-	if (ch->ch_type == WCN36XX_DXE_CH_RX_L) {
-		value = WCN36XX_DXE_CTRL_RX_L;
-		int_mask = WCN36XX_DXE_INT_CH1_MASK;
-	} else {
-		value = WCN36XX_DXE_CTRL_RX_H;
-		int_mask = WCN36XX_DXE_INT_CH3_MASK;
-	}
+	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
+
+	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
+		wcn36xx_dxe_write_register(wcn,
+					   WCN36XX_DXE_0_INT_ERR_CLR,
+					   int_mask);
+
+		wcn36xx_err("DXE IRQ reported error on RX channel\n");
+	}
 
-	while (!(dxe->ctrl & WCN36xx_DXE_CTRL_VLD)) {
+	if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
+		wcn36xx_dxe_write_register(wcn,
+					   WCN36XX_DXE_0_INT_DONE_CLR,
+					   int_mask);
+
+	if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
+		wcn36xx_dxe_write_register(wcn,
+					   WCN36XX_DXE_0_INT_ED_CLR,
+					   int_mask);
+
+	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
+			    WCN36XX_CH_STAT_INT_ED_MASK)))
+		return 0;
+
+	spin_lock(&ch->lock);
+
+	ctl = ch->head_blk_ctl;
+	dxe = ctl->desc;
+
+	while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
 		skb = ctl->skb;
 		dma_addr = dxe->dst_addr_l;
-		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl);
+		ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
 		if (0 == ret) {
 			/* new skb allocation ok. Use the new one and queue
 			 * the old one to network system.
@@ -543,13 +576,16 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
 			wcn36xx_rx_skb(wcn, skb);
 		} /* else keep old skb not submitted and use it for rx DMA */
 
-		dxe->ctrl = value;
+		dxe->ctrl = ctrl;
 		ctl = ctl->next;
 		dxe = ctl->desc;
 	}
-	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, int_mask);
+	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
 
 	ch->head_blk_ctl = ctl;
+
+	spin_unlock(&ch->lock);
+
 	return 0;
 }
 
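Note: the rework above turns RX handling from "clear everything, then walk the ring" into a decode-and-acknowledge sequence: read the per-channel status register, ack exactly the conditions that fired (error, done, early-detect), and only take ch->lock and walk descriptors when a completion was actually reported. Reduced to its skeleton (register and mask names as in this patch, flow condensed for illustration):

	wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
	wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);

	if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK)
		/* ack via WCN36XX_DXE_0_INT_ERR_CLR and log the error */;

	if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
			    WCN36XX_CH_STAT_INT_ED_MASK)))
		return 0;	/* nothing completed; leave the ring alone */

	spin_lock(&ch->lock);
	/* ... reap ready descriptors, refill skbs with GFP_ATOMIC ... */
	spin_unlock(&ch->lock);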
@@ -560,19 +596,20 @@ void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
 	wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
 
 	/* RX_LOW_PRI */
-	if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
-		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
-					   WCN36XX_DXE_INT_CH1_MASK);
-		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
-	}
+	if (int_src & WCN36XX_DXE_INT_CH1_MASK)
+		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
+					  WCN36XX_DXE_CTRL_RX_L,
+					  WCN36XX_DXE_INT_CH1_MASK,
+					  WCN36XX_INT_MASK_CHAN_RX_L,
+					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
 
 	/* RX_HIGH_PRI */
-	if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
-		/* Clean up all the INT within this channel */
-		wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
-					   WCN36XX_DXE_INT_CH3_MASK);
-		wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
-	}
+	if (int_src & WCN36XX_DXE_INT_CH3_MASK)
+		wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
+					  WCN36XX_DXE_CTRL_RX_H,
+					  WCN36XX_DXE_INT_CH3_MASK,
+					  WCN36XX_INT_MASK_CHAN_RX_H,
+					  WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
 
 	if (!int_src)
 		wcn36xx_warn("No DXE interrupt pending\n");
@@ -590,13 +627,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
-	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
-				      GFP_KERNEL);
+	cpu_addr = dma_zalloc_coherent(wcn->dev, s,
+				       &wcn->mgmt_mem_pool.phy_addr,
+				       GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
 
 	wcn->mgmt_mem_pool.virt_addr = cpu_addr;
-	memset(cpu_addr, 0, s);
 
 	/* Allocate BD headers for DATA frames */
 
@@ -605,13 +642,13 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
 		16 - (WCN36XX_BD_CHUNK_SIZE % 8);
 
 	s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
-	cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
-				      GFP_KERNEL);
+	cpu_addr = dma_zalloc_coherent(wcn->dev, s,
+				       &wcn->data_mem_pool.phy_addr,
+				       GFP_KERNEL);
 	if (!cpu_addr)
 		goto out_err;
 
 	wcn->data_mem_pool.virt_addr = cpu_addr;
-	memset(cpu_addr, 0, s);
 
 	return 0;
 
@@ -643,8 +680,8 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 			 struct sk_buff *skb,
 			 bool is_low)
 {
-	struct wcn36xx_dxe_ctl *ctl = NULL;
-	struct wcn36xx_dxe_desc *desc = NULL;
+	struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
+	struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
 	struct wcn36xx_dxe_ch *ch = NULL;
 	unsigned long flags;
 	int ret;
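Note: the renaming reflects that one transmitted frame always consumes two consecutive ring entries; the first descriptor carries the buffer-descriptor (BD) header and the second carries the skb payload, and the hardware walks them as a pair. Schematically (comment only, names from this patch):

	/*
	 *  head_blk_ctl --> [ ctl_bd ] --> [ ctl_skb ] --> next free entry
	 *                    desc_bd        desc_skb
	 *                    BD header      frame payload
	 */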
@@ -652,73 +689,75 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
 	ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
 
 	spin_lock_irqsave(&ch->lock, flags);
-	ctl = ch->head_blk_ctl;
-
-	spin_lock(&ctl->next->skb_lock);
+	ctl_bd = ch->head_blk_ctl;
+	ctl_skb = ctl_bd->next;
 
 	/*
 	 * If skb is not null that means that we reached the tail of the ring
 	 * hence ring is full. Stop queues to let mac80211 back off until ring
 	 * has an empty slot again.
 	 */
-	if (NULL != ctl->next->skb) {
+	if (NULL != ctl_skb->skb) {
 		ieee80211_stop_queues(wcn->hw);
 		wcn->queues_stopped = true;
-		spin_unlock(&ctl->next->skb_lock);
 		spin_unlock_irqrestore(&ch->lock, flags);
 		return -EBUSY;
 	}
-	spin_unlock(&ctl->next->skb_lock);
 
-	ctl->skb = NULL;
-	desc = ctl->desc;
+	if (unlikely(ctl_skb->bd_cpu_addr)) {
+		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	desc_bd = ctl_bd->desc;
+	desc_skb = ctl_skb->desc;
+
+	ctl_bd->skb = NULL;
 
 	/* write buffer descriptor */
-	memcpy(ctl->bd_cpu_addr, bd, sizeof(*bd));
+	memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
 
 	/* Set source address of the BD we send */
-	desc->src_addr_l = ctl->bd_phy_addr;
-
-	desc->dst_addr_l = ch->dxe_wq;
-	desc->fr_len = sizeof(struct wcn36xx_tx_bd);
-	desc->ctrl = ch->ctrl_bd;
+	desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
+	desc_bd->dst_addr_l = ch->dxe_wq;
+	desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
 
 	wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
 
 	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
-			 (char *)desc, sizeof(*desc));
+			 (char *)desc_bd, sizeof(*desc_bd));
 	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
-			 "BD   >>> ", (char *)ctl->bd_cpu_addr,
+			 "BD   >>> ", (char *)ctl_bd->bd_cpu_addr,
 			 sizeof(struct wcn36xx_tx_bd));
 
-	/* Set source address of the SKB we send */
-	ctl = ctl->next;
-	ctl->skb = skb;
-	desc = ctl->desc;
-	if (ctl->bd_cpu_addr) {
-		wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	desc->src_addr_l = dma_map_single(wcn->dev,
-					  ctl->skb->data,
-					  ctl->skb->len,
-					  DMA_TO_DEVICE);
-
-	desc->dst_addr_l = ch->dxe_wq;
-	desc->fr_len = ctl->skb->len;
-
-	/* set dxe descriptor to VALID */
-	desc->ctrl = ch->ctrl_skb;
+	desc_skb->src_addr_l = dma_map_single(wcn->dev,
+					      skb->data,
+					      skb->len,
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
+		dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	ctl_skb->skb = skb;
+	desc_skb->dst_addr_l = ch->dxe_wq;
+	desc_skb->fr_len = ctl_skb->skb->len;
 
 	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
-			 (char *)desc, sizeof(*desc));
+			 (char *)desc_skb, sizeof(*desc_skb));
 	wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB  >>> ",
-			 (char *)ctl->skb->data, ctl->skb->len);
+			 (char *)ctl_skb->skb->data, ctl_skb->skb->len);
 
 	/* Move the head of the ring to the next empty descriptor */
-	ch->head_blk_ctl = ctl->next;
+	ch->head_blk_ctl = ctl_skb->next;
+
+	/* Commit all previous writes and set descriptors to VALID */
+	wmb();
+	desc_skb->ctrl = ch->ctrl_skb;
+	wmb();
+	desc_bd->ctrl = ch->ctrl_bd;
 
 	/*
 	 * When connected and trying to send data frame chip can be in sleep
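Note: arming the descriptors is now the very last step, done in device-visible order. All plain writes that fill the descriptors and the BD are committed with wmb() before the skb descriptor's VALID bit is set, and again before the BD descriptor's, so the DXE engine (which starts consuming at the BD entry) can never chase a half-written chain. The idiom in isolation (illustrative field and flag names, not the driver's):

	desc->src_addr_l = dma_addr;		/* plain writes: fill everything first */
	desc->dst_addr_l = wq;
	desc->fr_len = len;
	wmb();					/* order the fills before the arming */
	desc->ctrl = ctrl_with_valid_bit;	/* device may consume from here on */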
