Diffstat (limited to 'drivers/net/wireless/ath/ath10k/pci.c')
-rw-r--r--	drivers/net/wireless/ath/ath10k/pci.c	446
1 file changed, 262 insertions(+), 184 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e2f9ef50b1bd..dff23d97bed0 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
 module_param(ath10k_target_ps, uint, 0644);
 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
 
-#define QCA988X_1_0_DEVICE_ID	(0xabcd)
 #define QCA988X_2_0_DEVICE_ID	(0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
-	{ PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
 	{0}
 };
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
 
 static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
					     int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
 static void ath10k_pci_stop_ce(struct ath10k *ar);
 static void ath10k_pci_device_reset(struct ath10k *ar);
 static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
 static void ath10k_pci_stop_intr(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
-	/* host->target HTC control and raw streams */
-	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
-	/* could be moved to share CE3 */
-	/* target->host HTT + HTC control */
-	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
-	/* target->host WMI */
-	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
-	/* host->target WMI */
-	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
-	/* host->target HTT */
-	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
-		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
-	/* unused */
-	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-	/* Target autonomous hif_memcpy */
-	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-	/* ce_diag, the Diagnostic Window */
-	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 16,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 512,
+		.dest_nentries = 512,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 32,
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 32,
+		.src_sz_max = 2048,
+		.dest_nentries = 0,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+		.src_sz_max = 256,
+		.dest_nentries = 0,
+	},
+
+	/* CE5: unused */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE6: target autonomous hif_memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE7: ce_diag, the Diagnostic Window */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 2,
+		.src_sz_max = DIAG_TRANSFER_LIMIT,
+		.dest_nentries = 2,
+	},
 };
 
 /* Target firmware's Copy Engine configuration. */
 static const struct ce_pipe_config target_ce_config_wlan[] = {
-	/* host->target HTC control and raw streams */
-	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
-	/* target->host HTT + HTC control */
-	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
-	/* target->host WMI */
-	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
-	/* host->target WMI */
-	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-	/* host->target HTT */
-	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+	/* CE0: host->target HTC control and raw streams */
+	{
+		.pipenum = 0,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 32,
+		.nbytes_max = 256,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE1: target->host HTT + HTC control */
+	{
+		.pipenum = 1,
+		.pipedir = PIPEDIR_IN,
+		.nentries = 32,
+		.nbytes_max = 512,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE2: target->host WMI */
+	{
+		.pipenum = 2,
+		.pipedir = PIPEDIR_IN,
+		.nentries = 32,
+		.nbytes_max = 2048,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE3: host->target WMI */
+	{
+		.pipenum = 3,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 32,
+		.nbytes_max = 2048,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE4: host->target HTT */
+	{
+		.pipenum = 4,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 256,
+		.nbytes_max = 256,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
 	/* NB: 50% of src nentries, since tx has 2 frags */
-	/* unused */
-	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-	/* Reserved for target autonomous hif_memcpy */
-	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+	/* CE5: unused */
+	{
+		.pipenum = 5,
+		.pipedir = PIPEDIR_OUT,
+		.nentries = 32,
+		.nbytes_max = 2048,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
+	/* CE6: Reserved for target autonomous hif_memcpy */
+	{
+		.pipenum = 6,
+		.pipedir = PIPEDIR_INOUT,
+		.nentries = 32,
+		.nbytes_max = 4096,
+		.flags = CE_ATTR_FLAGS,
+		.reserved = 0,
+	},
+
 	/* CE7 used only by Host */
 };
 
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
 	unsigned int id;
 	unsigned int flags;
-	struct ce_state *ce_diag;
+	struct ath10k_ce_pipe *ce_diag;
 	/* Host buffer address in CE space */
 	u32 ce_data;
 	dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
 	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
 	unsigned int id;
 	unsigned int flags;
-	struct ce_state *ce_diag;
+	struct ath10k_ce_pipe *ce_diag;
 	void *data_buf = NULL;
 	u32 ce_data;	/* Host buffer address in CE space */
 	dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
 		ath10k_warn("Unable to wakeup target\n");
 }
 
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
 	atomic_inc(&ar_pci->keep_awake_count);
 
 	if (ar_pci->verified_awake)
-		return;
+		return 0;
 
 	for (;;) {
 		if (ath10k_pci_target_is_awake(ar)) {
 			ar_pci->verified_awake = true;
-			break;
+			return 0;
 		}
 
 		if (tot_delay > PCIE_WAKE_TIMEOUT) {
-			ath10k_warn("target takes too long to wake up (awake count %d)\n",
+			ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
+				    PCIE_WAKE_TIMEOUT,
 				    atomic_read(&ar_pci->keep_awake_count));
-			break;
+			return -ETIMEDOUT;
 		}
 
 		udelay(curr_delay);
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
  * FIXME: Handle OOM properly.
  */
 static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
 {
 	struct ath10k_pci_compl *compl = NULL;
 
@@ -511,39 +612,28 @@ exit:
 }
 
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
-				    void *transfer_context,
-				    u32 ce_data,
-				    unsigned int nbytes,
-				    unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
 	struct ath10k_pci_compl *compl;
-	bool process = false;
-
-	do {
-		/*
-		 * For the send completion of an item in sendlist, just
-		 * increment num_sends_allowed. The upper layer callback will
-		 * be triggered when last fragment is done with send.
-		 */
-		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
-			spin_lock_bh(&pipe_info->pipe_lock);
-			pipe_info->num_sends_allowed++;
-			spin_unlock_bh(&pipe_info->pipe_lock);
-			continue;
-		}
+	void *transfer_context;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
 
+	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+					     &ce_data, &nbytes,
+					     &transfer_id) == 0) {
 		compl = get_free_compl(pipe_info);
 		if (!compl)
 			break;
 
-		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+		compl->state = ATH10K_PCI_COMPL_SEND;
 		compl->ce_state = ce_state;
 		compl->pipe_info = pipe_info;
-		compl->transfer_context = transfer_context;
+		compl->skb = transfer_context;
 		compl->nbytes = nbytes;
 		compl->transfer_id = transfer_id;
 		compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
 		spin_lock_bh(&ar_pci->compl_lock);
 		list_add_tail(&compl->list, &ar_pci->compl_process);
 		spin_unlock_bh(&ar_pci->compl_lock);
-
-		process = true;
-	} while (ath10k_ce_completed_send_next(ce_state,
-					       &transfer_context,
-					       &ce_data, &nbytes,
-					       &transfer_id) == 0);
-
-	/*
-	 * If only some of the items within a sendlist have completed,
-	 * don't invoke completion processing until the entire sendlist
-	 * has been sent.
-	 */
-	if (!process)
-		return;
+	}
 
 	ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
-				    void *transfer_context, u32 ce_data,
-				    unsigned int nbytes,
-				    unsigned int transfer_id,
-				    unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
 {
 	struct ath10k *ar = ce_state->ar;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
+	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
 	struct ath10k_pci_compl *compl;
 	struct sk_buff *skb;
+	void *transfer_context;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
 
-	do {
+	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+					     &ce_data, &nbytes, &transfer_id,
+					     &flags) == 0) {
 		compl = get_free_compl(pipe_info);
 		if (!compl)
 			break;
 
-		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+		compl->state = ATH10K_PCI_COMPL_RECV;
 		compl->ce_state = ce_state;
 		compl->pipe_info = pipe_info;
-		compl->transfer_context = transfer_context;
+		compl->skb = transfer_context;
 		compl->nbytes = nbytes;
 		compl->transfer_id = transfer_id;
 		compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
 		spin_lock_bh(&ar_pci->compl_lock);
 		list_add_tail(&compl->list, &ar_pci->compl_process);
 		spin_unlock_bh(&ar_pci->compl_lock);
-
-	} while (ath10k_ce_completed_recv_next(ce_state,
-					       &transfer_context,
-					       &ce_data, &nbytes,
-					       &transfer_id,
-					       &flags) == 0);
+	}
 
 	ath10k_pci_process_ce(ar);
 }
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 {
 	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-	struct ce_state *ce_hdl = pipe_info->ce_hdl;
-	struct ce_sendlist sendlist;
+	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
 	unsigned int len;
 	u32 flags = 0;
 	int ret;
 
-	memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
 	len = min(bytes, nbuf->len);
 	bytes -= len;
 
@@ -648,8 +720,6 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
648 "ath10k tx: data: ", 720 "ath10k tx: data: ",
649 nbuf->data, nbuf->len); 721 nbuf->data, nbuf->len);
650 722
651 ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
652
653 /* Make sure we have resources to handle this request */ 723 /* Make sure we have resources to handle this request */
654 spin_lock_bh(&pipe_info->pipe_lock); 724 spin_lock_bh(&pipe_info->pipe_lock);
655 if (!pipe_info->num_sends_allowed) { 725 if (!pipe_info->num_sends_allowed) {
@@ -660,7 +730,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 	pipe_info->num_sends_allowed--;
 	spin_unlock_bh(&pipe_info->pipe_lock);
 
-	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
+				      skb_cb->paddr, len, flags);
 	if (ret)
 		ath10k_warn("CE send failed: %p\n", nbuf);
 
@@ -670,7 +741,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
+	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
 	int ret;
 
 	spin_lock_bh(&pipe_info->pipe_lock);
@@ -764,9 +835,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
 static int ath10k_pci_start_ce(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_diag = ar_pci->ce_diag;
+	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
 	const struct ce_attr *attr;
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	struct ath10k_pci_compl *compl;
 	int i, pipe_num, completions, disable_interrupts;
 
@@ -805,15 +876,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
 			continue;
 
 		for (i = 0; i < completions; i++) {
-			compl = kmalloc(sizeof(struct ath10k_pci_compl),
-					GFP_KERNEL);
+			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
 			if (!compl) {
 				ath10k_warn("No memory for completion state\n");
 				ath10k_pci_stop_ce(ar);
 				return -ENOMEM;
 			}
 
-			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+			compl->state = ATH10K_PCI_COMPL_FREE;
 			list_add_tail(&compl->list, &pipe_info->compl_free);
 		}
 	}
@@ -840,7 +910,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
 	 * their associated resources */
 	spin_lock_bh(&ar_pci->compl_lock);
 	list_for_each_entry(compl, &ar_pci->compl_process, list) {
-		skb = (struct sk_buff *)compl->transfer_context;
+		skb = compl->skb;
 		ATH10K_SKB_CB(skb)->is_aborted = true;
 	}
 	spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +920,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_pci_compl *compl, *tmp;
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	struct sk_buff *netbuf;
 	int pipe_num;
 
@@ -861,7 +931,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 
 	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
 		list_del(&compl->list);
-		netbuf = (struct sk_buff *)compl->transfer_context;
+		netbuf = compl->skb;
 		dev_kfree_skb_any(netbuf);
 		kfree(compl);
 	}
@@ -912,12 +982,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
 		list_del(&compl->list);
 		spin_unlock_bh(&ar_pci->compl_lock);
 
-		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+		switch (compl->state) {
+		case ATH10K_PCI_COMPL_SEND:
 			cb->tx_completion(ar,
-					  compl->transfer_context,
+					  compl->skb,
 					  compl->transfer_id);
 			send_done = 1;
-		} else {
+			break;
+		case ATH10K_PCI_COMPL_RECV:
 			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
 			if (ret) {
 				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +997,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
 				break;
 			}
 
-			skb = (struct sk_buff *)compl->transfer_context;
+			skb = compl->skb;
 			nbytes = compl->nbytes;
 
 			ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,9 +1016,17 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
 					    nbytes,
 					    skb->len + skb_tailroom(skb));
 			}
+			break;
+		case ATH10K_PCI_COMPL_FREE:
+			ath10k_warn("free completion cannot be processed\n");
+			break;
+		default:
+			ath10k_warn("invalid completion state (%d)\n",
+				    compl->state);
+			break;
 		}
 
-		compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+		compl->state = ATH10K_PCI_COMPL_FREE;
 
 		/*
 		 * Add completion back to the pipe's free list.
@@ -1037,12 +1117,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
 						 &dl_is_polled);
 }
 
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
 				   int num)
 {
 	struct ath10k *ar = pipe_info->hif_ce_state;
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_state = pipe_info->ce_hdl;
+	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
 	struct sk_buff *skb;
 	dma_addr_t ce_data;
 	int i, ret = 0;
@@ -1097,7 +1177,7 @@ err:
 static int ath10k_pci_post_rx(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
 	int pipe_num, ret = 0;
 
@@ -1147,11 +1227,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
 	return 0;
 }
 
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	struct ce_state *ce_hdl;
+	struct ath10k_ce_pipe *ce_hdl;
 	u32 buf_sz;
 	struct sk_buff *netbuf;
 	u32 ce_data;
@@ -1179,11 +1259,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
 	}
 }
 
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	struct ce_state *ce_hdl;
+	struct ath10k_ce_pipe *ce_hdl;
 	struct sk_buff *netbuf;
 	u32 ce_data;
 	unsigned int nbytes;
@@ -1206,15 +1286,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
 
 	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
 					  &ce_data, &nbytes, &id) == 0) {
-		if (netbuf != CE_SENDLIST_ITEM_CTXT)
-			/*
-			 * Indicate the completion to higer layer to free
-			 * the buffer
-			 */
-			ATH10K_SKB_CB(netbuf)->is_aborted = true;
-			ar_pci->msg_callbacks_current.tx_completion(ar,
-								    netbuf,
-								    id);
+		/*
+		 * Indicate the completion to higer layer to free
+		 * the buffer
+		 */
+		ATH10K_SKB_CB(netbuf)->is_aborted = true;
+		ar_pci->msg_callbacks_current.tx_completion(ar,
+							    netbuf,
+							    id);
 	}
 }
 
@@ -1232,7 +1311,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 	int pipe_num;
 
 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
-		struct hif_ce_pipe_info *pipe_info;
+		struct ath10k_pci_pipe *pipe_info;
 
 		pipe_info = &ar_pci->pipe_info[pipe_num];
 		ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1322,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	int pipe_num;
 
 	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1372,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 					 void *resp, u32 *resp_len)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
-	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
 	dma_addr_t req_paddr = 0;
 	dma_addr_t resp_paddr = 0;
 	struct bmi_xfer xfer = {};
@@ -1378,13 +1459,16 @@ err_dma:
 	return ret;
 }
 
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
-				     void *transfer_context,
-				     u32 data,
-				     unsigned int nbytes,
-				     unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
-	struct bmi_xfer *xfer = transfer_context;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+
+	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id))
+		return;
 
 	if (xfer->wait_for_resp)
 		return;
@@ -1392,14 +1476,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
 	complete(&xfer->done);
 }
 
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
-				     void *transfer_context,
-				     u32 data,
-				     unsigned int nbytes,
-				     unsigned int transfer_id,
-				     unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
-	struct bmi_xfer *xfer = transfer_context;
+	struct bmi_xfer *xfer;
+	u32 ce_data;
+	unsigned int nbytes;
+	unsigned int transfer_id;
+	unsigned int flags;
+
+	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+					  &nbytes, &transfer_id, &flags))
+		return;
 
 	if (!xfer->wait_for_resp) {
 		ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1766,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct hif_ce_pipe_info *pipe_info;
+	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
 	int pipe_num;
 
@@ -1895,7 +1982,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
 
 static void ath10k_pci_ce_tasklet(unsigned long ptr)
 {
-	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
 	struct ath10k_pci *ar_pci = pipe->ar_pci;
 
 	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2299,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
 
 static void ath10k_pci_device_reset(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	void __iomem *mem = ar_pci->mem;
 	int i;
 	u32 val;
 
 	if (!SOC_GLOBAL_RESET_ADDRESS)
 		return;
 
-	if (!mem)
-		return;
-
-	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
 			       PCIE_SOC_WAKE_V_MASK);
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
 		if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2314,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
 	}
 
 	/* Put Target, including PCIe, into RESET. */
-	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
 	val |= 1;
-	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
 					  RTC_STATE_COLD_RESET_MASK)
 			break;
 		msleep(1);
@@ -2245,16 +2327,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
 
 	/* Pull Target, including PCIe, out of RESET. */
 	val &= ~1;
-	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
 	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
 		      RTC_STATE_COLD_RESET_MASK))
 			break;
 		msleep(1);
 	}
 
-	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+	ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
 }
 
 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2349,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
 
 		switch (i) {
 		case ATH10K_PCI_FEATURE_MSI_X:
-			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
-			break;
-		case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
-			ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
 			break;
 		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
-			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
 			break;
 		}
 	}
@@ -2286,7 +2365,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	int ret = 0;
 	struct ath10k *ar;
 	struct ath10k_pci *ar_pci;
-	u32 lcr_val;
+	u32 lcr_val, chip_id;
 
 	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
@@ -2298,9 +2377,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	ar_pci->dev = &pdev->dev;
 
 	switch (pci_dev->device) {
-	case QCA988X_1_0_DEVICE_ID:
-		set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
-		break;
 	case QCA988X_2_0_DEVICE_ID:
 		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
 		break;
@@ -2322,10 +2398,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 		goto err_ar_pci;
 	}
 
-	/* Enable QCA988X_1.0 HW workarounds */
-	if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
-		spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
 	ar_pci->ar = ar;
 	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
 	atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2467,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
 	spin_lock_init(&ar_pci->ce_lock);
 
-	ar_pci->cacheline_sz = dma_get_cache_alignment();
+	ret = ath10k_do_pci_wake(ar);
+	if (ret) {
+		ath10k_err("Failed to get chip id: %d\n", ret);
+		return ret;
+	}
+
+	chip_id = ath10k_pci_read32(ar,
+				    RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+	ath10k_do_pci_sleep(ar);
+
+	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
-	ret = ath10k_core_register(ar);
+	ret = ath10k_core_register(ar, chip_id);
 	if (ret) {
 		ath10k_err("could not register driver core (%d)\n", ret);
 		goto err_iomap;
@@ -2414,7 +2497,6 @@ err_region:
 err_device:
 	pci_disable_device(pdev);
 err_ar:
-	pci_set_drvdata(pdev, NULL);
 	ath10k_core_destroy(ar);
 err_ar_pci:
 	/* call HIF PCI free here */
@@ -2442,7 +2524,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 
 	ath10k_core_unregister(ar);
 
-	pci_set_drvdata(pdev, NULL);
 	pci_iounmap(pdev, ar_pci->mem);
 	pci_release_region(pdev, BAR_NUM);
 	pci_clear_master(pdev);
@@ -2483,9 +2564,6 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);