author     Takashi Iwai <tiwai@suse.de>  2015-08-07 07:53:41 -0400
committer  Takashi Iwai <tiwai@suse.de>  2015-08-07 07:53:41 -0400
commit     6ac7ada210a8d23a56fbf18b6e1e00528844565c (patch)
tree       f62352a9b08e560f41cd4008ec0029b6b026d3a5 /drivers/net/wireless
parent     73851b36fe73819f8c201971e913324d4846a7ea (diff)
parent     d00a9e02178401433fc386e69c936f2039f07b57 (diff)
Merge tag 'asoc-fix-v4.2-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v4.2

There are a couple of small driver specific fixes here but the overwhelming
bulk of these changes are fixes to the topology ABI that has been newly
introduced in v4.2. Once this makes it into a release we will have to firm
this up but for now getting enhancements in before they've made it into a
release is the most expedient thing.
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c            |   1
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-fh.h          |   6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c   |  12
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h |   3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c        |   3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c         |   3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/time-event.c  |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c          |   2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c        |   5
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h   |  51
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c         | 414
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c      |  52
12 files changed, 144 insertions(+), 410 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
 		return;
 	case AR9300_DEVID_QCA956X:
 		ah->hw_version.macVersion = AR_SREV_VERSION_9561;
+		return;
 	}

 	val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define RX_QUEUE_MASK 255
 #define RX_QUEUE_SIZE_LOG 8

+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
 /**
  * struct iwl_rb_status - reserve buffer status
  * host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
 	hw_addr = (const u8 *)(mac_override +
 			       MAC_ADDRESS_OVERRIDE_FAMILY_8000);

-	/* The byte order is little endian 16 bit, meaning 214365 */
-	data->hw_addr[0] = hw_addr[1];
-	data->hw_addr[1] = hw_addr[0];
-	data->hw_addr[2] = hw_addr[3];
-	data->hw_addr[3] = hw_addr[2];
-	data->hw_addr[4] = hw_addr[5];
-	data->hw_addr[5] = hw_addr[4];
+	/*
+	 * Store the MAC address from MAO section.
+	 * No byte swapping is required in MAO section
+	 */
+	memcpy(data->hw_addr, hw_addr, ETH_ALEN);

 	/*
 	 * Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
  * iwl_umac_scan_flags
  *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
  * can be preempted by other scan requests with higher priority.
- * The low priority scan is aborted.
+ * The low priority scan will be resumed when the higher proirity scan is
+ * completed.
  *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
  * when scan starts.
  */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5000bfcded61 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	cmd->uid = cpu_to_le32(uid);
 	cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));

+	if (type == IWL_MVM_SCAN_SCHED)
+		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
 	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
 		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
 				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 	u8 sta_id;
 	int ret;
+	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

 	lockdep_assert_held(&mvm->mutex);

@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 end:
 	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
 		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
-		      sta->addr, ret);
+		      sta ? sta->addr : zero_addr, ret);
 	return ret;
 }

diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
 {
 	lockdep_assert_held(&mvm->time_event_lock);

-	if (te_data->id == TE_MAX)
+	if (!te_data->vif)
 		return;

 	list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,

 	if (info->band == IEEE80211_BAND_2GHZ &&
 	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-		rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
+		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
 	else
 		rate_flags =
 			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 /* 3165 Series */
 	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},

 /* 7265 Series */
 	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
 #include "iwl-io.h"
 #include "iwl-op-mode.h"

-/*
- * RX related structures and functions
- */
-#define RX_NUM_QUEUES 1
-#define RX_POST_REQ_ALLOC 2
-#define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
-
 struct iwl_host_cmd;

 /*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool:
+ * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
- * @used_count: Number of RBDs handled to allocator to use for allocation
  * @write_actual:
- * @rx_free: list of RBDs with allocated RB ready for use
- * @rx_used: list of RBDs with no RB attached
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
 struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 write;
 	u32 free_count;
-	u32 used_count;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
-};
-
-/**
- * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
- * @req_pending: number of requests the allcator had not processed yet
- * @req_ready: number of requests honored and ready for claiming
- * @rbd_allocated: RBDs with pages allocated and ready to be handled to
- *	the queue. This is a list of &struct iwl_rx_mem_buffer
- * @rbd_empty: RBDs with no page attached for allocator use. This is a list
- *	of &struct iwl_rx_mem_buffer
- * @lock: protects the rbd_allocated and rbd_empty lists
- * @alloc_wq: work queue for background calls
- * @rx_alloc: work struct for background calls
- */
-struct iwl_rb_allocator {
-	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
-	atomic_t req_pending;
-	atomic_t req_ready;
-	struct list_head rbd_allocated;
-	struct list_head rbd_empty;
-	spinlock_t lock;
-	struct workqueue_struct *alloc_wq;
-	struct work_struct rx_alloc;
 };

 struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rba: allocator for RX replenishing
+ * @rx_replenish: work that will be called when buffers need to be allocated
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
 */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
-	struct iwl_rb_allocator rba;
+	struct work_struct rx_replenish;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;

diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
- * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
- * When the interrupt handler is called, the request is processed.
- * The page is either stolen - transferred to the upper layer
- * or reused - added immediately to the iwl->rxq->rx_free list.
- * + When the page is stolen - the driver updates the matching queue's used
- * count, detaches the RBD and transfers it to the queue used list.
- * When there are two used RBDs - they are transferred to the allocator empty
- * list. Work is then scheduled for the allocator to start allocating
- * eight buffers.
- * When there are another 6 used RBDs - they are transferred to the allocator
- * empty list and the driver tries to claim the pre-allocated buffers and
- * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
- * until ready.
- * When there are 8+ buffers in the free list - either from allocation or from
- * 8 reused unstolen pages - restock is called to update the FW and indexes.
- * + In order to make sure the allocator always has RBDs to use for allocation
- * the allocator has initial pool in the size of num_queues*(8-2) - the
- * maximum missing RBDs per allocation request (request posted with 2
- * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
- * The queues supplies the recycle of the rest of the RBDs.
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ INDEX is updated (updating the
+ * 'processed' and 'read' driver indexes as well)
 * + A received packet is processed and handed to the kernel network stack,
 * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + If there are no allocated buffers in iwl->rxq->rx_free,
+ * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
+ * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 * If there were enough free buffers and RX_STALLED is set it is cleared.
 *
@@ -105,32 +92,18 @@
 *
 * iwl_rxq_alloc() Allocates rx_free
 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_pcie_rxq_restock.
- * Used only during initialization.
+ * iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
 * queue, updates firmware pointers, and updates
- * the WRITE index.
- * iwl_pcie_rx_allocator() Background work for allocating pages.
+ * the WRITE index. If insufficient rx_free buffers
+ * are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
 * READ INDEX, detaching the SKB from the pool.
 * Moves the packet buffer from queue to rx_used.
- * Posts and claims requests to the allocator.
 * Calls iwl_pcie_rxq_restock to refill any empty
 * slots.
- *
- * RBD life-cycle:
- *
- * Init:
- * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
- *
- * Regular Receive interrupt:
- * Page Stolen:
- * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
- * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
- * Page not Stolen:
- * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		schedule_work(&trans_pcie->rx_replenish);

 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 }

 /*
- * iwl_pcie_rx_alloc_page - allocates and returns a page.
- *
- */
-static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct page *page;
-	gfp_t gfp_mask = GFP_KERNEL;
-
-	if (rxq->free_count > RX_LOW_WATERMARK)
-		gfp_mask |= __GFP_NOWARN;
-
-	if (trans_pcie->rx_page_order > 0)
-		gfp_mask |= __GFP_COMP;
-
-	/* Alloc a new receive buffer */
-	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-	if (!page) {
-		if (net_ratelimit())
-			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
-				       trans_pcie->rx_page_order);
-		/* Issue an error if the hardware has consumed more than half
-		 * of its free buffer list and we don't have enough
-		 * pre-allocated buffers.
-		 */
-		if (rxq->free_count <= RX_LOW_WATERMARK &&
-		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-		    net_ratelimit())
-			IWL_CRIT(trans,
-				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-				 rxq->free_count);
-		return NULL;
-	}
-	return page;
-}
-
-/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
+	gfp_t gfp_mask = priority;

 	while (1) {
 		spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 		}
 		spin_unlock(&rxq->lock);

+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (trans_pcie->rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
 		/* Alloc a new receive buffer */
-		page = iwl_pcie_rx_alloc_page(trans);
-		if (!page)
+		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
+					       "order: %d\n",
+					       trans_pcie->rx_page_order);
+
+			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
+			    net_ratelimit())
+				IWL_CRIT(trans, "Failed to alloc_pages with %s."
+					 "Only %u free buffers remaining.\n",
+					 priority == GFP_ATOMIC ?
+					 "GFP_ATOMIC" : "GFP_KERNEL",
+					 rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
 			return;
+		}

 		spin_lock(&rxq->lock);

@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)

 	lockdep_assert_held(&rxq->lock);

-	for (i = 0; i < RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
 * When moving to rx_free an page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
+ * This is called as a scheduled work item (except for during initialization)
 */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
 {
-	iwl_pcie_rxq_alloc_rbs(trans);
+	iwl_pcie_rxq_alloc_rbs(trans, gfp);

 	iwl_pcie_rxq_restock(trans);
 }

-/*
- * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
- *
- * Allocates for each received request 8 pages
- * Called as a scheduled work item.
- */
-static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-	while (atomic_read(&rba->req_pending)) {
-		int i;
-		struct list_head local_empty;
-		struct list_head local_allocated;
-
-		INIT_LIST_HEAD(&local_allocated);
-		spin_lock(&rba->lock);
-		/* swap out the entire rba->rbd_empty to a local list */
-		list_replace_init(&rba->rbd_empty, &local_empty);
-		spin_unlock(&rba->lock);
-
-		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
-			struct iwl_rx_mem_buffer *rxb;
-			struct page *page;
-
-			/* List should never be empty - each reused RBD is
-			 * returned to the list, and initial pool covers any
-			 * possible gap between the time the page is allocated
-			 * to the time the RBD is added.
-			 */
-			BUG_ON(list_empty(&local_empty));
-			/* Get the first rxb from the rbd list */
-			rxb = list_first_entry(&local_empty,
-					       struct iwl_rx_mem_buffer, list);
-			BUG_ON(rxb->page);
-
-			/* Alloc a new receive buffer */
-			page = iwl_pcie_rx_alloc_page(trans);
-			if (!page)
-				continue;
-			rxb->page = page;
-
-			/* Get physical address of the RB */
-			rxb->page_dma = dma_map_page(trans->dev, page, 0,
-					PAGE_SIZE << trans_pcie->rx_page_order,
-					DMA_FROM_DEVICE);
-			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
-				rxb->page = NULL;
-				__free_pages(page, trans_pcie->rx_page_order);
-				continue;
-			}
-			/* dma address must be no more than 36 bits */
-			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-			/* and also 256 byte aligned! */
-			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-			/* move the allocated entry to the out list */
-			list_move(&rxb->list, &local_allocated);
-			i++;
-		}
-
-		spin_lock(&rba->lock);
-		/* add the allocated rbds to the allocator allocated list */
-		list_splice_tail(&local_allocated, &rba->rbd_allocated);
-		/* add the unused rbds back to the allocator empty list */
-		list_splice_tail(&local_empty, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
-
-		atomic_dec(&rba->req_pending);
-		atomic_inc(&rba->req_ready);
-	}
-}
-
-/*
- * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
-.*
-.* Called by queue when the queue posted allocation request and
- * has freed 8 RBDs in order to restock itself.
- */
-static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
-				     struct iwl_rx_mem_buffer
-				     *out[RX_CLAIM_REQ_ALLOC])
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i;
-
-	if (atomic_dec_return(&rba->req_ready) < 0) {
-		atomic_inc(&rba->req_ready);
-		IWL_DEBUG_RX(trans,
-			     "Allocation request not ready, pending requests = %d\n",
-			     atomic_read(&rba->req_pending));
-		return -ENOMEM;
-	}
-
-	spin_lock(&rba->lock);
-	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
-		/* Get next free Rx buffer, remove it from free list */
-		out[i] = list_first_entry(&rba->rbd_allocated,
-					  struct iwl_rx_mem_buffer, list);
-		list_del(&out[i]->list);
-	}
-	spin_unlock(&rba->lock);
-
-	return 0;
-}
-
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+static void iwl_pcie_rx_replenish_work(struct work_struct *data)
 {
-	struct iwl_rb_allocator *rba_p =
-		container_of(data, struct iwl_rb_allocator, rx_alloc);
 	struct iwl_trans_pcie *trans_pcie =
-		container_of(rba_p, struct iwl_trans_pcie, rba);
+		container_of(data, struct iwl_trans_pcie, rx_replenish);

-	iwl_pcie_rx_allocator(trans_pcie->trans);
+	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
 }

 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;

 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

 	spin_lock_init(&rxq->lock);
-	spin_lock_init(&rba->lock);

 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
-	rxq->used_count = 0;

-	for (i = 0; i < RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }

-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-	int i;
-
-	lockdep_assert_held(&rba->lock);
-
-	INIT_LIST_HEAD(&rba->rbd_allocated);
-	INIT_LIST_HEAD(&rba->rbd_empty);
-
-	for (i = 0; i < RX_POOL_SIZE; i++)
-		list_add(&rba->pool[i].list, &rba->rbd_empty);
-}
-
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-	int i;
-
-	lockdep_assert_held(&rba->lock);
-
-	for (i = 0; i < RX_POOL_SIZE; i++) {
-		if (!rba->pool[i].page)
-			continue;
-		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-			       PAGE_SIZE << trans_pcie->rx_page_order,
-			       DMA_FROM_DEVICE);
-		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-		rba->pool[i].page = NULL;
-	}
-}
-
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;

 	if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 		if (err)
 			return err;
 	}
-	if (!rba->alloc_wq)
-		rba->alloc_wq = alloc_workqueue("rb_allocator",
-						WQ_HIGHPRI | WQ_UNBOUND, 1);
-	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
-
-	spin_lock(&rba->lock);
-	atomic_set(&rba->req_pending, 0);
-	atomic_set(&rba->req_ready, 0);
-	/* free all first - we might be reconfigured for a different size */
-	iwl_pcie_rx_free_rba(trans);
-	iwl_pcie_rx_init_rba(rba);
-	spin_unlock(&rba->lock);

 	spin_lock(&rxq->lock);

+	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
+
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);

-	iwl_pcie_rx_replenish(trans);
+	iwl_pcie_rx_replenish(trans, GFP_KERNEL);

 	iwl_pcie_rx_hw_init(trans, rxq);

@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;

 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		return;
 	}

-	cancel_work_sync(&rba->rx_alloc);
-	if (rba->alloc_wq) {
-		destroy_workqueue(rba->alloc_wq);
-		rba->alloc_wq = NULL;
-	}
-
-	spin_lock(&rba->lock);
-	iwl_pcie_rx_free_rba(trans);
-	spin_unlock(&rba->lock);
+	cancel_work_sync(&trans_pcie->rx_replenish);

 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	rxq->rb_stts = NULL;
 }

-/*
- * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
- *
- * Called when a RBD can be reused. The RBD is transferred to the allocator.
- * When there are 2 empty RBDs - a request for allocation is posted
- */
-static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
-				  struct iwl_rx_mem_buffer *rxb,
-				  struct iwl_rxq *rxq)
-{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct iwl_rb_allocator *rba = &trans_pcie->rba;
-
-	/* Count the used RBDs */
-	rxq->used_count++;
-
-	/* Move the RBD to the used list, will be moved to allocator in batches
-	 * before claiming or posting a request*/
-	list_add_tail(&rxb->list, &rxq->rx_used);
-
-	/* If we have RX_POST_REQ_ALLOC new released rx buffers -
-	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
-	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
-	 * after but we still need to post another request.
-	 */
-	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
-		/* Move the 2 RBDs to the allocator ownership.
-		   Allocator has another 6 from pool for the request completion*/
-		spin_lock(&rba->lock);
-		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-		spin_unlock(&rba->lock);
-
-		atomic_inc(&rba->req_pending);
-		queue_work(rba->alloc_wq, &rba->rx_alloc);
-	}
-}
-
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 				  struct iwl_rx_mem_buffer *rxb)
 {
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
-			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+			list_add_tail(&rxb->list, &rxq->rx_used);
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 		}
 	} else
-		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
+		list_add_tail(&rxb->list, &rxq->rx_used);
 }

 /*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i, j;
+	u32 r, i;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty;

 restart:
 	spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

+	/* calculate total frames need to be restock after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;

@@ -968,48 +739,29 @@ restart:
 		iwl_pcie_rx_handle_rb(trans, rxb);

 		i = (i + 1) & RX_QUEUE_MASK;
-
-		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
-		 * try to claim the pre-allocated buffers from the allocator */
-		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
-			struct iwl_rb_allocator *rba = &trans_pcie->rba;
-			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
-
-			/* Add the remaining 6 empty RBDs for allocator use */
-			spin_lock(&rba->lock);
-			list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
-			spin_unlock(&rba->lock);
-
-			/* If not ready - continue, will try to reclaim later.
-			 * No need to reschedule work - allocator exits only on
-			 * success */
-			if (!iwl_pcie_rx_allocator_get(trans, out)) {
-				/* If success - then RX_CLAIM_REQ_ALLOC
-				 * buffers were retrieved and should be added
-				 * to free list */
-				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
-				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
-					list_add_tail(&out[j]->list,
-						      &rxq->rx_free);
-					rxq->free_count++;
-				}
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode wont assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+				count = 0;
+				goto restart;
 			}
 		}
-		/* handle restock for two cases:
-		 * - we just pulled buffers from the allocator
-		 * - we have 8+ unstolen pages accumulated */
-		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
-			rxq->read = i;
-			spin_unlock(&rxq->lock);
-			iwl_pcie_rxq_restock(trans);
-			goto restart;
-		}
 	}

 	/* Backtrack one entry */
 	rxq->read = i;
 	spin_unlock(&rxq->lock);

+	if (fill_rx)
+		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
+	else
+		iwl_pcie_rxq_restock(trans);
+
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..6203c4ad9bba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)

 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 {
-	if (!trans->cfg->apmg_not_supported)
+	if (trans->cfg->apmg_not_supported)
 		return;

 	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
 	u16 pci_cmd;
-	int err;
+	int ret;

 	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
 				&pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	spin_lock_init(&trans_pcie->ref_lock);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

-	err = pci_enable_device(pdev);
-	if (err)
+	ret = pci_enable_device(pdev);
+	if (ret)
 		goto out_no_pci;

 	if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,

 	pci_set_master(pdev);

-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (!err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
-	if (err) {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (!err)
-			err = pci_set_consistent_dma_mask(pdev,
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (!ret)
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (ret) {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!ret)
+			ret = pci_set_consistent_dma_mask(pdev,
 							  DMA_BIT_MASK(32));
 		/* both attempts failed: */
-		if (err) {
+		if (ret) {
 			dev_err(&pdev->dev, "No suitable DMA available\n");
 			goto out_pci_disable_device;
 		}
 	}

-	err = pci_request_regions(pdev, DRV_NAME);
-	if (err) {
+	ret = pci_request_regions(pdev, DRV_NAME);
+	if (ret) {
 		dev_err(&pdev->dev, "pci_request_regions failed\n");
 		goto out_pci_disable_device;
 	}
@@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
 	if (!trans_pcie->hw_base) {
 		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
-		err = -ENODEV;
+		ret = -ENODEV;
 		goto out_pci_release_regions;
 	}

@@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	trans_pcie->pci_dev = pdev;
 	iwl_disable_interrupts(trans);

-	err = pci_enable_msi(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
 		/* enable rfkill interrupt: hw bug w/a */
 		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2547,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 */
 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
 		unsigned long flags;
-		int ret;

 		trans->hw_rev = (trans->hw_rev & 0xfff0) |
 				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);

+		ret = iwl_pcie_prepare_card_hw(trans);
+		if (ret) {
+			IWL_WARN(trans, "Exit HW not ready\n");
+			goto out_pci_disable_msi;
+		}
+
 		/*
 		 * in-order to recognize C step driver should read chip version
 		 * id located at the AUX bus MISC address space.
@@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	/* Initialize the wait queue for commands */
 	init_waitqueue_head(&trans_pcie->wait_command_queue);

-	if (iwl_pcie_alloc_ict(trans))
+	ret = iwl_pcie_alloc_ict(trans);
+	if (ret)
 		goto out_pci_disable_msi;

-	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 				   iwl_pcie_irq_handler,
 				   IRQF_SHARED, DRV_NAME, trans);
-	if (err) {
+	if (ret) {
 		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
 		goto out_free_ict;
 	}
@@ -2617,5 +2623,5 @@ out_pci_disable_device:
 	pci_disable_device(pdev);
 out_no_pci:
 	iwl_trans_free(trans);
-	return ERR_PTR(err);
+	return ERR_PTR(ret);
 }