aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJohn W. Linville <linville@tuxdriver.com>2011-09-13 15:41:16 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-09-13 15:41:16 -0400
commitb4d3de8ca25fcdf697da38d9f9785cb508edea4d (patch)
tree8d2940fab556b05f4f1c7980c611211ffaa0f025
parent3857e3ee2209b7289c434103e366f765ec82a22d (diff)
parent2fa8b6a0e42570690a48a56cb65778211e3cc9cc (diff)
Merge branch 'master' of git://git.infradead.org/users/linville/wireless-next into for-davem
-rw-r--r--drivers/net/wireless/ath/ath.h48
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c312
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h47
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c234
-rw-r--r--drivers/net/wireless/ath/main.c8
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/dma.c31
-rw-r--r--drivers/net/wireless/b43/dma.h3
-rw-r--r--drivers/net/wireless/b43/main.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c72
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c676
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c913
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c938
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c284
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c412
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h329
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h91
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c192
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c43
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.h116
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c42
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c97
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h430
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c106
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sv-open.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h314
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c862
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c506
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.c1615
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h216
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c12
-rw-r--r--drivers/net/wireless/wl12xx/acx.c322
-rw-r--r--drivers/net/wireless/wl12xx/acx.h394
-rw-r--r--drivers/net/wireless/wl12xx/boot.c43
-rw-r--r--drivers/net/wireless/wl12xx/boot.h3
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c793
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h329
-rw-r--r--drivers/net/wireless/wl12xx/conf.h352
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c17
-rw-r--r--drivers/net/wireless/wl12xx/event.c6
-rw-r--r--drivers/net/wireless/wl12xx/event.h80
-rw-r--r--drivers/net/wireless/wl12xx/init.c91
-rw-r--r--drivers/net/wireless/wl12xx/io.h1
-rw-r--r--drivers/net/wireless/wl12xx/main.c961
-rw-r--r--drivers/net/wireless/wl12xx/ps.c4
-rw-r--r--drivers/net/wireless/wl12xx/reg.h75
-rw-r--r--drivers/net/wireless/wl12xx/rx.c60
-rw-r--r--drivers/net/wireless/wl12xx/rx.h18
-rw-r--r--drivers/net/wireless/wl12xx/scan.c38
-rw-r--r--drivers/net/wireless/wl12xx/scan.h25
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c4
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c2
-rw-r--r--drivers/net/wireless/wl12xx/spi.c4
-rw-r--r--drivers/net/wireless/wl12xx/tx.c136
-rw-r--r--drivers/net/wireless/wl12xx/tx.h16
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h153
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h25
-rw-r--r--include/net/cfg80211.h1
-rw-r--r--net/mac80211/debugfs.c14
-rw-r--r--net/mac80211/mesh_pathtbl.c8
100 files changed, 7670 insertions, 6378 deletions
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 17c4b56c3874..9891fb605a01 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -178,23 +178,29 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
178void ath_hw_cycle_counters_update(struct ath_common *common); 178void ath_hw_cycle_counters_update(struct ath_common *common);
179int32_t ath_hw_get_listen_time(struct ath_common *common); 179int32_t ath_hw_get_listen_time(struct ath_common *common);
180 180
181extern __attribute__ ((format (printf, 3, 4))) int 181extern __attribute__((format (printf, 2, 3)))
182ath_printk(const char *level, struct ath_common *common, const char *fmt, ...); 182void ath_printk(const char *level, const char *fmt, ...);
183
184#define _ath_printk(level, common, fmt, ...) \
185do { \
186 __always_unused struct ath_common *unused = common; \
187 ath_printk(level, fmt, ##__VA_ARGS__); \
188} while (0)
183 189
184#define ath_emerg(common, fmt, ...) \ 190#define ath_emerg(common, fmt, ...) \
185 ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__) 191 _ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
186#define ath_alert(common, fmt, ...) \ 192#define ath_alert(common, fmt, ...) \
187 ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__) 193 _ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
188#define ath_crit(common, fmt, ...) \ 194#define ath_crit(common, fmt, ...) \
189 ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__) 195 _ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
190#define ath_err(common, fmt, ...) \ 196#define ath_err(common, fmt, ...) \
191 ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__) 197 _ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
192#define ath_warn(common, fmt, ...) \ 198#define ath_warn(common, fmt, ...) \
193 ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__) 199 _ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
194#define ath_notice(common, fmt, ...) \ 200#define ath_notice(common, fmt, ...) \
195 ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__) 201 _ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
196#define ath_info(common, fmt, ...) \ 202#define ath_info(common, fmt, ...) \
197 ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__) 203 _ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
198 204
199/** 205/**
200 * enum ath_debug_level - atheros wireless debug level 206 * enum ath_debug_level - atheros wireless debug level
@@ -246,27 +252,21 @@ enum ATH_DEBUG {
246 252
247#ifdef CONFIG_ATH_DEBUG 253#ifdef CONFIG_ATH_DEBUG
248 254
249#define ath_dbg(common, dbg_mask, fmt, ...) \ 255#define ath_dbg(common, dbg_mask, fmt, ...) \
250({ \ 256do { \
251 int rtn; \ 257 if ((common)->debug_mask & dbg_mask) \
252 if ((common)->debug_mask & dbg_mask) \ 258 _ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
253 rtn = ath_printk(KERN_DEBUG, common, fmt, \ 259} while (0)
254 ##__VA_ARGS__); \ 260
255 else \
256 rtn = 0; \
257 \
258 rtn; \
259})
260#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg) 261#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
261#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo) 262#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
262 263
263#else 264#else
264 265
265static inline __attribute__ ((format (printf, 3, 4))) int 266static inline __attribute__((format (printf, 3, 4)))
266ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask, 267void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
267 const char *fmt, ...) 268 const char *fmt, ...)
268{ 269{
269 return 0;
270} 270}
271#define ATH_DBG_WARN(foo, arg...) do {} while (0) 271#define ATH_DBG_WARN(foo, arg...) do {} while (0)
272#define ATH_DBG_WARN_ON_ONCE(foo) ({ \ 272#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index bfb6481f01f9..d969a11e3425 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -643,7 +643,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
643 listenTime = ath_hw_get_listen_time(common); 643 listenTime = ath_hw_get_listen_time(common);
644 644
645 if (listenTime <= 0) { 645 if (listenTime <= 0) {
646 ah->stats.ast_ani_lneg++; 646 ah->stats.ast_ani_lneg_or_lzero++;
647 ath9k_ani_restart(ah); 647 ath9k_ani_restart(ah);
648 return false; 648 return false;
649 } 649 }
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index dbab5b9ce494..a547005572e7 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -148,8 +148,7 @@ struct ar5416Stats {
148 u32 ast_ani_ofdmerrs; 148 u32 ast_ani_ofdmerrs;
149 u32 ast_ani_cckerrs; 149 u32 ast_ani_cckerrs;
150 u32 ast_ani_reset; 150 u32 ast_ani_reset;
151 u32 ast_ani_lzero; 151 u32 ast_ani_lneg_or_lzero;
152 u32 ast_ani_lneg;
153 u32 avgbrssi; 152 u32 avgbrssi;
154 struct ath9k_mib_stats ast_mibstats; 153 struct ath9k_mib_stats ast_mibstats;
155}; 154};
@@ -159,7 +158,5 @@ void ath9k_enable_mib_counters(struct ath_hw *ah);
159void ath9k_hw_disable_mib_counters(struct ath_hw *ah); 158void ath9k_hw_disable_mib_counters(struct ath_hw *ah);
160void ath9k_hw_ani_setup(struct ath_hw *ah); 159void ath9k_hw_ani_setup(struct ath_hw *ah);
161void ath9k_hw_ani_init(struct ath_hw *ah); 160void ath9k_hw_ani_init(struct ath_hw *ah);
162int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
163 struct ath9k_channel *chan);
164 161
165#endif /* ANI_H */ 162#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 45b262fe2c25..33deb0d574b0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -273,7 +273,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
273 273
274static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds, 274static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
275 u32 pktLen, enum ath9k_pkt_type type, 275 u32 pktLen, enum ath9k_pkt_type type,
276 u32 txPower, u32 keyIx, 276 u32 txPower, u8 keyIx,
277 enum ath9k_key_type keyType, u32 flags) 277 enum ath9k_key_type keyType, u32 flags)
278{ 278{
279 struct ar5416_desc *ads = AR5416DESC(ds); 279 struct ar5416_desc *ads = AR5416DESC(ds);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index cb8bcc4e6091..5f3ac251b486 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3318,7 +3318,7 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
3318 3318
3319 word = kzalloc(2048, GFP_KERNEL); 3319 word = kzalloc(2048, GFP_KERNEL);
3320 if (!word) 3320 if (!word)
3321 return -1; 3321 return -ENOMEM;
3322 3322
3323 memcpy(mptr, &ar9300_default, mdata_size); 3323 memcpy(mptr, &ar9300_default, mdata_size);
3324 3324
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 8ace36e77399..d08ab930e430 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -312,7 +312,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
312 312
313static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds, 313static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
314 u32 pktlen, enum ath9k_pkt_type type, u32 txpower, 314 u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
315 u32 keyIx, enum ath9k_key_type keyType, u32 flags) 315 u8 keyIx, enum ath9k_key_type keyType, u32 flags)
316{ 316{
317 struct ar9003_txc *ads = (struct ar9003_txc *) ds; 317 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
318 318
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 3a893e19d6c3..5d9a9aabe476 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -206,16 +206,17 @@ struct ath_atx_ac {
206}; 206};
207 207
208struct ath_frame_info { 208struct ath_frame_info {
209 struct ath_buf *bf;
209 int framelen; 210 int framelen;
210 u32 keyix;
211 enum ath9k_key_type keytype; 211 enum ath9k_key_type keytype;
212 u8 keyix;
212 u8 retries; 213 u8 retries;
213 u16 seqno;
214}; 214};
215 215
216struct ath_buf_state { 216struct ath_buf_state {
217 u8 bf_type; 217 u8 bf_type;
218 u8 bfs_paprd; 218 u8 bfs_paprd;
219 u16 seqno;
219 unsigned long bfs_paprd_timestamp; 220 unsigned long bfs_paprd_timestamp;
220}; 221};
221 222
@@ -235,7 +236,7 @@ struct ath_buf {
235 236
236struct ath_atx_tid { 237struct ath_atx_tid {
237 struct list_head list; 238 struct list_head list;
238 struct list_head buf_q; 239 struct sk_buff_head buf_q;
239 struct ath_node *an; 240 struct ath_node *an;
240 struct ath_atx_ac *ac; 241 struct ath_atx_ac *ac;
241 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)]; 242 unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index da45f325be7d..727e8de22fda 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -711,7 +711,7 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
711 " tid: %p %s %s %i %p %p\n", 711 " tid: %p %s %s %i %p %p\n",
712 tid, tid->sched ? "sched" : "idle", 712 tid, tid->sched ? "sched" : "idle",
713 tid->paused ? "paused" : "running", 713 tid->paused ? "paused" : "running",
714 list_empty(&tid->buf_q), 714 skb_queue_empty(&tid->buf_q),
715 tid->an, tid->ac); 715 tid->an, tid->ac);
716 if (len >= size) 716 if (len >= size)
717 goto done; 717 goto done;
@@ -828,6 +828,8 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
828void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 828void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
829 struct ath_tx_status *ts, struct ath_txq *txq) 829 struct ath_tx_status *ts, struct ath_txq *txq)
830{ 830{
831#define TX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].ts\
832 [sc->debug.tsidx].c)
831 int qnum = txq->axq_qnum; 833 int qnum = txq->axq_qnum;
832 834
833 TX_STAT_INC(qnum, tx_pkts_all); 835 TX_STAT_INC(qnum, tx_pkts_all);
@@ -857,6 +859,26 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
857 TX_STAT_INC(qnum, data_underrun); 859 TX_STAT_INC(qnum, data_underrun);
858 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) 860 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
859 TX_STAT_INC(qnum, delim_underrun); 861 TX_STAT_INC(qnum, delim_underrun);
862
863 spin_lock(&sc->debug.samp_lock);
864 TX_SAMP_DBG(jiffies) = jiffies;
865 TX_SAMP_DBG(rssi_ctl0) = ts->ts_rssi_ctl0;
866 TX_SAMP_DBG(rssi_ctl1) = ts->ts_rssi_ctl1;
867 TX_SAMP_DBG(rssi_ctl2) = ts->ts_rssi_ctl2;
868 TX_SAMP_DBG(rssi_ext0) = ts->ts_rssi_ext0;
869 TX_SAMP_DBG(rssi_ext1) = ts->ts_rssi_ext1;
870 TX_SAMP_DBG(rssi_ext2) = ts->ts_rssi_ext2;
871 TX_SAMP_DBG(rateindex) = ts->ts_rateindex;
872 TX_SAMP_DBG(isok) = !!(ts->ts_status & ATH9K_TXERR_MASK);
873 TX_SAMP_DBG(rts_fail_cnt) = ts->ts_shortretry;
874 TX_SAMP_DBG(data_fail_cnt) = ts->ts_longretry;
875 TX_SAMP_DBG(rssi) = ts->ts_rssi;
876 TX_SAMP_DBG(tid) = ts->tid;
877 TX_SAMP_DBG(qid) = ts->qid;
878 sc->debug.tsidx = (sc->debug.tsidx + 1) % ATH_DBG_MAX_SAMPLES;
879 spin_unlock(&sc->debug.samp_lock);
880
881#undef TX_SAMP_DBG
860} 882}
861 883
862static const struct file_operations fops_xmit = { 884static const struct file_operations fops_xmit = {
@@ -995,6 +1017,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
995{ 1017{
996#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++ 1018#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
997#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 1019#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
1020#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
1021 [sc->debug.rsidx].c)
998 1022
999 u32 phyerr; 1023 u32 phyerr;
1000 1024
@@ -1030,8 +1054,25 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
1030 1054
1031 sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna; 1055 sc->debug.stats.rxstats.rs_antenna = rs->rs_antenna;
1032 1056
1057 spin_lock(&sc->debug.samp_lock);
1058 RX_SAMP_DBG(jiffies) = jiffies;
1059 RX_SAMP_DBG(rssi_ctl0) = rs->rs_rssi_ctl0;
1060 RX_SAMP_DBG(rssi_ctl1) = rs->rs_rssi_ctl1;
1061 RX_SAMP_DBG(rssi_ctl2) = rs->rs_rssi_ctl2;
1062 RX_SAMP_DBG(rssi_ext0) = rs->rs_rssi_ext0;
1063 RX_SAMP_DBG(rssi_ext1) = rs->rs_rssi_ext1;
1064 RX_SAMP_DBG(rssi_ext2) = rs->rs_rssi_ext2;
1065 RX_SAMP_DBG(antenna) = rs->rs_antenna;
1066 RX_SAMP_DBG(rssi) = rs->rs_rssi;
1067 RX_SAMP_DBG(rate) = rs->rs_rate;
1068 RX_SAMP_DBG(is_mybeacon) = rs->is_mybeacon;
1069
1070 sc->debug.rsidx = (sc->debug.rsidx + 1) % ATH_DBG_MAX_SAMPLES;
1071 spin_unlock(&sc->debug.samp_lock);
1072
1033#undef RX_STAT_INC 1073#undef RX_STAT_INC
1034#undef RX_PHY_ERR_INC 1074#undef RX_PHY_ERR_INC
1075#undef RX_SAMP_DBG
1035} 1076}
1036 1077
1037static const struct file_operations fops_recv = { 1078static const struct file_operations fops_recv = {
@@ -1272,6 +1313,269 @@ static const struct file_operations fops_modal_eeprom = {
1272 .llseek = default_llseek, 1313 .llseek = default_llseek,
1273}; 1314};
1274 1315
1316void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
1317{
1318#define ATH_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].c)
1319 struct ath_hw *ah = sc->sc_ah;
1320 struct ath_common *common = ath9k_hw_common(ah);
1321 unsigned long flags;
1322 int i;
1323
1324 ath9k_ps_wakeup(sc);
1325
1326 spin_lock_irqsave(&common->cc_lock, flags);
1327 ath_hw_cycle_counters_update(common);
1328 spin_unlock_irqrestore(&common->cc_lock, flags);
1329
1330 spin_lock_bh(&sc->debug.samp_lock);
1331
1332 ATH_SAMP_DBG(cc.cycles) = common->cc_ani.cycles;
1333 ATH_SAMP_DBG(cc.rx_busy) = common->cc_ani.rx_busy;
1334 ATH_SAMP_DBG(cc.rx_frame) = common->cc_ani.rx_frame;
1335 ATH_SAMP_DBG(cc.tx_frame) = common->cc_ani.tx_frame;
1336 ATH_SAMP_DBG(noise) = ah->noise;
1337
1338 REG_WRITE_D(ah, AR_MACMISC,
1339 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
1340 (AR_MACMISC_MISC_OBS_BUS_1 <<
1341 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
1342
1343 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
1344 ATH_SAMP_DBG(dma_dbg_reg_vals[i]) = REG_READ_D(ah,
1345 AR_DMADBG_0 + (i * sizeof(u32)));
1346
1347 ATH_SAMP_DBG(pcu_obs) = REG_READ_D(ah, AR_OBS_BUS_1);
1348 ATH_SAMP_DBG(pcu_cr) = REG_READ_D(ah, AR_CR);
1349
1350 memcpy(ATH_SAMP_DBG(nfCalHist), sc->caldata.nfCalHist,
1351 sizeof(ATH_SAMP_DBG(nfCalHist)));
1352
1353 sc->debug.sampidx = (sc->debug.sampidx + 1) % ATH_DBG_MAX_SAMPLES;
1354 spin_unlock_bh(&sc->debug.samp_lock);
1355 ath9k_ps_restore(sc);
1356
1357#undef ATH_SAMP_DBG
1358}
1359
1360static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
1361{
1362#define ATH_SAMP_DBG(c) bb_mac_samp[sampidx].c
1363 struct ath_softc *sc = inode->i_private;
1364 struct ath_hw *ah = sc->sc_ah;
1365 struct ath_common *common = ath9k_hw_common(ah);
1366 struct ieee80211_conf *conf = &common->hw->conf;
1367 struct ath_dbg_bb_mac_samp *bb_mac_samp;
1368 struct ath9k_nfcal_hist *h;
1369 int i, j, qcuOffset = 0, dcuOffset = 0;
1370 u32 *qcuBase, *dcuBase, size = 30000, len = 0;
1371 u32 sampidx = 0;
1372 u8 *buf;
1373 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
1374 u8 nread;
1375
1376 buf = vmalloc(size);
1377 if (!buf)
1378 return -ENOMEM;
1379 bb_mac_samp = vmalloc(sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
1380 if (!bb_mac_samp) {
1381 vfree(buf);
1382 return -ENOMEM;
1383 }
1384
1385 spin_lock_bh(&sc->debug.samp_lock);
1386 memcpy(bb_mac_samp, sc->debug.bb_mac_samp,
1387 sizeof(*bb_mac_samp) * ATH_DBG_MAX_SAMPLES);
1388 spin_unlock_bh(&sc->debug.samp_lock);
1389
1390 len += snprintf(buf + len, size - len,
1391 "Raw DMA Debug Dump:\n");
1392 len += snprintf(buf + len, size - len, "Sample |\t");
1393 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
1394 len += snprintf(buf + len, size - len, " DMA Reg%d |\t", i);
1395 len += snprintf(buf + len, size - len, "\n");
1396
1397 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1398 len += snprintf(buf + len, size - len, "%d\t", sampidx);
1399
1400 for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++)
1401 len += snprintf(buf + len, size - len, " %08x\t",
1402 ATH_SAMP_DBG(dma_dbg_reg_vals[i]));
1403 len += snprintf(buf + len, size - len, "\n");
1404 }
1405 len += snprintf(buf + len, size - len, "\n");
1406
1407 len += snprintf(buf + len, size - len,
1408 "Sample Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
1409 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1410 qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
1411 dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
1412
1413 for (i = 0; i < ATH9K_NUM_QUEUES; i++,
1414 qcuOffset += 4, dcuOffset += 5) {
1415 if (i == 8) {
1416 qcuOffset = 0;
1417 qcuBase++;
1418 }
1419
1420 if (i == 6) {
1421 dcuOffset = 0;
1422 dcuBase++;
1423 }
1424 if (!sc->debug.stats.txstats[i].queued)
1425 continue;
1426
1427 len += snprintf(buf + len, size - len,
1428 "%4d %7d %2x %1x %2x %2x\n",
1429 sampidx, i,
1430 (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
1431 (*qcuBase & (0x8 << qcuOffset)) >>
1432 (qcuOffset + 3),
1433 ATH_SAMP_DBG(dma_dbg_reg_vals[2]) &
1434 (0x7 << (i * 3)) >> (i * 3),
1435 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
1436 }
1437 len += snprintf(buf + len, size - len, "\n");
1438 }
1439 len += snprintf(buf + len, size - len,
1440 "samp qcu_sh qcu_fh qcu_comp dcu_comp dcu_arb dcu_fp "
1441 "ch_idle_dur ch_idle_dur_val txfifo_val0 txfifo_val1 "
1442 "txfifo_dcu0 txfifo_dcu1 pcu_obs AR_CR\n");
1443
1444 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1445 qcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[0]);
1446 dcuBase = &ATH_SAMP_DBG(dma_dbg_reg_vals[4]);
1447
1448 len += snprintf(buf + len, size - len, "%4d %5x %5x ", sampidx,
1449 (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x003c0000) >> 18,
1450 (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x03c00000) >> 22);
1451 len += snprintf(buf + len, size - len, "%7x %8x ",
1452 (ATH_SAMP_DBG(dma_dbg_reg_vals[3]) & 0x1c000000) >> 26,
1453 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x3));
1454 len += snprintf(buf + len, size - len, "%7x %7x ",
1455 (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x06000000) >> 25,
1456 (ATH_SAMP_DBG(dma_dbg_reg_vals[5]) & 0x38000000) >> 27);
1457 len += snprintf(buf + len, size - len, "%7d %12d ",
1458 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x000003fc) >> 2,
1459 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000400) >> 10);
1460 len += snprintf(buf + len, size - len, "%12d %12d ",
1461 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00000800) >> 11,
1462 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x00001000) >> 12);
1463 len += snprintf(buf + len, size - len, "%12d %12d ",
1464 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x0001e000) >> 13,
1465 (ATH_SAMP_DBG(dma_dbg_reg_vals[6]) & 0x001e0000) >> 17);
1466 len += snprintf(buf + len, size - len, "0x%07x 0x%07x\n",
1467 ATH_SAMP_DBG(pcu_obs), ATH_SAMP_DBG(pcu_cr));
1468 }
1469
1470 len += snprintf(buf + len, size - len,
1471 "Sample ChNoise Chain privNF #Reading Readings\n");
1472 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1473 h = ATH_SAMP_DBG(nfCalHist);
1474 if (!ATH_SAMP_DBG(noise))
1475 continue;
1476
1477 for (i = 0; i < NUM_NF_READINGS; i++) {
1478 if (!(chainmask & (1 << i)) ||
1479 ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
1480 continue;
1481
1482 nread = AR_PHY_CCA_FILTERWINDOW_LENGTH -
1483 h[i].invalidNFcount;
1484 len += snprintf(buf + len, size - len,
1485 "%4d %5d %4d\t %d\t %d\t",
1486 sampidx, ATH_SAMP_DBG(noise),
1487 i, h[i].privNF, nread);
1488 for (j = 0; j < nread; j++)
1489 len += snprintf(buf + len, size - len,
1490 " %d", h[i].nfCalBuffer[j]);
1491 len += snprintf(buf + len, size - len, "\n");
1492 }
1493 }
1494 len += snprintf(buf + len, size - len, "\nCycle counters:\n"
1495 "Sample Total Rxbusy Rxframes Txframes\n");
1496 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1497 if (!ATH_SAMP_DBG(cc.cycles))
1498 continue;
1499 len += snprintf(buf + len, size - len,
1500 "%4d %08x %08x %08x %08x\n",
1501 sampidx, ATH_SAMP_DBG(cc.cycles),
1502 ATH_SAMP_DBG(cc.rx_busy),
1503 ATH_SAMP_DBG(cc.rx_frame),
1504 ATH_SAMP_DBG(cc.tx_frame));
1505 }
1506
1507 len += snprintf(buf + len, size - len, "Tx status Dump :\n");
1508 len += snprintf(buf + len, size - len,
1509 "Sample rssi:- ctl0 ctl1 ctl2 ext0 ext1 ext2 comb "
1510 "isok rts_fail data_fail rate tid qid tx_before(ms)\n");
1511 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1512 for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
1513 if (!ATH_SAMP_DBG(ts[i].jiffies))
1514 continue;
1515 len += snprintf(buf + len, size - len, "%4d \t"
1516 "%8d %4d %4d %4d %4d %4d %4d %4d %4d "
1517 "%4d %4d %2d %2d %d\n",
1518 sampidx,
1519 ATH_SAMP_DBG(ts[i].rssi_ctl0),
1520 ATH_SAMP_DBG(ts[i].rssi_ctl1),
1521 ATH_SAMP_DBG(ts[i].rssi_ctl2),
1522 ATH_SAMP_DBG(ts[i].rssi_ext0),
1523 ATH_SAMP_DBG(ts[i].rssi_ext1),
1524 ATH_SAMP_DBG(ts[i].rssi_ext2),
1525 ATH_SAMP_DBG(ts[i].rssi),
1526 ATH_SAMP_DBG(ts[i].isok),
1527 ATH_SAMP_DBG(ts[i].rts_fail_cnt),
1528 ATH_SAMP_DBG(ts[i].data_fail_cnt),
1529 ATH_SAMP_DBG(ts[i].rateindex),
1530 ATH_SAMP_DBG(ts[i].tid),
1531 ATH_SAMP_DBG(ts[i].qid),
1532 jiffies_to_msecs(jiffies -
1533 ATH_SAMP_DBG(ts[i].jiffies)));
1534 }
1535 }
1536
1537 len += snprintf(buf + len, size - len, "Rx status Dump :\n");
1538 len += snprintf(buf + len, size - len, "Sample rssi:- ctl0 ctl1 ctl2 "
1539 "ext0 ext1 ext2 comb beacon ant rate rx_before(ms)\n");
1540 for (sampidx = 0; sampidx < ATH_DBG_MAX_SAMPLES; sampidx++) {
1541 for (i = 0; i < ATH_DBG_MAX_SAMPLES; i++) {
1542 if (!ATH_SAMP_DBG(rs[i].jiffies))
1543 continue;
1544 len += snprintf(buf + len, size - len, "%4d \t"
1545 "%8d %4d %4d %4d %4d %4d %4d %s %4d %02x %d\n",
1546 sampidx,
1547 ATH_SAMP_DBG(rs[i].rssi_ctl0),
1548 ATH_SAMP_DBG(rs[i].rssi_ctl1),
1549 ATH_SAMP_DBG(rs[i].rssi_ctl2),
1550 ATH_SAMP_DBG(rs[i].rssi_ext0),
1551 ATH_SAMP_DBG(rs[i].rssi_ext1),
1552 ATH_SAMP_DBG(rs[i].rssi_ext2),
1553 ATH_SAMP_DBG(rs[i].rssi),
1554 ATH_SAMP_DBG(rs[i].is_mybeacon) ?
1555 "True" : "False",
1556 ATH_SAMP_DBG(rs[i].antenna),
1557 ATH_SAMP_DBG(rs[i].rate),
1558 jiffies_to_msecs(jiffies -
1559 ATH_SAMP_DBG(rs[i].jiffies)));
1560 }
1561 }
1562
1563 vfree(bb_mac_samp);
1564 file->private_data = buf;
1565
1566 return 0;
1567#undef ATH_SAMP_DBG
1568}
1569
1570static const struct file_operations fops_samps = {
1571 .open = open_file_bb_mac_samps,
1572 .read = ath9k_debugfs_read_buf,
1573 .release = ath9k_debugfs_release_buf,
1574 .owner = THIS_MODULE,
1575 .llseek = default_llseek,
1576};
1577
1578
1275int ath9k_init_debug(struct ath_hw *ah) 1579int ath9k_init_debug(struct ath_hw *ah)
1276{ 1580{
1277 struct ath_common *common = ath9k_hw_common(ah); 1581 struct ath_common *common = ath9k_hw_common(ah);
@@ -1321,6 +1625,8 @@ int ath9k_init_debug(struct ath_hw *ah)
1321 &fops_base_eeprom); 1625 &fops_base_eeprom);
1322 debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc, 1626 debugfs_create_file("modal_eeprom", S_IRUSR, sc->debug.debugfs_phy, sc,
1323 &fops_modal_eeprom); 1627 &fops_modal_eeprom);
1628 debugfs_create_file("samples", S_IRUSR, sc->debug.debugfs_phy, sc,
1629 &fops_samps);
1324 1630
1325 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR, 1631 debugfs_create_u32("gpio_mask", S_IRUSR | S_IWUSR,
1326 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask); 1632 sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
@@ -1329,5 +1635,9 @@ int ath9k_init_debug(struct ath_hw *ah)
1329 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val); 1635 sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
1330 1636
1331 sc->debug.regidx = 0; 1637 sc->debug.regidx = 0;
1638 memset(&sc->debug.bb_mac_samp, 0, sizeof(sc->debug.bb_mac_samp));
1639 sc->debug.sampidx = 0;
1640 sc->debug.tsidx = 0;
1641 sc->debug.rsidx = 0;
1332 return 0; 1642 return 0;
1333} 1643}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 4a04510e1111..95f85bdc8db7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -177,14 +177,57 @@ struct ath_stats {
177 struct ath_rx_stats rxstats; 177 struct ath_rx_stats rxstats;
178}; 178};
179 179
180#define ATH_DBG_MAX_SAMPLES 10
181struct ath_dbg_bb_mac_samp {
182 u32 dma_dbg_reg_vals[ATH9K_NUM_DMA_DEBUG_REGS];
183 u32 pcu_obs, pcu_cr, noise;
184 struct {
185 u64 jiffies;
186 int8_t rssi_ctl0;
187 int8_t rssi_ctl1;
188 int8_t rssi_ctl2;
189 int8_t rssi_ext0;
190 int8_t rssi_ext1;
191 int8_t rssi_ext2;
192 int8_t rssi;
193 bool isok;
194 u8 rts_fail_cnt;
195 u8 data_fail_cnt;
196 u8 rateindex;
197 u8 qid;
198 u8 tid;
199 } ts[ATH_DBG_MAX_SAMPLES];
200 struct {
201 u64 jiffies;
202 int8_t rssi_ctl0;
203 int8_t rssi_ctl1;
204 int8_t rssi_ctl2;
205 int8_t rssi_ext0;
206 int8_t rssi_ext1;
207 int8_t rssi_ext2;
208 int8_t rssi;
209 bool is_mybeacon;
210 u8 antenna;
211 u8 rate;
212 } rs[ATH_DBG_MAX_SAMPLES];
213 struct ath_cycle_counters cc;
214 struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS];
215};
216
180struct ath9k_debug { 217struct ath9k_debug {
181 struct dentry *debugfs_phy; 218 struct dentry *debugfs_phy;
182 u32 regidx; 219 u32 regidx;
183 struct ath_stats stats; 220 struct ath_stats stats;
221 spinlock_t samp_lock;
222 struct ath_dbg_bb_mac_samp bb_mac_samp[ATH_DBG_MAX_SAMPLES];
223 u8 sampidx;
224 u8 tsidx;
225 u8 rsidx;
184}; 226};
185 227
186int ath9k_init_debug(struct ath_hw *ah); 228int ath9k_init_debug(struct ath_hw *ah);
187 229
230void ath9k_debug_samp_bb_mac(struct ath_softc *sc);
188void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 231void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
189void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 232void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
190 struct ath_tx_status *ts, struct ath_txq *txq); 233 struct ath_tx_status *ts, struct ath_txq *txq);
@@ -197,6 +240,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
197 return 0; 240 return 0;
198} 241}
199 242
243static inline void ath9k_debug_samp_bb_mac(struct ath_softc *sc)
244{
245}
246
200static inline void ath_debug_stat_interrupt(struct ath_softc *sc, 247static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
201 enum ath9k_int status) 248 enum ath9k_int status)
202{ 249{
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0248024da56a..b9de1511add9 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1300,6 +1300,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1300 if (priv->op_flags & OP_INVALID) { 1300 if (priv->op_flags & OP_INVALID) {
1301 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY, 1301 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
1302 "Unable to configure filter on invalid state\n"); 1302 "Unable to configure filter on invalid state\n");
1303 mutex_unlock(&priv->mutex);
1303 return; 1304 return;
1304 } 1305 }
1305 ath9k_htc_ps_wakeup(priv); 1306 ath9k_htc_ps_wakeup(priv);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a0d1147844fb..3a16ba256ef9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -440,7 +440,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
440 if (AR_SREV_9100(ah)) 440 if (AR_SREV_9100(ah))
441 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; 441 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
442 ah->enable_32kHz_clock = DONT_USE_32KHZ; 442 ah->enable_32kHz_clock = DONT_USE_32KHZ;
443 ah->slottime = 20; 443 ah->slottime = ATH9K_SLOT_TIME_9;
444 ah->globaltxtimeout = (u32) -1; 444 ah->globaltxtimeout = (u32) -1;
445 ah->power_mode = ATH9K_PM_UNDEFINED; 445 ah->power_mode = ATH9K_PM_UNDEFINED;
446} 446}
@@ -997,8 +997,14 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
997 slottime = 21; 997 slottime = 21;
998 sifstime = 64; 998 sifstime = 64;
999 } else { 999 } else {
1000 eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/common->clockrate; 1000 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
1001 reg = REG_READ(ah, AR_USEC); 1001 eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
1002 reg = AR_USEC_ASYNC_FIFO;
1003 } else {
1004 eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
1005 common->clockrate;
1006 reg = REG_READ(ah, AR_USEC);
1007 }
1002 rx_lat = MS(reg, AR_USEC_RX_LAT); 1008 rx_lat = MS(reg, AR_USEC_RX_LAT);
1003 tx_lat = MS(reg, AR_USEC_TX_LAT); 1009 tx_lat = MS(reg, AR_USEC_TX_LAT);
1004 1010
@@ -2754,6 +2760,7 @@ static struct {
2754 { AR_SREV_VERSION_9271, "9271" }, 2760 { AR_SREV_VERSION_9271, "9271" },
2755 { AR_SREV_VERSION_9300, "9300" }, 2761 { AR_SREV_VERSION_9300, "9300" },
2756 { AR_SREV_VERSION_9330, "9330" }, 2762 { AR_SREV_VERSION_9330, "9330" },
2763 { AR_SREV_VERSION_9340, "9340" },
2757 { AR_SREV_VERSION_9485, "9485" }, 2764 { AR_SREV_VERSION_9485, "9485" },
2758}; 2765};
2759 2766
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 3aa3fb191775..c8af86c795e5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -623,7 +623,7 @@ struct ath_hw_ops {
623 struct ath_tx_status *ts); 623 struct ath_tx_status *ts);
624 void (*set11n_txdesc)(struct ath_hw *ah, void *ds, 624 void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
625 u32 pktLen, enum ath9k_pkt_type type, 625 u32 pktLen, enum ath9k_pkt_type type,
626 u32 txPower, u32 keyIx, 626 u32 txPower, u8 keyIx,
627 enum ath9k_key_type keyType, 627 enum ath9k_key_type keyType,
628 u32 flags); 628 u32 flags);
629 void (*set11n_ratescenario)(struct ath_hw *ah, void *ds, 629 void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d7761d1fc5ba..dd71a5f77516 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -572,6 +572,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
572 mutex_init(&sc->mutex); 572 mutex_init(&sc->mutex);
573#ifdef CONFIG_ATH9K_DEBUGFS 573#ifdef CONFIG_ATH9K_DEBUGFS
574 spin_lock_init(&sc->nodes_lock); 574 spin_lock_init(&sc->nodes_lock);
575 spin_lock_init(&sc->debug.samp_lock);
575 INIT_LIST_HEAD(&sc->nodes); 576 INIT_LIST_HEAD(&sc->nodes);
576#endif 577#endif
577 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); 578 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 153859ccc2a1..acb83bfd05a0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -146,6 +146,7 @@ struct ath_rx_status {
146 u8 rs_moreaggr; 146 u8 rs_moreaggr;
147 u8 rs_num_delims; 147 u8 rs_num_delims;
148 u8 rs_flags; 148 u8 rs_flags;
149 bool is_mybeacon;
149 u32 evm0; 150 u32 evm0;
150 u32 evm1; 151 u32 evm1;
151 u32 evm2; 152 u32 evm2;
@@ -194,7 +195,7 @@ struct ath_htc_rx_status {
194#define ATH9K_RX_DECRYPT_BUSY 0x40 195#define ATH9K_RX_DECRYPT_BUSY 0x40
195 196
196#define ATH9K_RXKEYIX_INVALID ((u8)-1) 197#define ATH9K_RXKEYIX_INVALID ((u8)-1)
197#define ATH9K_TXKEYIX_INVALID ((u32)-1) 198#define ATH9K_TXKEYIX_INVALID ((u8)-1)
198 199
199enum ath9k_phyerr { 200enum ath9k_phyerr {
200 ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */ 201 ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5ac4f3f2ad60..7b7864dfab75 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -546,6 +546,7 @@ set_timer:
546 * The interval must be the shortest necessary to satisfy ANI, 546 * The interval must be the shortest necessary to satisfy ANI,
547 * short calibration and long calibration. 547 * short calibration and long calibration.
548 */ 548 */
549 ath9k_debug_samp_bb_mac(sc);
549 cal_interval = ATH_LONG_CALINTERVAL; 550 cal_interval = ATH_LONG_CALINTERVAL;
550 if (sc->sc_ah->config.enable_ani) 551 if (sc->sc_ah->config.enable_ani)
551 cal_interval = min(cal_interval, 552 cal_interval = min(cal_interval,
@@ -978,6 +979,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
978 979
979 sc->hw_busy_count = 0; 980 sc->hw_busy_count = 0;
980 981
982 ath9k_debug_samp_bb_mac(sc);
981 /* Stop ANI */ 983 /* Stop ANI */
982 984
983 del_timer_sync(&common->ani.timer); 985 del_timer_sync(&common->ani.timer);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ad5f9bd2f0b9..9c7f905f3871 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -937,7 +937,7 @@ static int ath9k_process_rate(struct ath_common *common,
937 * No valid hardware bitrate found -- we should not get here 937 * No valid hardware bitrate found -- we should not get here
938 * because hardware has already validated this frame as OK. 938 * because hardware has already validated this frame as OK.
939 */ 939 */
940 ath_dbg(common, ATH_DBG_XMIT, 940 ath_dbg(common, ATH_DBG_ANY,
941 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 941 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
942 rx_stats->rs_rate); 942 rx_stats->rs_rate);
943 943
@@ -952,23 +952,12 @@ static void ath9k_process_rssi(struct ath_common *common,
952 struct ath_softc *sc = hw->priv; 952 struct ath_softc *sc = hw->priv;
953 struct ath_hw *ah = common->ah; 953 struct ath_hw *ah = common->ah;
954 int last_rssi; 954 int last_rssi;
955 __le16 fc;
956 955
957 if ((ah->opmode != NL80211_IFTYPE_STATION) && 956 if (!rx_stats->is_mybeacon ||
958 (ah->opmode != NL80211_IFTYPE_ADHOC)) 957 ((ah->opmode != NL80211_IFTYPE_STATION) &&
958 (ah->opmode != NL80211_IFTYPE_ADHOC)))
959 return; 959 return;
960 960
961 fc = hdr->frame_control;
962 if (!ieee80211_is_beacon(fc) ||
963 compare_ether_addr(hdr->addr3, common->curbssid)) {
964 /* TODO: This doesn't work well if you have stations
965 * associated to two different APs because curbssid
966 * is just the last AP that any of the stations associated
967 * with.
968 */
969 return;
970 }
971
972 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) 961 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
973 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); 962 ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
974 963
@@ -1838,6 +1827,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1838 1827
1839 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); 1828 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1840 rxs = IEEE80211_SKB_RXCB(hdr_skb); 1829 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1830 if (ieee80211_is_beacon(hdr->frame_control) &&
1831 !compare_ether_addr(hdr->addr3, common->curbssid))
1832 rs.is_mybeacon = true;
1833 else
1834 rs.is_mybeacon = false;
1841 1835
1842 ath_debug_stat_rx(sc, &rs); 1836 ath_debug_stat_rx(sc, &rs);
1843 1837
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index a3b8bbc6c063..17a272f4d8d6 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -619,6 +619,7 @@
619#define AR_D_GBL_IFS_EIFS 0x10b0 619#define AR_D_GBL_IFS_EIFS 0x10b0
620#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF 620#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF
621#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000 621#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000
622#define AR_D_GBL_IFS_EIFS_ASYNC_FIFO 363
622 623
623#define AR_D_GBL_IFS_MISC 0x10f0 624#define AR_D_GBL_IFS_MISC 0x10f0
624#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007 625#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007
@@ -1503,6 +1504,7 @@ enum {
1503#define AR_USEC_TX_LAT_S 14 1504#define AR_USEC_TX_LAT_S 14
1504#define AR_USEC_RX_LAT 0x1F800000 1505#define AR_USEC_RX_LAT 0x1F800000
1505#define AR_USEC_RX_LAT_S 23 1506#define AR_USEC_RX_LAT_S 23
1507#define AR_USEC_ASYNC_FIFO 0x12E00074
1506 1508
1507#define AR_RESET_TSF 0x8020 1509#define AR_RESET_TSF 0x8020
1508#define AR_RESET_TSF_ONCE 0x01000000 1510#define AR_RESET_TSF_ONCE 0x01000000
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5e2982938ffc..68066c56e4e5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,8 +48,9 @@ static u16 bits_per_symbol[][2] = {
48#define IS_HT_RATE(_rate) ((_rate) & 0x80) 48#define IS_HT_RATE(_rate) ((_rate) & 0x80)
49 49
50static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 50static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
51 struct ath_atx_tid *tid, 51 struct ath_atx_tid *tid, struct sk_buff *skb);
52 struct list_head *bf_head); 52static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
53 int tx_flags, struct ath_txq *txq);
53static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
54 struct ath_txq *txq, struct list_head *bf_q, 55 struct ath_txq *txq, struct list_head *bf_q,
55 struct ath_tx_status *ts, int txok, int sendbar); 56 struct ath_tx_status *ts, int txok, int sendbar);
@@ -61,6 +62,10 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
61 int txok, bool update_rc); 62 int txok, bool update_rc);
62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 63static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
63 int seqno); 64 int seqno);
65static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
66 struct ath_txq *txq,
67 struct ath_atx_tid *tid,
68 struct sk_buff *skb);
64 69
65enum { 70enum {
66 MCS_HT20, 71 MCS_HT20,
@@ -129,7 +134,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
129 spin_lock_bh(&txq->axq_lock); 134 spin_lock_bh(&txq->axq_lock);
130 tid->paused = false; 135 tid->paused = false;
131 136
132 if (list_empty(&tid->buf_q)) 137 if (skb_queue_empty(&tid->buf_q))
133 goto unlock; 138 goto unlock;
134 139
135 ath_tx_queue_tid(txq, tid); 140 ath_tx_queue_tid(txq, tid);
@@ -149,6 +154,7 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 154static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
150{ 155{
151 struct ath_txq *txq = tid->ac->txq; 156 struct ath_txq *txq = tid->ac->txq;
157 struct sk_buff *skb;
152 struct ath_buf *bf; 158 struct ath_buf *bf;
153 struct list_head bf_head; 159 struct list_head bf_head;
154 struct ath_tx_status ts; 160 struct ath_tx_status ts;
@@ -159,17 +165,17 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
159 memset(&ts, 0, sizeof(ts)); 165 memset(&ts, 0, sizeof(ts));
160 spin_lock_bh(&txq->axq_lock); 166 spin_lock_bh(&txq->axq_lock);
161 167
162 while (!list_empty(&tid->buf_q)) { 168 while ((skb = __skb_dequeue(&tid->buf_q))) {
163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 169 fi = get_frame_info(skb);
164 list_move_tail(&bf->list, &bf_head); 170 bf = fi->bf;
165 171
166 spin_unlock_bh(&txq->axq_lock); 172 spin_unlock_bh(&txq->axq_lock);
167 fi = get_frame_info(bf->bf_mpdu); 173 if (bf && fi->retries) {
168 if (fi->retries) { 174 list_add_tail(&bf->list, &bf_head);
169 ath_tx_update_baw(sc, tid, fi->seqno); 175 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1); 176 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
171 } else { 177 } else {
172 ath_tx_send_normal(sc, txq, NULL, &bf_head); 178 ath_tx_send_normal(sc, txq, NULL, skb);
173 } 179 }
174 spin_lock_bh(&txq->axq_lock); 180 spin_lock_bh(&txq->axq_lock);
175 } 181 }
@@ -219,6 +225,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
219 struct ath_atx_tid *tid) 225 struct ath_atx_tid *tid)
220 226
221{ 227{
228 struct sk_buff *skb;
222 struct ath_buf *bf; 229 struct ath_buf *bf;
223 struct list_head bf_head; 230 struct list_head bf_head;
224 struct ath_tx_status ts; 231 struct ath_tx_status ts;
@@ -227,16 +234,21 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
227 memset(&ts, 0, sizeof(ts)); 234 memset(&ts, 0, sizeof(ts));
228 INIT_LIST_HEAD(&bf_head); 235 INIT_LIST_HEAD(&bf_head);
229 236
230 for (;;) { 237 while ((skb = __skb_dequeue(&tid->buf_q))) {
231 if (list_empty(&tid->buf_q)) 238 fi = get_frame_info(skb);
232 break; 239 bf = fi->bf;
240
241 if (!bf) {
242 spin_unlock(&txq->axq_lock);
243 ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
244 spin_lock(&txq->axq_lock);
245 continue;
246 }
233 247
234 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 248 list_add_tail(&bf->list, &bf_head);
235 list_move_tail(&bf->list, &bf_head);
236 249
237 fi = get_frame_info(bf->bf_mpdu);
238 if (fi->retries) 250 if (fi->retries)
239 ath_tx_update_baw(sc, tid, fi->seqno); 251 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
240 252
241 spin_unlock(&txq->axq_lock); 253 spin_unlock(&txq->axq_lock);
242 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 254 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -326,7 +338,7 @@ static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
326 338
327 while (bf) { 339 while (bf) {
328 fi = get_frame_info(bf->bf_mpdu); 340 fi = get_frame_info(bf->bf_mpdu);
329 ba_index = ATH_BA_INDEX(seq_st, fi->seqno); 341 ba_index = ATH_BA_INDEX(seq_st, bf->bf_state.seqno);
330 342
331 (*nframes)++; 343 (*nframes)++;
332 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) 344 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
@@ -349,7 +361,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
349 struct ieee80211_tx_info *tx_info; 361 struct ieee80211_tx_info *tx_info;
350 struct ath_atx_tid *tid = NULL; 362 struct ath_atx_tid *tid = NULL;
351 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 363 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
352 struct list_head bf_head, bf_pending; 364 struct list_head bf_head;
365 struct sk_buff_head bf_pending;
353 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; 366 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
354 u32 ba[WME_BA_BMP_SIZE >> 5]; 367 u32 ba[WME_BA_BMP_SIZE >> 5];
355 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 368 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
@@ -422,11 +435,12 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
422 } 435 }
423 } 436 }
424 437
425 INIT_LIST_HEAD(&bf_pending); 438 __skb_queue_head_init(&bf_pending);
426 INIT_LIST_HEAD(&bf_head);
427 439
428 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad); 440 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
429 while (bf) { 441 while (bf) {
442 u16 seqno = bf->bf_state.seqno;
443
430 txfail = txpending = sendbar = 0; 444 txfail = txpending = sendbar = 0;
431 bf_next = bf->bf_next; 445 bf_next = bf->bf_next;
432 446
@@ -434,7 +448,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
434 tx_info = IEEE80211_SKB_CB(skb); 448 tx_info = IEEE80211_SKB_CB(skb);
435 fi = get_frame_info(skb); 449 fi = get_frame_info(skb);
436 450
437 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) { 451 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, seqno))) {
438 /* transmit completion, subframe is 452 /* transmit completion, subframe is
439 * acked by block ack */ 453 * acked by block ack */
440 acked_cnt++; 454 acked_cnt++;
@@ -467,10 +481,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
467 * Make sure the last desc is reclaimed if it 481 * Make sure the last desc is reclaimed if it
468 * not a holding desc. 482 * not a holding desc.
469 */ 483 */
470 if (!bf_last->bf_stale || bf_next != NULL) 484 INIT_LIST_HEAD(&bf_head);
485 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
486 bf_next != NULL || !bf_last->bf_stale)
471 list_move_tail(&bf->list, &bf_head); 487 list_move_tail(&bf->list, &bf_head);
472 else
473 INIT_LIST_HEAD(&bf_head);
474 488
475 if (!txpending || (tid->state & AGGR_CLEANUP)) { 489 if (!txpending || (tid->state & AGGR_CLEANUP)) {
476 /* 490 /*
@@ -478,7 +492,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
478 * block-ack window 492 * block-ack window
479 */ 493 */
480 spin_lock_bh(&txq->axq_lock); 494 spin_lock_bh(&txq->axq_lock);
481 ath_tx_update_baw(sc, tid, fi->seqno); 495 ath_tx_update_baw(sc, tid, seqno);
482 spin_unlock_bh(&txq->axq_lock); 496 spin_unlock_bh(&txq->axq_lock);
483 497
484 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 498 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
@@ -506,7 +520,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
506 */ 520 */
507 if (!tbf) { 521 if (!tbf) {
508 spin_lock_bh(&txq->axq_lock); 522 spin_lock_bh(&txq->axq_lock);
509 ath_tx_update_baw(sc, tid, fi->seqno); 523 ath_tx_update_baw(sc, tid, seqno);
510 spin_unlock_bh(&txq->axq_lock); 524 spin_unlock_bh(&txq->axq_lock);
511 525
512 bf->bf_state.bf_type |= 526 bf->bf_state.bf_type |=
@@ -521,7 +535,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
521 535
522 ath9k_hw_cleartxdesc(sc->sc_ah, 536 ath9k_hw_cleartxdesc(sc->sc_ah,
523 tbf->bf_desc); 537 tbf->bf_desc);
524 list_add_tail(&tbf->list, &bf_head); 538 fi->bf = tbf;
525 } else { 539 } else {
526 /* 540 /*
527 * Clear descriptor status words for 541 * Clear descriptor status words for
@@ -536,21 +550,21 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
536 * Put this buffer to the temporary pending 550 * Put this buffer to the temporary pending
537 * queue to retain ordering 551 * queue to retain ordering
538 */ 552 */
539 list_splice_tail_init(&bf_head, &bf_pending); 553 __skb_queue_tail(&bf_pending, skb);
540 } 554 }
541 555
542 bf = bf_next; 556 bf = bf_next;
543 } 557 }
544 558
545 /* prepend un-acked frames to the beginning of the pending frame queue */ 559 /* prepend un-acked frames to the beginning of the pending frame queue */
546 if (!list_empty(&bf_pending)) { 560 if (!skb_queue_empty(&bf_pending)) {
547 if (an->sleeping) 561 if (an->sleeping)
548 ieee80211_sta_set_tim(sta); 562 ieee80211_sta_set_tim(sta);
549 563
550 spin_lock_bh(&txq->axq_lock); 564 spin_lock_bh(&txq->axq_lock);
551 if (clear_filter) 565 if (clear_filter)
552 tid->ac->clear_ps_filter = true; 566 tid->ac->clear_ps_filter = true;
553 list_splice(&bf_pending, &tid->buf_q); 567 skb_queue_splice(&bf_pending, &tid->buf_q);
554 if (!an->sleeping) 568 if (!an->sleeping)
555 ath_tx_queue_tid(txq, tid); 569 ath_tx_queue_tid(txq, tid);
556 spin_unlock_bh(&txq->axq_lock); 570 spin_unlock_bh(&txq->axq_lock);
@@ -582,7 +596,10 @@ static bool ath_lookup_legacy(struct ath_buf *bf)
582 tx_info = IEEE80211_SKB_CB(skb); 596 tx_info = IEEE80211_SKB_CB(skb);
583 rates = tx_info->control.rates; 597 rates = tx_info->control.rates;
584 598
585 for (i = 3; i >= 0; i--) { 599 for (i = 0; i < 4; i++) {
600 if (!rates[i].count || rates[i].idx < 0)
601 break;
602
586 if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) 603 if (!(rates[i].flags & IEEE80211_TX_RC_MCS))
587 return true; 604 return true;
588 } 605 }
@@ -740,22 +757,33 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
740 int *aggr_len) 757 int *aggr_len)
741{ 758{
742#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 759#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
743 struct ath_buf *bf, *bf_first, *bf_prev = NULL; 760 struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
744 int rl = 0, nframes = 0, ndelim, prev_al = 0; 761 int rl = 0, nframes = 0, ndelim, prev_al = 0;
745 u16 aggr_limit = 0, al = 0, bpad = 0, 762 u16 aggr_limit = 0, al = 0, bpad = 0,
746 al_delta, h_baw = tid->baw_size / 2; 763 al_delta, h_baw = tid->baw_size / 2;
747 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 764 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
748 struct ieee80211_tx_info *tx_info; 765 struct ieee80211_tx_info *tx_info;
749 struct ath_frame_info *fi; 766 struct ath_frame_info *fi;
750 767 struct sk_buff *skb;
751 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); 768 u16 seqno;
752 769
753 do { 770 do {
754 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 771 skb = skb_peek(&tid->buf_q);
755 fi = get_frame_info(bf->bf_mpdu); 772 fi = get_frame_info(skb);
773 bf = fi->bf;
774 if (!fi->bf)
775 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
776
777 if (!bf)
778 continue;
779
780 bf->bf_state.bf_type |= BUF_AMPDU;
781 seqno = bf->bf_state.seqno;
782 if (!bf_first)
783 bf_first = bf;
756 784
757 /* do not step over block-ack window */ 785 /* do not step over block-ack window */
758 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) { 786 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
759 status = ATH_AGGR_BAW_CLOSED; 787 status = ATH_AGGR_BAW_CLOSED;
760 break; 788 break;
761 } 789 }
@@ -803,9 +831,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
803 831
804 /* link buffers of this frame to the aggregate */ 832 /* link buffers of this frame to the aggregate */
805 if (!fi->retries) 833 if (!fi->retries)
806 ath_tx_addto_baw(sc, tid, fi->seqno); 834 ath_tx_addto_baw(sc, tid, seqno);
807 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); 835 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
808 list_move_tail(&bf->list, bf_q); 836
837 __skb_unlink(skb, &tid->buf_q);
838 list_add_tail(&bf->list, bf_q);
809 if (bf_prev) { 839 if (bf_prev) {
810 bf_prev->bf_next = bf; 840 bf_prev->bf_next = bf;
811 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc, 841 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
@@ -813,7 +843,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
813 } 843 }
814 bf_prev = bf; 844 bf_prev = bf;
815 845
816 } while (!list_empty(&tid->buf_q)); 846 } while (!skb_queue_empty(&tid->buf_q));
817 847
818 *aggr_len = al; 848 *aggr_len = al;
819 849
@@ -831,7 +861,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
831 int aggr_len; 861 int aggr_len;
832 862
833 do { 863 do {
834 if (list_empty(&tid->buf_q)) 864 if (skb_queue_empty(&tid->buf_q))
835 return; 865 return;
836 866
837 INIT_LIST_HEAD(&bf_q); 867 INIT_LIST_HEAD(&bf_q);
@@ -952,7 +982,7 @@ bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
952 982
953 spin_lock_bh(&txq->axq_lock); 983 spin_lock_bh(&txq->axq_lock);
954 984
955 if (!list_empty(&tid->buf_q)) 985 if (!skb_queue_empty(&tid->buf_q))
956 buffered = true; 986 buffered = true;
957 987
958 tid->sched = false; 988 tid->sched = false;
@@ -985,7 +1015,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
985 spin_lock_bh(&txq->axq_lock); 1015 spin_lock_bh(&txq->axq_lock);
986 ac->clear_ps_filter = true; 1016 ac->clear_ps_filter = true;
987 1017
988 if (!list_empty(&tid->buf_q) && !tid->paused) { 1018 if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
989 ath_tx_queue_tid(txq, tid); 1019 ath_tx_queue_tid(txq, tid);
990 ath_txq_schedule(sc, txq); 1020 ath_txq_schedule(sc, txq);
991 } 1021 }
@@ -1329,7 +1359,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1329 * add tid to round-robin queue if more frames 1359 * add tid to round-robin queue if more frames
1330 * are pending for the tid 1360 * are pending for the tid
1331 */ 1361 */
1332 if (!list_empty(&tid->buf_q)) 1362 if (!skb_queue_empty(&tid->buf_q))
1333 ath_tx_queue_tid(txq, tid); 1363 ath_tx_queue_tid(txq, tid);
1334 1364
1335 if (tid == last_tid || 1365 if (tid == last_tid ||
@@ -1421,12 +1451,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1421} 1451}
1422 1452
1423static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1453static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1424 struct ath_buf *bf, struct ath_tx_control *txctl) 1454 struct sk_buff *skb, struct ath_tx_control *txctl)
1425{ 1455{
1426 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu); 1456 struct ath_frame_info *fi = get_frame_info(skb);
1427 struct list_head bf_head; 1457 struct list_head bf_head;
1428 1458 struct ath_buf *bf;
1429 bf->bf_state.bf_type |= BUF_AMPDU;
1430 1459
1431 /* 1460 /*
1432 * Do not queue to h/w when any of the following conditions is true: 1461 * Do not queue to h/w when any of the following conditions is true:
@@ -1435,26 +1464,30 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1435 * - seqno is not within block-ack window 1464 * - seqno is not within block-ack window
1436 * - h/w queue depth exceeds low water mark 1465 * - h/w queue depth exceeds low water mark
1437 */ 1466 */
1438 if (!list_empty(&tid->buf_q) || tid->paused || 1467 if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
1439 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) || 1468 !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
1440 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) { 1469 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
1441 /* 1470 /*
1442 * Add this frame to software queue for scheduling later 1471 * Add this frame to software queue for scheduling later
1443 * for aggregation. 1472 * for aggregation.
1444 */ 1473 */
1445 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw); 1474 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
1446 list_add_tail(&bf->list, &tid->buf_q); 1475 __skb_queue_tail(&tid->buf_q, skb);
1447 if (!txctl->an || !txctl->an->sleeping) 1476 if (!txctl->an || !txctl->an->sleeping)
1448 ath_tx_queue_tid(txctl->txq, tid); 1477 ath_tx_queue_tid(txctl->txq, tid);
1449 return; 1478 return;
1450 } 1479 }
1451 1480
1481 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1482 if (!bf)
1483 return;
1484
1485 bf->bf_state.bf_type |= BUF_AMPDU;
1452 INIT_LIST_HEAD(&bf_head); 1486 INIT_LIST_HEAD(&bf_head);
1453 list_add(&bf->list, &bf_head); 1487 list_add(&bf->list, &bf_head);
1454 1488
1455 /* Add sub-frame to BAW */ 1489 /* Add sub-frame to BAW */
1456 if (!fi->retries) 1490 ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
1457 ath_tx_addto_baw(sc, tid, fi->seqno);
1458 1491
1459 /* Queue to h/w without aggregation */ 1492 /* Queue to h/w without aggregation */
1460 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw); 1493 TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
@@ -1464,13 +1497,21 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1464} 1497}
1465 1498
1466static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, 1499static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1467 struct ath_atx_tid *tid, 1500 struct ath_atx_tid *tid, struct sk_buff *skb)
1468 struct list_head *bf_head)
1469{ 1501{
1470 struct ath_frame_info *fi; 1502 struct ath_frame_info *fi = get_frame_info(skb);
1503 struct list_head bf_head;
1471 struct ath_buf *bf; 1504 struct ath_buf *bf;
1472 1505
1473 bf = list_first_entry(bf_head, struct ath_buf, list); 1506 bf = fi->bf;
1507 if (!bf)
1508 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1509
1510 if (!bf)
1511 return;
1512
1513 INIT_LIST_HEAD(&bf_head);
1514 list_add_tail(&bf->list, &bf_head);
1474 bf->bf_state.bf_type &= ~BUF_AMPDU; 1515 bf->bf_state.bf_type &= ~BUF_AMPDU;
1475 1516
1476 /* update starting sequence number for subsequent ADDBA request */ 1517 /* update starting sequence number for subsequent ADDBA request */
@@ -1478,9 +1519,8 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1478 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 1519 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1479 1520
1480 bf->bf_lastbf = bf; 1521 bf->bf_lastbf = bf;
1481 fi = get_frame_info(bf->bf_mpdu);
1482 ath_buf_set_rate(sc, bf, fi->framelen); 1522 ath_buf_set_rate(sc, bf, fi->framelen);
1483 ath_tx_txqaddbuf(sc, txq, bf_head, false); 1523 ath_tx_txqaddbuf(sc, txq, &bf_head, false);
1484 TX_STAT_INC(txq->axq_qnum, queued); 1524 TX_STAT_INC(txq->axq_qnum, queued);
1485} 1525}
1486 1526
@@ -1510,39 +1550,19 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1510static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb, 1550static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1511 int framelen) 1551 int framelen)
1512{ 1552{
1513 struct ath_softc *sc = hw->priv;
1514 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1553 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1515 struct ieee80211_sta *sta = tx_info->control.sta; 1554 struct ieee80211_sta *sta = tx_info->control.sta;
1516 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; 1555 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1517 struct ieee80211_hdr *hdr; 1556 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1518 struct ath_frame_info *fi = get_frame_info(skb); 1557 struct ath_frame_info *fi = get_frame_info(skb);
1519 struct ath_node *an = NULL; 1558 struct ath_node *an = NULL;
1520 struct ath_atx_tid *tid;
1521 enum ath9k_key_type keytype; 1559 enum ath9k_key_type keytype;
1522 u16 seqno = 0;
1523 u8 tidno;
1524 1560
1525 keytype = ath9k_cmn_get_hw_crypto_keytype(skb); 1561 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1526 1562
1527 if (sta) 1563 if (sta)
1528 an = (struct ath_node *) sta->drv_priv; 1564 an = (struct ath_node *) sta->drv_priv;
1529 1565
1530 hdr = (struct ieee80211_hdr *)skb->data;
1531 if (an && ieee80211_is_data_qos(hdr->frame_control) &&
1532 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
1533
1534 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1535
1536 /*
1537 * Override seqno set by upper layer with the one
1538 * in tx aggregation state.
1539 */
1540 tid = ATH_AN_2_TID(an, tidno);
1541 seqno = tid->seq_next;
1542 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1543 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1544 }
1545
1546 memset(fi, 0, sizeof(*fi)); 1566 memset(fi, 0, sizeof(*fi));
1547 if (hw_key) 1567 if (hw_key)
1548 fi->keyix = hw_key->hw_key_idx; 1568 fi->keyix = hw_key->hw_key_idx;
@@ -1552,7 +1572,6 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1552 fi->keyix = ATH9K_TXKEYIX_INVALID; 1572 fi->keyix = ATH9K_TXKEYIX_INVALID;
1553 fi->keytype = keytype; 1573 fi->keytype = keytype;
1554 fi->framelen = framelen; 1574 fi->framelen = framelen;
1555 fi->seqno = seqno;
1556} 1575}
1557 1576
1558static int setup_tx_flags(struct sk_buff *skb) 1577static int setup_tx_flags(struct sk_buff *skb)
@@ -1724,26 +1743,39 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
1724 1743
1725} 1744}
1726 1745
1727static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw, 1746/*
1747 * Assign a descriptor (and sequence number if necessary,
1748 * and map buffer for DMA. Frees skb on error
1749 */
1750static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1728 struct ath_txq *txq, 1751 struct ath_txq *txq,
1752 struct ath_atx_tid *tid,
1729 struct sk_buff *skb) 1753 struct sk_buff *skb)
1730{ 1754{
1731 struct ath_softc *sc = hw->priv;
1732 struct ath_hw *ah = sc->sc_ah; 1755 struct ath_hw *ah = sc->sc_ah;
1733 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1756 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1734 struct ath_frame_info *fi = get_frame_info(skb); 1757 struct ath_frame_info *fi = get_frame_info(skb);
1758 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1735 struct ath_buf *bf; 1759 struct ath_buf *bf;
1736 struct ath_desc *ds; 1760 struct ath_desc *ds;
1737 int frm_type; 1761 int frm_type;
1762 u16 seqno;
1738 1763
1739 bf = ath_tx_get_buffer(sc); 1764 bf = ath_tx_get_buffer(sc);
1740 if (!bf) { 1765 if (!bf) {
1741 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n"); 1766 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
1742 return NULL; 1767 goto error;
1743 } 1768 }
1744 1769
1745 ATH_TXBUF_RESET(bf); 1770 ATH_TXBUF_RESET(bf);
1746 1771
1772 if (tid) {
1773 seqno = tid->seq_next;
1774 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1775 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1776 bf->bf_state.seqno = seqno;
1777 }
1778
1747 bf->bf_flags = setup_tx_flags(skb); 1779 bf->bf_flags = setup_tx_flags(skb);
1748 bf->bf_mpdu = skb; 1780 bf->bf_mpdu = skb;
1749 1781
@@ -1755,7 +1787,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1755 ath_err(ath9k_hw_common(sc->sc_ah), 1787 ath_err(ath9k_hw_common(sc->sc_ah),
1756 "dma_mapping_error() on TX\n"); 1788 "dma_mapping_error() on TX\n");
1757 ath_tx_return_buffer(sc, bf); 1789 ath_tx_return_buffer(sc, bf);
1758 return NULL; 1790 goto error;
1759 } 1791 }
1760 1792
1761 frm_type = get_hw_packet_type(skb); 1793 frm_type = get_hw_packet_type(skb);
@@ -1774,19 +1806,23 @@ static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1774 bf->bf_buf_addr, 1806 bf->bf_buf_addr,
1775 txq->axq_qnum); 1807 txq->axq_qnum);
1776 1808
1809 fi->bf = bf;
1777 1810
1778 return bf; 1811 return bf;
1812
1813error:
1814 dev_kfree_skb_any(skb);
1815 return NULL;
1779} 1816}
1780 1817
1781/* FIXME: tx power */ 1818/* FIXME: tx power */
1782static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, 1819static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1783 struct ath_tx_control *txctl) 1820 struct ath_tx_control *txctl)
1784{ 1821{
1785 struct sk_buff *skb = bf->bf_mpdu;
1786 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1822 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1787 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1823 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1788 struct list_head bf_head;
1789 struct ath_atx_tid *tid = NULL; 1824 struct ath_atx_tid *tid = NULL;
1825 struct ath_buf *bf;
1790 u8 tidno; 1826 u8 tidno;
1791 1827
1792 spin_lock_bh(&txctl->txq->axq_lock); 1828 spin_lock_bh(&txctl->txq->axq_lock);
@@ -1804,10 +1840,11 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1804 * Try aggregation if it's a unicast data frame 1840 * Try aggregation if it's a unicast data frame
1805 * and the destination is HT capable. 1841 * and the destination is HT capable.
1806 */ 1842 */
1807 ath_tx_send_ampdu(sc, tid, bf, txctl); 1843 ath_tx_send_ampdu(sc, tid, skb, txctl);
1808 } else { 1844 } else {
1809 INIT_LIST_HEAD(&bf_head); 1845 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1810 list_add_tail(&bf->list, &bf_head); 1846 if (!bf)
1847 goto out;
1811 1848
1812 bf->bf_state.bfs_paprd = txctl->paprd; 1849 bf->bf_state.bfs_paprd = txctl->paprd;
1813 1850
@@ -1821,9 +1858,10 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1821 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) 1858 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1822 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true); 1859 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1823 1860
1824 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head); 1861 ath_tx_send_normal(sc, txctl->txq, tid, skb);
1825 } 1862 }
1826 1863
1864out:
1827 spin_unlock_bh(&txctl->txq->axq_lock); 1865 spin_unlock_bh(&txctl->txq->axq_lock);
1828} 1866}
1829 1867
@@ -1837,7 +1875,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1837 struct ieee80211_vif *vif = info->control.vif; 1875 struct ieee80211_vif *vif = info->control.vif;
1838 struct ath_softc *sc = hw->priv; 1876 struct ath_softc *sc = hw->priv;
1839 struct ath_txq *txq = txctl->txq; 1877 struct ath_txq *txq = txctl->txq;
1840 struct ath_buf *bf;
1841 int padpos, padsize; 1878 int padpos, padsize;
1842 int frmlen = skb->len + FCS_LEN; 1879 int frmlen = skb->len + FCS_LEN;
1843 int q; 1880 int q;
@@ -1884,10 +1921,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1884 * info are no longer valid (overwritten by the ath_frame_info data. 1921 * info are no longer valid (overwritten by the ath_frame_info data.
1885 */ 1922 */
1886 1923
1887 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
1888 if (unlikely(!bf))
1889 return -ENOMEM;
1890
1891 q = skb_get_queue_mapping(skb); 1924 q = skb_get_queue_mapping(skb);
1892 spin_lock_bh(&txq->axq_lock); 1925 spin_lock_bh(&txq->axq_lock);
1893 if (txq == sc->tx.txq_map[q] && 1926 if (txq == sc->tx.txq_map[q] &&
@@ -1897,8 +1930,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1897 } 1930 }
1898 spin_unlock_bh(&txq->axq_lock); 1931 spin_unlock_bh(&txq->axq_lock);
1899 1932
1900 ath_tx_start_dma(sc, bf, txctl); 1933 ath_tx_start_dma(sc, skb, txctl);
1901
1902 return 0; 1934 return 0;
1903} 1935}
1904 1936
@@ -2391,7 +2423,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2391 tid->sched = false; 2423 tid->sched = false;
2392 tid->paused = false; 2424 tid->paused = false;
2393 tid->state &= ~AGGR_CLEANUP; 2425 tid->state &= ~AGGR_CLEANUP;
2394 INIT_LIST_HEAD(&tid->buf_q); 2426 __skb_queue_head_init(&tid->buf_q);
2395 acno = TID_TO_WME_AC(tidno); 2427 acno = TID_TO_WME_AC(tidno);
2396 tid->ac = &an->ac[acno]; 2428 tid->ac = &an->ac[acno];
2397 tid->state &= ~AGGR_ADDBA_COMPLETE; 2429 tid->state &= ~AGGR_ADDBA_COMPLETE;
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index c325202fdc5f..d9218fe02036 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -57,22 +57,18 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
57} 57}
58EXPORT_SYMBOL(ath_rxbuf_alloc); 58EXPORT_SYMBOL(ath_rxbuf_alloc);
59 59
60int ath_printk(const char *level, struct ath_common *common, 60void ath_printk(const char *level, const char *fmt, ...)
61 const char *fmt, ...)
62{ 61{
63 struct va_format vaf; 62 struct va_format vaf;
64 va_list args; 63 va_list args;
65 int rtn;
66 64
67 va_start(args, fmt); 65 va_start(args, fmt);
68 66
69 vaf.fmt = fmt; 67 vaf.fmt = fmt;
70 vaf.va = &args; 68 vaf.va = &args;
71 69
72 rtn = printk("%sath: %pV", level, &vaf); 70 printk("%sath: %pV", level, &vaf);
73 71
74 va_end(args); 72 va_end(args);
75
76 return rtn;
77} 73}
78EXPORT_SYMBOL(ath_printk); 74EXPORT_SYMBOL(ath_printk);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index df2b7c0856ed..b97a40ed5fff 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -124,12 +124,12 @@ config B43_PHY_LP
124 (802.11a support is optional, and currently disabled). 124 (802.11a support is optional, and currently disabled).
125 125
126config B43_PHY_HT 126config B43_PHY_HT
127 bool "Support for HT-PHY devices (BROKEN)" 127 bool "Support for HT-PHY (high throughput) devices (EXPERIMENTAL)"
128 depends on B43 && BROKEN 128 depends on B43 && EXPERIMENTAL
129 ---help--- 129 ---help---
130 Support for the HT-PHY. 130 Support for the HT-PHY.
131 131
132 Say N, this is BROKEN and crashes driver. 132 Enables support for BCM4331 and possibly other chipsets with that PHY.
133 133
134config B43_PHY_LCN 134config B43_PHY_LCN
135 bool "Support for LCN-PHY devices (BROKEN)" 135 bool "Support for LCN-PHY devices (BROKEN)"
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index c5d890e74a1e..5e45604f0f5d 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -419,33 +419,34 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
419 gfp_t flags = GFP_KERNEL; 419 gfp_t flags = GFP_KERNEL;
420 420
421 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K 421 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
422 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing 422 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
423 * has shown that 4K is sufficient for the latter as long as the buffer 423 * In practice we could use smaller buffers for the latter, but the
424 * does not cross an 8K boundary. 424 * alignment is really important because of the hardware bug. If bit
425 * 425 * 0x00001000 is used in DMA address, some hardware (like BCM4331)
426 * For unknown reasons - possibly a hardware error - the BCM4311 rev 426 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
427 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, 427 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
428 * which accounts for the GFP_DMA flag below. 428 * more than 256 slots for ring.
429 *
430 * The flags here must match the flags in free_ringmemory below!
431 */ 429 */
432 if (ring->type == B43_DMA_64BIT) 430 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
433 flags |= GFP_DMA; 431 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
432
434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 433 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
435 B43_DMA_RINGMEMSIZE, 434 ring_mem_size, &(ring->dmabase),
436 &(ring->dmabase), flags); 435 flags);
437 if (!ring->descbase) { 436 if (!ring->descbase) {
438 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); 437 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
439 return -ENOMEM; 438 return -ENOMEM;
440 } 439 }
441 memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); 440 memset(ring->descbase, 0, ring_mem_size);
442 441
443 return 0; 442 return 0;
444} 443}
445 444
446static void free_ringmemory(struct b43_dmaring *ring) 445static void free_ringmemory(struct b43_dmaring *ring)
447{ 446{
448 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, 447 u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
448 B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
449 dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
449 ring->descbase, ring->dmabase); 450 ring->descbase, ring->dmabase);
450} 451}
451 452
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 7e20b04fa51a..315b96ed1d90 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -161,7 +161,8 @@ struct b43_dmadesc_generic {
161} __packed; 161} __packed;
162 162
163/* Misc DMA constants */ 163/* Misc DMA constants */
164#define B43_DMA_RINGMEMSIZE PAGE_SIZE 164#define B43_DMA32_RINGMEMSIZE 4096
165#define B43_DMA64_RINGMEMSIZE 8192
165/* Offset of frame with actual data */ 166/* Offset of frame with actual data */
166#define B43_DMA0_RX_FW598_FO 38 167#define B43_DMA0_RX_FW598_FO 38
167#define B43_DMA0_RX_FW351_FO 30 168#define B43_DMA0_RX_FW351_FO 30
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index d2b1d1fe202b..172294170df8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4131,10 +4131,13 @@ out_unlock:
4131 * because the core might be gone away while we unlocked the mutex. */ 4131 * because the core might be gone away while we unlocked the mutex. */
4132static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) 4132static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
4133{ 4133{
4134 struct b43_wl *wl = dev->wl; 4134 struct b43_wl *wl;
4135 struct b43_wldev *orig_dev; 4135 struct b43_wldev *orig_dev;
4136 u32 mask; 4136 u32 mask;
4137 4137
4138 if (!dev)
4139 return NULL;
4140 wl = dev->wl;
4138redo: 4141redo:
4139 if (!dev || b43_status(dev) < B43_STAT_STARTED) 4142 if (!dev || b43_status(dev) < B43_STAT_STARTED)
4140 return dev; 4143 return dev;
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ccdbed567171..4766c3a1a2f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -43,6 +43,8 @@
43#include "iwl-agn.h" 43#include "iwl-agn.h"
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-agn-hw.h" 45#include "iwl-agn-hw.h"
46#include "iwl-shared.h"
47#include "iwl-pci.h"
46 48
47/* Highest firmware API version supported */ 49/* Highest firmware API version supported */
48#define IWL1000_UCODE_API_MAX 6 50#define IWL1000_UCODE_API_MAX 6
@@ -76,21 +78,21 @@
76static void iwl1000_set_ct_threshold(struct iwl_priv *priv) 78static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
77{ 79{
78 /* want Celsius */ 80 /* want Celsius */
79 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; 81 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
80 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; 82 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
81} 83}
82 84
83/* NIC configuration for 1000 series */ 85/* NIC configuration for 1000 series */
84static void iwl1000_nic_config(struct iwl_priv *priv) 86static void iwl1000_nic_config(struct iwl_priv *priv)
85{ 87{
86 /* set CSR_HW_CONFIG_REG for uCode use */ 88 /* set CSR_HW_CONFIG_REG for uCode use */
87 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 89 iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
88 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 90 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
89 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 91 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
90 92
91 /* Setting digital SVR for 1000 card to 1.32V */ 93 /* Setting digital SVR for 1000 card to 1.32V */
92 /* locking is acquired in iwl_set_bits_mask_prph() function */ 94 /* locking is acquired in iwl_set_bits_mask_prph() function */
93 iwl_set_bits_mask_prph(priv, APMG_DIGITAL_SVR_REG, 95 iwl_set_bits_mask_prph(bus(priv), APMG_DIGITAL_SVR_REG,
94 APMG_SVR_DIGITAL_VOLTAGE_1_32, 96 APMG_SVR_DIGITAL_VOLTAGE_1_32,
95 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK); 97 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
96} 98}
@@ -127,43 +129,39 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
127 priv->cfg->base_params->num_of_queues = 129 priv->cfg->base_params->num_of_queues =
128 iwlagn_mod_params.num_of_queues; 130 iwlagn_mod_params.num_of_queues;
129 131
130 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 132 hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
131 priv->hw_params.scd_bc_tbls_size = 133 hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
132 priv->cfg->base_params->num_of_queues *
133 sizeof(struct iwlagn_scd_bc_tbl);
134 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
135 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
136 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; 134 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
137 135
138 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 136 hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
139 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 137 hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
140 138
141 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ); 139 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
142 140
143 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 141 hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
144 if (priv->cfg->rx_with_siso_diversity) 142 if (priv->cfg->rx_with_siso_diversity)
145 priv->hw_params.rx_chains_num = 1; 143 hw_params(priv).rx_chains_num = 1;
146 else 144 else
147 priv->hw_params.rx_chains_num = 145 hw_params(priv).rx_chains_num =
148 num_of_ant(priv->cfg->valid_rx_ant); 146 num_of_ant(priv->cfg->valid_rx_ant);
149 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 147 hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
150 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 148 hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
151 149
152 iwl1000_set_ct_threshold(priv); 150 iwl1000_set_ct_threshold(priv);
153 151
154 /* Set initial sensitivity parameters */ 152 /* Set initial sensitivity parameters */
155 /* Set initial calibration set */ 153 /* Set initial calibration set */
156 priv->hw_params.sens = &iwl1000_sensitivity; 154 hw_params(priv).sens = &iwl1000_sensitivity;
157 priv->hw_params.calib_init_cfg = 155 hw_params(priv).calib_init_cfg =
158 BIT(IWL_CALIB_XTAL) | 156 BIT(IWL_CALIB_XTAL) |
159 BIT(IWL_CALIB_LO) | 157 BIT(IWL_CALIB_LO) |
160 BIT(IWL_CALIB_TX_IQ) | 158 BIT(IWL_CALIB_TX_IQ) |
161 BIT(IWL_CALIB_TX_IQ_PERD) | 159 BIT(IWL_CALIB_TX_IQ_PERD) |
162 BIT(IWL_CALIB_BASE_BAND); 160 BIT(IWL_CALIB_BASE_BAND);
163 if (priv->cfg->need_dc_calib) 161 if (priv->cfg->need_dc_calib)
164 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC); 162 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
165 163
166 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 164 hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
167 165
168 return 0; 166 return 0;
169} 167}
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 54d931d614fb..764d3104e128 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -44,6 +44,8 @@
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-agn-hw.h" 45#include "iwl-agn-hw.h"
46#include "iwl-6000-hw.h" 46#include "iwl-6000-hw.h"
47#include "iwl-shared.h"
48#include "iwl-pci.h"
47 49
48/* Highest firmware API version supported */ 50/* Highest firmware API version supported */
49#define IWL2030_UCODE_API_MAX 6 51#define IWL2030_UCODE_API_MAX 6
@@ -78,8 +80,8 @@
78static void iwl2000_set_ct_threshold(struct iwl_priv *priv) 80static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
79{ 81{
80 /* want Celsius */ 82 /* want Celsius */
81 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; 83 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
82 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; 84 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
83} 85}
84 86
85/* NIC configuration for 2000 series */ 87/* NIC configuration for 2000 series */
@@ -88,7 +90,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
88 iwl_rf_config(priv); 90 iwl_rf_config(priv);
89 91
90 if (priv->cfg->iq_invert) 92 if (priv->cfg->iq_invert)
91 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 93 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
92 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER); 94 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
93} 95}
94 96
@@ -124,44 +126,40 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
124 priv->cfg->base_params->num_of_queues = 126 priv->cfg->base_params->num_of_queues =
125 iwlagn_mod_params.num_of_queues; 127 iwlagn_mod_params.num_of_queues;
126 128
127 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 129 hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
128 priv->hw_params.scd_bc_tbls_size = 130 hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
129 priv->cfg->base_params->num_of_queues *
130 sizeof(struct iwlagn_scd_bc_tbl);
131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
132 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
133 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; 131 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
134 132
135 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; 133 hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
136 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; 134 hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
137 135
138 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ); 136 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
139 137
140 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 138 hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
141 if (priv->cfg->rx_with_siso_diversity) 139 if (priv->cfg->rx_with_siso_diversity)
142 priv->hw_params.rx_chains_num = 1; 140 hw_params(priv).rx_chains_num = 1;
143 else 141 else
144 priv->hw_params.rx_chains_num = 142 hw_params(priv).rx_chains_num =
145 num_of_ant(priv->cfg->valid_rx_ant); 143 num_of_ant(priv->cfg->valid_rx_ant);
146 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 144 hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
147 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 145 hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
148 146
149 iwl2000_set_ct_threshold(priv); 147 iwl2000_set_ct_threshold(priv);
150 148
151 /* Set initial sensitivity parameters */ 149 /* Set initial sensitivity parameters */
152 /* Set initial calibration set */ 150 /* Set initial calibration set */
153 priv->hw_params.sens = &iwl2000_sensitivity; 151 hw_params(priv).sens = &iwl2000_sensitivity;
154 priv->hw_params.calib_init_cfg = 152 hw_params(priv).calib_init_cfg =
155 BIT(IWL_CALIB_XTAL) | 153 BIT(IWL_CALIB_XTAL) |
156 BIT(IWL_CALIB_LO) | 154 BIT(IWL_CALIB_LO) |
157 BIT(IWL_CALIB_TX_IQ) | 155 BIT(IWL_CALIB_TX_IQ) |
158 BIT(IWL_CALIB_BASE_BAND); 156 BIT(IWL_CALIB_BASE_BAND);
159 if (priv->cfg->need_dc_calib) 157 if (priv->cfg->need_dc_calib)
160 priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; 158 hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
161 if (priv->cfg->need_temp_offset_calib) 159 if (priv->cfg->need_temp_offset_calib)
162 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); 160 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
163 161
164 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 162 hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
165 163
166 return 0; 164 return 0;
167} 165}
@@ -179,7 +177,7 @@ static struct iwl_lib_ops iwl2000_lib = {
179 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 177 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
180 EEPROM_REGULATORY_BAND_NO_HT40, 178 EEPROM_REGULATORY_BAND_NO_HT40,
181 }, 179 },
182 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 180 .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
183 }, 181 },
184 .temperature = iwlagn_temperature, 182 .temperature = iwlagn_temperature,
185}; 183};
@@ -200,7 +198,7 @@ static struct iwl_lib_ops iwl2030_lib = {
200 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 198 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
201 EEPROM_REGULATORY_BAND_NO_HT40, 199 EEPROM_REGULATORY_BAND_NO_HT40,
202 }, 200 },
203 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 201 .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
204 }, 202 },
205 .temperature = iwlagn_temperature, 203 .temperature = iwlagn_temperature,
206}; 204};
@@ -284,6 +282,11 @@ struct iwl_cfg iwl2000_2bg_cfg = {
284 IWL_DEVICE_2000, 282 IWL_DEVICE_2000,
285}; 283};
286 284
285struct iwl_cfg iwl2000_2bgn_d_cfg = {
286 .name = "2000D Series 2x2 BGN",
287 IWL_DEVICE_2000,
288};
289
287#define IWL_DEVICE_2030 \ 290#define IWL_DEVICE_2030 \
288 .fw_name_pre = IWL2030_FW_PRE, \ 291 .fw_name_pre = IWL2030_FW_PRE, \
289 .ucode_api_max = IWL2030_UCODE_API_MAX, \ 292 .ucode_api_max = IWL2030_UCODE_API_MAX, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index a9adee5634d8..7cb4d69e0c37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -46,6 +46,8 @@
46#include "iwl-agn-hw.h" 46#include "iwl-agn-hw.h"
47#include "iwl-5000-hw.h" 47#include "iwl-5000-hw.h"
48#include "iwl-trans.h" 48#include "iwl-trans.h"
49#include "iwl-shared.h"
50#include "iwl-pci.h"
49 51
50/* Highest firmware API version supported */ 52/* Highest firmware API version supported */
51#define IWL5000_UCODE_API_MAX 5 53#define IWL5000_UCODE_API_MAX 5
@@ -68,18 +70,18 @@ static void iwl5000_nic_config(struct iwl_priv *priv)
68 70
69 iwl_rf_config(priv); 71 iwl_rf_config(priv);
70 72
71 spin_lock_irqsave(&priv->lock, flags); 73 spin_lock_irqsave(&priv->shrd->lock, flags);
72 74
73 /* W/A : NIC is stuck in a reset state after Early PCIe power off 75 /* W/A : NIC is stuck in a reset state after Early PCIe power off
74 * (PCIe power is lost before PERST# is asserted), 76 * (PCIe power is lost before PERST# is asserted),
75 * causing ME FW to lose ownership and not being able to obtain it back. 77 * causing ME FW to lose ownership and not being able to obtain it back.
76 */ 78 */
77 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 79 iwl_set_bits_mask_prph(bus(priv), APMG_PS_CTRL_REG,
78 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS, 80 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
79 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); 81 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
80 82
81 83
82 spin_unlock_irqrestore(&priv->lock, flags); 84 spin_unlock_irqrestore(&priv->shrd->lock, flags);
83} 85}
84 86
85static struct iwl_sensitivity_ranges iwl5000_sensitivity = { 87static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
@@ -139,13 +141,13 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
139 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) - 141 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
140 iwl_temp_calib_to_offset(priv); 142 iwl_temp_calib_to_offset(priv);
141 143
142 priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef; 144 hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
143} 145}
144 146
145static void iwl5000_set_ct_threshold(struct iwl_priv *priv) 147static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
146{ 148{
147 /* want Celsius */ 149 /* want Celsius */
148 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; 150 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
149} 151}
150 152
151static int iwl5000_hw_set_hw_params(struct iwl_priv *priv) 153static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
@@ -155,38 +157,34 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
155 priv->cfg->base_params->num_of_queues = 157 priv->cfg->base_params->num_of_queues =
156 iwlagn_mod_params.num_of_queues; 158 iwlagn_mod_params.num_of_queues;
157 159
158 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 160 hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
159 priv->hw_params.scd_bc_tbls_size = 161 hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
160 priv->cfg->base_params->num_of_queues *
161 sizeof(struct iwlagn_scd_bc_tbl);
162 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
163 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
164 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; 162 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
165 163
166 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 164 hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
167 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 165 hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
168 166
169 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 167 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
170 BIT(IEEE80211_BAND_5GHZ); 168 BIT(IEEE80211_BAND_5GHZ);
171 169
172 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 170 hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
173 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 171 hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
174 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 172 hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
175 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 173 hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
176 174
177 iwl5000_set_ct_threshold(priv); 175 iwl5000_set_ct_threshold(priv);
178 176
179 /* Set initial sensitivity parameters */ 177 /* Set initial sensitivity parameters */
180 /* Set initial calibration set */ 178 /* Set initial calibration set */
181 priv->hw_params.sens = &iwl5000_sensitivity; 179 hw_params(priv).sens = &iwl5000_sensitivity;
182 priv->hw_params.calib_init_cfg = 180 hw_params(priv).calib_init_cfg =
183 BIT(IWL_CALIB_XTAL) | 181 BIT(IWL_CALIB_XTAL) |
184 BIT(IWL_CALIB_LO) | 182 BIT(IWL_CALIB_LO) |
185 BIT(IWL_CALIB_TX_IQ) | 183 BIT(IWL_CALIB_TX_IQ) |
186 BIT(IWL_CALIB_TX_IQ_PERD) | 184 BIT(IWL_CALIB_TX_IQ_PERD) |
187 BIT(IWL_CALIB_BASE_BAND); 185 BIT(IWL_CALIB_BASE_BAND);
188 186
189 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 187 hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
190 188
191 return 0; 189 return 0;
192} 190}
@@ -198,38 +196,34 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
198 priv->cfg->base_params->num_of_queues = 196 priv->cfg->base_params->num_of_queues =
199 iwlagn_mod_params.num_of_queues; 197 iwlagn_mod_params.num_of_queues;
200 198
201 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 199 hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
202 priv->hw_params.scd_bc_tbls_size = 200 hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
203 priv->cfg->base_params->num_of_queues *
204 sizeof(struct iwlagn_scd_bc_tbl);
205 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
206 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
207 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; 201 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
208 202
209 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE; 203 hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
210 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE; 204 hw_params(priv).max_inst_size = IWLAGN_RTC_INST_SIZE;
211 205
212 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 206 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
213 BIT(IEEE80211_BAND_5GHZ); 207 BIT(IEEE80211_BAND_5GHZ);
214 208
215 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 209 hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
216 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 210 hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
217 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 211 hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
218 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 212 hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
219 213
220 iwl5150_set_ct_threshold(priv); 214 iwl5150_set_ct_threshold(priv);
221 215
222 /* Set initial sensitivity parameters */ 216 /* Set initial sensitivity parameters */
223 /* Set initial calibration set */ 217 /* Set initial calibration set */
224 priv->hw_params.sens = &iwl5150_sensitivity; 218 hw_params(priv).sens = &iwl5150_sensitivity;
225 priv->hw_params.calib_init_cfg = 219 hw_params(priv).calib_init_cfg =
226 BIT(IWL_CALIB_LO) | 220 BIT(IWL_CALIB_LO) |
227 BIT(IWL_CALIB_TX_IQ) | 221 BIT(IWL_CALIB_TX_IQ) |
228 BIT(IWL_CALIB_BASE_BAND); 222 BIT(IWL_CALIB_BASE_BAND);
229 if (priv->cfg->need_dc_calib) 223 if (priv->cfg->need_dc_calib)
230 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_DC); 224 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
231 225
232 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 226 hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
233 227
234 return 0; 228 return 0;
235} 229}
@@ -314,7 +308,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
314 return -EFAULT; 308 return -EFAULT;
315 } 309 }
316 310
317 return trans_send_cmd(&priv->trans, &hcmd); 311 return iwl_trans_send_cmd(trans(priv), &hcmd);
318} 312}
319 313
320static struct iwl_lib_ops iwl5000_lib = { 314static struct iwl_lib_ops iwl5000_lib = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 339de88d9ae2..2a98e65ca84c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -45,6 +45,8 @@
45#include "iwl-agn-hw.h" 45#include "iwl-agn-hw.h"
46#include "iwl-6000-hw.h" 46#include "iwl-6000-hw.h"
47#include "iwl-trans.h" 47#include "iwl-trans.h"
48#include "iwl-shared.h"
49#include "iwl-pci.h"
48 50
49/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
50#define IWL6000_UCODE_API_MAX 4 52#define IWL6000_UCODE_API_MAX 4
@@ -74,15 +76,15 @@
74static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 76static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
75{ 77{
76 /* want Celsius */ 78 /* want Celsius */
77 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD; 79 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
78 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD; 80 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
79} 81}
80 82
81static void iwl6050_additional_nic_config(struct iwl_priv *priv) 83static void iwl6050_additional_nic_config(struct iwl_priv *priv)
82{ 84{
83 /* Indicate calibration version to uCode. */ 85 /* Indicate calibration version to uCode. */
84 if (iwlagn_eeprom_calib_version(priv) >= 6) 86 if (iwlagn_eeprom_calib_version(priv) >= 6)
85 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 87 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
86 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 88 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
87} 89}
88 90
@@ -90,9 +92,9 @@ static void iwl6150_additional_nic_config(struct iwl_priv *priv)
90{ 92{
91 /* Indicate calibration version to uCode. */ 93 /* Indicate calibration version to uCode. */
92 if (iwlagn_eeprom_calib_version(priv) >= 6) 94 if (iwlagn_eeprom_calib_version(priv) >= 6)
93 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 95 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
94 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6); 96 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
95 iwl_set_bit(priv, CSR_GP_DRIVER_REG, 97 iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
96 CSR_GP_DRIVER_REG_BIT_6050_1x2); 98 CSR_GP_DRIVER_REG_BIT_6050_1x2);
97} 99}
98 100
@@ -104,7 +106,7 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
104 /* no locking required for register write */ 106 /* no locking required for register write */
105 if (priv->cfg->pa_type == IWL_PA_INTERNAL) { 107 if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
106 /* 2x2 IPA phy type */ 108 /* 2x2 IPA phy type */
107 iwl_write32(priv, CSR_GP_DRIVER_REG, 109 iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
108 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); 110 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
109 } 111 }
110 /* do additional nic configuration if needed */ 112 /* do additional nic configuration if needed */
@@ -144,45 +146,41 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
144 priv->cfg->base_params->num_of_queues = 146 priv->cfg->base_params->num_of_queues =
145 iwlagn_mod_params.num_of_queues; 147 iwlagn_mod_params.num_of_queues;
146 148
147 priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues; 149 hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
148 priv->hw_params.scd_bc_tbls_size = 150 hw_params(priv).max_stations = IWLAGN_STATION_COUNT;
149 priv->cfg->base_params->num_of_queues *
150 sizeof(struct iwlagn_scd_bc_tbl);
151 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
152 priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
153 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID; 151 priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
154 152
155 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE; 153 hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
156 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE; 154 hw_params(priv).max_inst_size = IWL60_RTC_INST_SIZE;
157 155
158 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 156 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
159 BIT(IEEE80211_BAND_5GHZ); 157 BIT(IEEE80211_BAND_5GHZ);
160 158
161 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 159 hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
162 if (priv->cfg->rx_with_siso_diversity) 160 if (priv->cfg->rx_with_siso_diversity)
163 priv->hw_params.rx_chains_num = 1; 161 hw_params(priv).rx_chains_num = 1;
164 else 162 else
165 priv->hw_params.rx_chains_num = 163 hw_params(priv).rx_chains_num =
166 num_of_ant(priv->cfg->valid_rx_ant); 164 num_of_ant(priv->cfg->valid_rx_ant);
167 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 165 hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
168 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 166 hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
169 167
170 iwl6000_set_ct_threshold(priv); 168 iwl6000_set_ct_threshold(priv);
171 169
172 /* Set initial sensitivity parameters */ 170 /* Set initial sensitivity parameters */
173 /* Set initial calibration set */ 171 /* Set initial calibration set */
174 priv->hw_params.sens = &iwl6000_sensitivity; 172 hw_params(priv).sens = &iwl6000_sensitivity;
175 priv->hw_params.calib_init_cfg = 173 hw_params(priv).calib_init_cfg =
176 BIT(IWL_CALIB_XTAL) | 174 BIT(IWL_CALIB_XTAL) |
177 BIT(IWL_CALIB_LO) | 175 BIT(IWL_CALIB_LO) |
178 BIT(IWL_CALIB_TX_IQ) | 176 BIT(IWL_CALIB_TX_IQ) |
179 BIT(IWL_CALIB_BASE_BAND); 177 BIT(IWL_CALIB_BASE_BAND);
180 if (priv->cfg->need_dc_calib) 178 if (priv->cfg->need_dc_calib)
181 priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX; 179 hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
182 if (priv->cfg->need_temp_offset_calib) 180 if (priv->cfg->need_temp_offset_calib)
183 priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET); 181 hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
184 182
185 priv->hw_params.beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS; 183 hw_params(priv).beacon_time_tsf_bits = IWLAGN_EXT_BEACON_TIME_POS;
186 184
187 return 0; 185 return 0;
188} 186}
@@ -255,7 +253,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
255 return -EFAULT; 253 return -EFAULT;
256 } 254 }
257 255
258 return trans_send_cmd(&priv->trans, &hcmd); 256 return iwl_trans_send_cmd(trans(priv), &hcmd);
259} 257}
260 258
261static struct iwl_lib_ops iwl6000_lib = { 259static struct iwl_lib_ops iwl6000_lib = {
@@ -272,7 +270,7 @@ static struct iwl_lib_ops iwl6000_lib = {
272 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 270 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
273 EEPROM_REG_BAND_52_HT40_CHANNELS 271 EEPROM_REG_BAND_52_HT40_CHANNELS
274 }, 272 },
275 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 273 .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
276 }, 274 },
277 .temperature = iwlagn_temperature, 275 .temperature = iwlagn_temperature,
278}; 276};
@@ -294,7 +292,7 @@ static struct iwl_lib_ops iwl6030_lib = {
294 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 292 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
295 EEPROM_REG_BAND_52_HT40_CHANNELS 293 EEPROM_REG_BAND_52_HT40_CHANNELS
296 }, 294 },
297 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 295 .update_enhanced_txpower = iwl_eeprom_enhanced_txpower,
298 }, 296 },
299 .temperature = iwlagn_temperature, 297 .temperature = iwlagn_temperature,
300}; 298};
@@ -395,6 +393,12 @@ struct iwl_cfg iwl6005_2bg_cfg = {
395 IWL_DEVICE_6005, 393 IWL_DEVICE_6005,
396}; 394};
397 395
396struct iwl_cfg iwl6005_2agn_sff_cfg = {
397 .name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
398 IWL_DEVICE_6005,
399 .ht_params = &iwl6000_ht_params,
400};
401
398#define IWL_DEVICE_6030 \ 402#define IWL_DEVICE_6030 \
399 .fw_name_pre = IWL6030_FW_PRE, \ 403 .fw_name_pre = IWL6030_FW_PRE, \
400 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 404 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 1789e3af8101..b725f6970dee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -93,12 +93,12 @@ int iwl_send_calib_results(struct iwl_priv *priv)
93 }; 93 };
94 94
95 for (i = 0; i < IWL_CALIB_MAX; i++) { 95 for (i = 0; i < IWL_CALIB_MAX; i++) {
96 if ((BIT(i) & priv->hw_params.calib_init_cfg) && 96 if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
97 priv->calib_results[i].buf) { 97 priv->calib_results[i].buf) {
98 hcmd.len[0] = priv->calib_results[i].buf_len; 98 hcmd.len[0] = priv->calib_results[i].buf_len;
99 hcmd.data[0] = priv->calib_results[i].buf; 99 hcmd.data[0] = priv->calib_results[i].buf;
100 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; 100 hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
101 ret = trans_send_cmd(&priv->trans, &hcmd); 101 ret = iwl_trans_send_cmd(trans(priv), &hcmd);
102 if (ret) { 102 if (ret) {
103 IWL_ERR(priv, "Error %d iteration %d\n", 103 IWL_ERR(priv, "Error %d iteration %d\n",
104 ret, i); 104 ret, i);
@@ -174,7 +174,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
174 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; 174 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
175 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; 175 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
176 struct iwl_sensitivity_data *data = NULL; 176 struct iwl_sensitivity_data *data = NULL;
177 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 177 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
178 178
179 data = &(priv->sensitivity_data); 179 data = &(priv->sensitivity_data);
180 180
@@ -357,7 +357,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
357 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; 357 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
358 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; 358 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
359 struct iwl_sensitivity_data *data = NULL; 359 struct iwl_sensitivity_data *data = NULL;
360 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 360 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
361 361
362 data = &(priv->sensitivity_data); 362 data = &(priv->sensitivity_data);
363 363
@@ -484,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
484 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 484 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
485 sizeof(u16)*HD_TABLE_SIZE); 485 sizeof(u16)*HD_TABLE_SIZE);
486 486
487 return trans_send_cmd(&priv->trans, &cmd_out); 487 return iwl_trans_send_cmd(trans(priv), &cmd_out);
488} 488}
489 489
490/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 490/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -573,7 +573,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
573 &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]), 573 &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
574 sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES); 574 sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
575 575
576 return trans_send_cmd(&priv->trans, &cmd_out); 576 return iwl_trans_send_cmd(trans(priv), &cmd_out);
577} 577}
578 578
579void iwl_init_sensitivity(struct iwl_priv *priv) 579void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -581,7 +581,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
581 int ret = 0; 581 int ret = 0;
582 int i; 582 int i;
583 struct iwl_sensitivity_data *data = NULL; 583 struct iwl_sensitivity_data *data = NULL;
584 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens; 584 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens;
585 585
586 if (priv->disable_sens_cal) 586 if (priv->disable_sens_cal)
587 return; 587 return;
@@ -658,13 +658,13 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
658 return; 658 return;
659 } 659 }
660 660
661 spin_lock_irqsave(&priv->lock, flags); 661 spin_lock_irqsave(&priv->shrd->lock, flags);
662 rx_info = &priv->statistics.rx_non_phy; 662 rx_info = &priv->statistics.rx_non_phy;
663 ofdm = &priv->statistics.rx_ofdm; 663 ofdm = &priv->statistics.rx_ofdm;
664 cck = &priv->statistics.rx_cck; 664 cck = &priv->statistics.rx_cck;
665 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 665 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
666 IWL_DEBUG_CALIB(priv, "<< invalid data.\n"); 666 IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
667 spin_unlock_irqrestore(&priv->lock, flags); 667 spin_unlock_irqrestore(&priv->shrd->lock, flags);
668 return; 668 return;
669 } 669 }
670 670
@@ -688,7 +688,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
688 statis.beacon_energy_c = 688 statis.beacon_energy_c =
689 le32_to_cpu(rx_info->beacon_energy_c); 689 le32_to_cpu(rx_info->beacon_energy_c);
690 690
691 spin_unlock_irqrestore(&priv->lock, flags); 691 spin_unlock_irqrestore(&priv->shrd->lock, flags);
692 692
693 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); 693 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
694 694
@@ -821,21 +821,21 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
821 * To be safe, simply mask out any chains that we know 821 * To be safe, simply mask out any chains that we know
822 * are not on the device. 822 * are not on the device.
823 */ 823 */
824 active_chains &= priv->hw_params.valid_rx_ant; 824 active_chains &= hw_params(priv).valid_rx_ant;
825 825
826 num_tx_chains = 0; 826 num_tx_chains = 0;
827 for (i = 0; i < NUM_RX_CHAINS; i++) { 827 for (i = 0; i < NUM_RX_CHAINS; i++) {
828 /* loops on all the bits of 828 /* loops on all the bits of
829 * priv->hw_setting.valid_tx_ant */ 829 * priv->hw_setting.valid_tx_ant */
830 u8 ant_msk = (1 << i); 830 u8 ant_msk = (1 << i);
831 if (!(priv->hw_params.valid_tx_ant & ant_msk)) 831 if (!(hw_params(priv).valid_tx_ant & ant_msk))
832 continue; 832 continue;
833 833
834 num_tx_chains++; 834 num_tx_chains++;
835 if (data->disconn_array[i] == 0) 835 if (data->disconn_array[i] == 0)
836 /* there is a Tx antenna connected */ 836 /* there is a Tx antenna connected */
837 break; 837 break;
838 if (num_tx_chains == priv->hw_params.tx_chains_num && 838 if (num_tx_chains == hw_params(priv).tx_chains_num &&
839 data->disconn_array[i]) { 839 data->disconn_array[i]) {
840 /* 840 /*
841 * If all chains are disconnected 841 * If all chains are disconnected
@@ -852,12 +852,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
852 } 852 }
853 } 853 }
854 854
855 if (active_chains != priv->hw_params.valid_rx_ant && 855 if (active_chains != hw_params(priv).valid_rx_ant &&
856 active_chains != priv->chain_noise_data.active_chains) 856 active_chains != priv->chain_noise_data.active_chains)
857 IWL_DEBUG_CALIB(priv, 857 IWL_DEBUG_CALIB(priv,
858 "Detected that not all antennas are connected! " 858 "Detected that not all antennas are connected! "
859 "Connected: %#x, valid: %#x.\n", 859 "Connected: %#x, valid: %#x.\n",
860 active_chains, priv->hw_params.valid_rx_ant); 860 active_chains,
861 hw_params(priv).valid_rx_ant);
861 862
862 /* Save for use within RXON, TX, SCAN commands, etc. */ 863 /* Save for use within RXON, TX, SCAN commands, etc. */
863 data->active_chains = active_chains; 864 data->active_chains = active_chains;
@@ -917,7 +918,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
917 priv->phy_calib_chain_noise_gain_cmd); 918 priv->phy_calib_chain_noise_gain_cmd);
918 cmd.delta_gain_1 = data->delta_gain_code[1]; 919 cmd.delta_gain_1 = data->delta_gain_code[1];
919 cmd.delta_gain_2 = data->delta_gain_code[2]; 920 cmd.delta_gain_2 = data->delta_gain_code[2];
920 trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD, 921 iwl_trans_send_cmd_pdu(trans(priv), REPLY_PHY_CALIBRATION_CMD,
921 CMD_ASYNC, sizeof(cmd), &cmd); 922 CMD_ASYNC, sizeof(cmd), &cmd);
922 923
923 data->radio_write = 1; 924 data->radio_write = 1;
@@ -975,13 +976,13 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
975 return; 976 return;
976 } 977 }
977 978
978 spin_lock_irqsave(&priv->lock, flags); 979 spin_lock_irqsave(&priv->shrd->lock, flags);
979 980
980 rx_info = &priv->statistics.rx_non_phy; 981 rx_info = &priv->statistics.rx_non_phy;
981 982
982 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) { 983 if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
983 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n"); 984 IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
984 spin_unlock_irqrestore(&priv->lock, flags); 985 spin_unlock_irqrestore(&priv->shrd->lock, flags);
985 return; 986 return;
986 } 987 }
987 988
@@ -996,7 +997,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
996 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) { 997 if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
997 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n", 998 IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
998 rxon_chnum, rxon_band24); 999 rxon_chnum, rxon_band24);
999 spin_unlock_irqrestore(&priv->lock, flags); 1000 spin_unlock_irqrestore(&priv->shrd->lock, flags);
1000 return; 1001 return;
1001 } 1002 }
1002 1003
@@ -1015,7 +1016,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1015 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER; 1016 chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
1016 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER; 1017 chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
1017 1018
1018 spin_unlock_irqrestore(&priv->lock, flags); 1019 spin_unlock_irqrestore(&priv->shrd->lock, flags);
1019 1020
1020 data->beacon_count++; 1021 data->beacon_count++;
1021 1022
@@ -1046,7 +1047,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1046 priv->cfg->bt_params->advanced_bt_coexist) { 1047 priv->cfg->bt_params->advanced_bt_coexist) {
1047 /* Disable disconnected antenna algorithm for advanced 1048 /* Disable disconnected antenna algorithm for advanced
1048 bt coex, assuming valid antennas are connected */ 1049 bt coex, assuming valid antennas are connected */
1049 data->active_chains = priv->hw_params.valid_rx_ant; 1050 data->active_chains = hw_params(priv).valid_rx_ant;
1050 for (i = 0; i < NUM_RX_CHAINS; i++) 1051 for (i = 0; i < NUM_RX_CHAINS; i++)
1051 if (!(data->active_chains & (1<<i))) 1052 if (!(data->active_chains & (1<<i)))
1052 data->disconn_array[i] = 1; 1053 data->disconn_array[i] = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index b8347db850e7..c62ddc2a31bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -195,7 +195,7 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
195} 195}
196 196
197static void 197static void
198iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv, 198iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
199 struct iwl_eeprom_enhanced_txpwr *txp, 199 struct iwl_eeprom_enhanced_txpwr *txp,
200 s8 max_txpower_avg) 200 s8 max_txpower_avg)
201{ 201{
@@ -235,7 +235,7 @@ iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
235#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \ 235#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
236 ? # x " " : "") 236 ? # x " " : "")
237 237
238void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) 238void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
239{ 239{
240 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; 240 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
241 int idx, entries; 241 int idx, entries;
@@ -294,6 +294,6 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
294 if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm) 294 if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
295 priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm; 295 priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
296 296
297 iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg); 297 iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
298 } 298 }
299} 299}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index 47c43042ba4f..33951a11327d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -95,17 +95,4 @@
95#define IWLAGN_NUM_AMPDU_QUEUES 9 95#define IWLAGN_NUM_AMPDU_QUEUES 9
96#define IWLAGN_FIRST_AMPDU_QUEUE 11 96#define IWLAGN_FIRST_AMPDU_QUEUE 11
97 97
98/* Fixed (non-configurable) rx data from phy */
99
100/**
101 * struct iwlagn_schedq_bc_tbl scheduler byte count table
102 * base physical address provided by SCD_DRAM_BASE_ADDR
103 * @tfd_offset 0-12 - tx command byte count
104 * 12-16 - station index
105 */
106struct iwlagn_scd_bc_tbl {
107 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
108} __packed;
109
110
111#endif /* __iwl_agn_hw_h__ */ 98#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 4edb6cfc5488..7c036b9c2b30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -40,449 +40,7 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-sta.h" 41#include "iwl-sta.h"
42#include "iwl-trans.h" 42#include "iwl-trans.h"
43 43#include "iwl-shared.h"
44static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
45{
46 return le32_to_cpup((__le32 *)&tx_resp->status +
47 tx_resp->frame_count) & MAX_SN;
48}
49
50static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
51{
52 status &= TX_STATUS_MSK;
53
54 switch (status) {
55 case TX_STATUS_POSTPONE_DELAY:
56 priv->reply_tx_stats.pp_delay++;
57 break;
58 case TX_STATUS_POSTPONE_FEW_BYTES:
59 priv->reply_tx_stats.pp_few_bytes++;
60 break;
61 case TX_STATUS_POSTPONE_BT_PRIO:
62 priv->reply_tx_stats.pp_bt_prio++;
63 break;
64 case TX_STATUS_POSTPONE_QUIET_PERIOD:
65 priv->reply_tx_stats.pp_quiet_period++;
66 break;
67 case TX_STATUS_POSTPONE_CALC_TTAK:
68 priv->reply_tx_stats.pp_calc_ttak++;
69 break;
70 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
71 priv->reply_tx_stats.int_crossed_retry++;
72 break;
73 case TX_STATUS_FAIL_SHORT_LIMIT:
74 priv->reply_tx_stats.short_limit++;
75 break;
76 case TX_STATUS_FAIL_LONG_LIMIT:
77 priv->reply_tx_stats.long_limit++;
78 break;
79 case TX_STATUS_FAIL_FIFO_UNDERRUN:
80 priv->reply_tx_stats.fifo_underrun++;
81 break;
82 case TX_STATUS_FAIL_DRAIN_FLOW:
83 priv->reply_tx_stats.drain_flow++;
84 break;
85 case TX_STATUS_FAIL_RFKILL_FLUSH:
86 priv->reply_tx_stats.rfkill_flush++;
87 break;
88 case TX_STATUS_FAIL_LIFE_EXPIRE:
89 priv->reply_tx_stats.life_expire++;
90 break;
91 case TX_STATUS_FAIL_DEST_PS:
92 priv->reply_tx_stats.dest_ps++;
93 break;
94 case TX_STATUS_FAIL_HOST_ABORTED:
95 priv->reply_tx_stats.host_abort++;
96 break;
97 case TX_STATUS_FAIL_BT_RETRY:
98 priv->reply_tx_stats.bt_retry++;
99 break;
100 case TX_STATUS_FAIL_STA_INVALID:
101 priv->reply_tx_stats.sta_invalid++;
102 break;
103 case TX_STATUS_FAIL_FRAG_DROPPED:
104 priv->reply_tx_stats.frag_drop++;
105 break;
106 case TX_STATUS_FAIL_TID_DISABLE:
107 priv->reply_tx_stats.tid_disable++;
108 break;
109 case TX_STATUS_FAIL_FIFO_FLUSHED:
110 priv->reply_tx_stats.fifo_flush++;
111 break;
112 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
113 priv->reply_tx_stats.insuff_cf_poll++;
114 break;
115 case TX_STATUS_FAIL_PASSIVE_NO_RX:
116 priv->reply_tx_stats.fail_hw_drop++;
117 break;
118 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
119 priv->reply_tx_stats.sta_color_mismatch++;
120 break;
121 default:
122 priv->reply_tx_stats.unknown++;
123 break;
124 }
125}
126
127static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
128{
129 status &= AGG_TX_STATUS_MSK;
130
131 switch (status) {
132 case AGG_TX_STATE_UNDERRUN_MSK:
133 priv->reply_agg_tx_stats.underrun++;
134 break;
135 case AGG_TX_STATE_BT_PRIO_MSK:
136 priv->reply_agg_tx_stats.bt_prio++;
137 break;
138 case AGG_TX_STATE_FEW_BYTES_MSK:
139 priv->reply_agg_tx_stats.few_bytes++;
140 break;
141 case AGG_TX_STATE_ABORT_MSK:
142 priv->reply_agg_tx_stats.abort++;
143 break;
144 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
145 priv->reply_agg_tx_stats.last_sent_ttl++;
146 break;
147 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
148 priv->reply_agg_tx_stats.last_sent_try++;
149 break;
150 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
151 priv->reply_agg_tx_stats.last_sent_bt_kill++;
152 break;
153 case AGG_TX_STATE_SCD_QUERY_MSK:
154 priv->reply_agg_tx_stats.scd_query++;
155 break;
156 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
157 priv->reply_agg_tx_stats.bad_crc32++;
158 break;
159 case AGG_TX_STATE_RESPONSE_MSK:
160 priv->reply_agg_tx_stats.response++;
161 break;
162 case AGG_TX_STATE_DUMP_TX_MSK:
163 priv->reply_agg_tx_stats.dump_tx++;
164 break;
165 case AGG_TX_STATE_DELAY_TX_MSK:
166 priv->reply_agg_tx_stats.delay_tx++;
167 break;
168 default:
169 priv->reply_agg_tx_stats.unknown++;
170 break;
171 }
172}
173
174static void iwlagn_set_tx_status(struct iwl_priv *priv,
175 struct ieee80211_tx_info *info,
176 struct iwl_rxon_context *ctx,
177 struct iwlagn_tx_resp *tx_resp,
178 int txq_id, bool is_agg)
179{
180 u16 status = le16_to_cpu(tx_resp->status.status);
181
182 info->status.rates[0].count = tx_resp->failure_frame + 1;
183 if (is_agg)
184 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
185 info->flags |= iwl_tx_status_to_mac80211(status);
186 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
187 info);
188 if (!iwl_is_tx_success(status))
189 iwlagn_count_tx_err_status(priv, status);
190
191 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
192 iwl_is_associated_ctx(ctx) && ctx->vif &&
193 ctx->vif->type == NL80211_IFTYPE_STATION) {
194 ctx->last_tx_rejected = true;
195 iwl_stop_queue(priv, &priv->txq[txq_id]);
196 }
197
198 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
199 "0x%x retries %d\n",
200 txq_id,
201 iwl_get_tx_fail_reason(status), status,
202 le32_to_cpu(tx_resp->rate_n_flags),
203 tx_resp->failure_frame);
204}
205
206#ifdef CONFIG_IWLWIFI_DEBUG
207#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
208
209const char *iwl_get_agg_tx_fail_reason(u16 status)
210{
211 status &= AGG_TX_STATUS_MSK;
212 switch (status) {
213 case AGG_TX_STATE_TRANSMITTED:
214 return "SUCCESS";
215 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
216 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
217 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
218 AGG_TX_STATE_FAIL(ABORT_MSK);
219 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
220 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
221 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
222 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
223 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
224 AGG_TX_STATE_FAIL(RESPONSE_MSK);
225 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
226 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
227 }
228
229 return "UNKNOWN";
230}
231#endif /* CONFIG_IWLWIFI_DEBUG */
232
233static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
234 struct iwl_ht_agg *agg,
235 struct iwlagn_tx_resp *tx_resp,
236 int txq_id, u16 start_idx)
237{
238 u16 status;
239 struct agg_tx_status *frame_status = &tx_resp->status;
240 struct ieee80211_hdr *hdr = NULL;
241 int i, sh, idx;
242 u16 seq;
243
244 if (agg->wait_for_ba)
245 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
246
247 agg->frame_count = tx_resp->frame_count;
248 agg->start_idx = start_idx;
249 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
250 agg->bitmap = 0;
251
252 /* # frames attempted by Tx command */
253 if (agg->frame_count == 1) {
254 struct iwl_tx_info *txb;
255
256 /* Only one frame was attempted; no block-ack will arrive */
257 idx = start_idx;
258
259 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
260 agg->frame_count, agg->start_idx, idx);
261 txb = &priv->txq[txq_id].txb[idx];
262 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
263 txb->ctx, tx_resp, txq_id, true);
264 agg->wait_for_ba = 0;
265 } else {
266 /* Two or more frames were attempted; expect block-ack */
267 u64 bitmap = 0;
268
269 /*
270 * Start is the lowest frame sent. It may not be the first
271 * frame in the batch; we figure this out dynamically during
272 * the following loop.
273 */
274 int start = agg->start_idx;
275
276 /* Construct bit-map of pending frames within Tx window */
277 for (i = 0; i < agg->frame_count; i++) {
278 u16 sc;
279 status = le16_to_cpu(frame_status[i].status);
280 seq = le16_to_cpu(frame_status[i].sequence);
281 idx = SEQ_TO_INDEX(seq);
282 txq_id = SEQ_TO_QUEUE(seq);
283
284 if (status & AGG_TX_STATUS_MSK)
285 iwlagn_count_agg_tx_err_status(priv, status);
286
287 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
288 AGG_TX_STATE_ABORT_MSK))
289 continue;
290
291 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
292 agg->frame_count, txq_id, idx);
293 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
294 "try-count (0x%08x)\n",
295 iwl_get_agg_tx_fail_reason(status),
296 status & AGG_TX_STATUS_MSK,
297 status & AGG_TX_TRY_MSK);
298
299 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
300 if (!hdr) {
301 IWL_ERR(priv,
302 "BUG_ON idx doesn't point to valid skb"
303 " idx=%d, txq_id=%d\n", idx, txq_id);
304 return -1;
305 }
306
307 sc = le16_to_cpu(hdr->seq_ctrl);
308 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
309 IWL_ERR(priv,
310 "BUG_ON idx doesn't match seq control"
311 " idx=%d, seq_idx=%d, seq=%d\n",
312 idx, SEQ_TO_SN(sc),
313 hdr->seq_ctrl);
314 return -1;
315 }
316
317 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
318 i, idx, SEQ_TO_SN(sc));
319
320 /*
321 * sh -> how many frames ahead of the starting frame is
322 * the current one?
323 *
324 * Note that all frames sent in the batch must be in a
325 * 64-frame window, so this number should be in [0,63].
326 * If outside of this window, then we've found a new
327 * "first" frame in the batch and need to change start.
328 */
329 sh = idx - start;
330
331 /*
332 * If >= 64, out of window. start must be at the front
333 * of the circular buffer, idx must be near the end of
334 * the buffer, and idx is the new "first" frame. Shift
335 * the indices around.
336 */
337 if (sh >= 64) {
338 /* Shift bitmap by start - idx, wrapped */
339 sh = 0x100 - idx + start;
340 bitmap = bitmap << sh;
341 /* Now idx is the new start so sh = 0 */
342 sh = 0;
343 start = idx;
344 /*
345 * If <= -64 then wraps the 256-pkt circular buffer
346 * (e.g., start = 255 and idx = 0, sh should be 1)
347 */
348 } else if (sh <= -64) {
349 sh = 0x100 - start + idx;
350 /*
351 * If < 0 but > -64, out of window. idx is before start
352 * but not wrapped. Shift the indices around.
353 */
354 } else if (sh < 0) {
355 /* Shift by how far start is ahead of idx */
356 sh = start - idx;
357 bitmap = bitmap << sh;
358 /* Now idx is the new start so sh = 0 */
359 start = idx;
360 sh = 0;
361 }
362 /* Sequence number start + sh was sent in this batch */
363 bitmap |= 1ULL << sh;
364 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
365 start, (unsigned long long)bitmap);
366 }
367
368 /*
369 * Store the bitmap and possibly the new start, if we wrapped
370 * the buffer above
371 */
372 agg->bitmap = bitmap;
373 agg->start_idx = start;
374 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
375 agg->frame_count, agg->start_idx,
376 (unsigned long long)agg->bitmap);
377
378 if (bitmap)
379 agg->wait_for_ba = 1;
380 }
381 return 0;
382}
383
384void iwl_check_abort_status(struct iwl_priv *priv,
385 u8 frame_count, u32 status)
386{
387 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
388 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
389 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
390 queue_work(priv->workqueue, &priv->tx_flush);
391 }
392}
393
394void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
395{
396 struct iwl_rx_packet *pkt = rxb_addr(rxb);
397 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
398 int txq_id = SEQ_TO_QUEUE(sequence);
399 int index = SEQ_TO_INDEX(sequence);
400 struct iwl_tx_queue *txq = &priv->txq[txq_id];
401 struct ieee80211_tx_info *info;
402 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
403 struct ieee80211_hdr *hdr;
404 struct iwl_tx_info *txb;
405 u32 status = le16_to_cpu(tx_resp->status.status);
406 int tid;
407 int sta_id;
408 int freed;
409 unsigned long flags;
410
411 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
412 IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
413 "index %d is out of range [0-%d] %d %d\n", __func__,
414 txq_id, index, txq->q.n_bd, txq->q.write_ptr,
415 txq->q.read_ptr);
416 return;
417 }
418
419 txq->time_stamp = jiffies;
420 txb = &txq->txb[txq->q.read_ptr];
421 info = IEEE80211_SKB_CB(txb->skb);
422 memset(&info->status, 0, sizeof(info->status));
423
424 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
425 IWLAGN_TX_RES_TID_POS;
426 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
427 IWLAGN_TX_RES_RA_POS;
428
429 spin_lock_irqsave(&priv->sta_lock, flags);
430
431 hdr = (void *)txb->skb->data;
432 if (!ieee80211_is_data_qos(hdr->frame_control))
433 priv->last_seq_ctl = tx_resp->seq_ctl;
434
435 if (txq->sched_retry) {
436 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
437 struct iwl_ht_agg *agg;
438
439 agg = &priv->stations[sta_id].tid[tid].agg;
440 /*
441 * If the BT kill count is non-zero, we'll get this
442 * notification again.
443 */
444 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
445 priv->cfg->bt_params &&
446 priv->cfg->bt_params->advanced_bt_coexist) {
447 IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
448 }
449 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
450
451 /* check if BAR is needed */
452 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
453 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
454
455 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
456 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
457 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
458 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
459 scd_ssn , index, txq_id, txq->swq_id);
460
461 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
462 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
463
464 if (priv->mac80211_registered &&
465 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
466 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
467 iwl_wake_queue(priv, txq);
468 }
469 } else {
470 iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
471 txq_id, false);
472 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
473 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
474
475 if (priv->mac80211_registered &&
476 iwl_queue_space(&txq->q) > txq->q.low_mark &&
477 status != TX_STATUS_FAIL_PASSIVE_NO_RX)
478 iwl_wake_queue(priv, txq);
479 }
480
481 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
482
483 iwl_check_abort_status(priv, tx_resp->frame_count, status);
484 spin_unlock_irqrestore(&priv->sta_lock, flags);
485}
486 44
487int iwlagn_hw_valid_rtc_data_addr(u32 addr) 45int iwlagn_hw_valid_rtc_data_addr(u32 addr)
488{ 46{
@@ -495,7 +53,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
495 struct iwlagn_tx_power_dbm_cmd tx_power_cmd; 53 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
496 u8 tx_ant_cfg_cmd; 54 u8 tx_ant_cfg_cmd;
497 55
498 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status), 56 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
499 "TX Power requested while scanning!\n")) 57 "TX Power requested while scanning!\n"))
500 return -EAGAIN; 58 return -EAGAIN;
501 59
@@ -525,7 +83,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
525 else 83 else
526 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 84 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
527 85
528 return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC, 86 return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
529 sizeof(tx_power_cmd), &tx_power_cmd); 87 sizeof(tx_power_cmd), &tx_power_cmd);
530} 88}
531 89
@@ -609,6 +167,9 @@ struct iwl_mod_params iwlagn_mod_params = {
609 .bt_coex_active = true, 167 .bt_coex_active = true,
610 .no_sleep_autoadjust = true, 168 .no_sleep_autoadjust = true,
611 .power_level = IWL_POWER_INDEX_1, 169 .power_level = IWL_POWER_INDEX_1,
170 .bt_ch_announce = true,
171 .wanted_ucode_alternative = 1,
172 .auto_agg = true,
612 /* the rest are 0 by default */ 173 /* the rest are 0 by default */
613}; 174};
614 175
@@ -767,15 +328,15 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
767 u16 rx_chain = 0; 328 u16 rx_chain = 0;
768 enum ieee80211_band band; 329 enum ieee80211_band band;
769 u8 n_probes = 0; 330 u8 n_probes = 0;
770 u8 rx_ant = priv->hw_params.valid_rx_ant; 331 u8 rx_ant = hw_params(priv).valid_rx_ant;
771 u8 rate; 332 u8 rate;
772 bool is_active = false; 333 bool is_active = false;
773 int chan_mod; 334 int chan_mod;
774 u8 active_chains; 335 u8 active_chains;
775 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant; 336 u8 scan_tx_antennas = hw_params(priv).valid_tx_ant;
776 int ret; 337 int ret;
777 338
778 lockdep_assert_held(&priv->mutex); 339 lockdep_assert_held(&priv->shrd->mutex);
779 340
780 if (vif) 341 if (vif)
781 ctx = iwl_rxon_ctx_from_vif(vif); 342 ctx = iwl_rxon_ctx_from_vif(vif);
@@ -942,7 +503,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
942 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); 503 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
943 504
944 /* In power save mode use one chain, otherwise use all chains */ 505 /* In power save mode use one chain, otherwise use all chains */
945 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 506 if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) {
946 /* rx_ant has been set to all valid chains previously */ 507 /* rx_ant has been set to all valid chains previously */
947 active_chains = rx_ant & 508 active_chains = rx_ant &
948 ((u8)(priv->chain_noise_data.active_chains)); 509 ((u8)(priv->chain_noise_data.active_chains));
@@ -962,7 +523,8 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
962 } 523 }
963 524
964 /* MIMO is not used here, but value is required */ 525 /* MIMO is not used here, but value is required */
965 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 526 rx_chain |=
527 hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
966 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 528 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
967 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 529 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
968 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 530 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -1044,15 +606,15 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1044 scan->len = cpu_to_le16(cmd.len[0]); 606 scan->len = cpu_to_le16(cmd.len[0]);
1045 607
1046 /* set scan bit here for PAN params */ 608 /* set scan bit here for PAN params */
1047 set_bit(STATUS_SCAN_HW, &priv->status); 609 set_bit(STATUS_SCAN_HW, &priv->shrd->status);
1048 610
1049 ret = iwlagn_set_pan_params(priv); 611 ret = iwlagn_set_pan_params(priv);
1050 if (ret) 612 if (ret)
1051 return ret; 613 return ret;
1052 614
1053 ret = trans_send_cmd(&priv->trans, &cmd); 615 ret = iwl_trans_send_cmd(trans(priv), &cmd);
1054 if (ret) { 616 if (ret) {
1055 clear_bit(STATUS_SCAN_HW, &priv->status); 617 clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
1056 iwlagn_set_pan_params(priv); 618 iwlagn_set_pan_params(priv);
1057 } 619 }
1058 620
@@ -1072,52 +634,6 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1072 vif->bss_conf.bssid); 634 vif->bss_conf.bssid);
1073} 635}
1074 636
1075void iwl_free_tfds_in_queue(struct iwl_priv *priv,
1076 int sta_id, int tid, int freed)
1077{
1078 lockdep_assert_held(&priv->sta_lock);
1079
1080 if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
1081 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
1082 else {
1083 IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
1084 priv->stations[sta_id].tid[tid].tfds_in_queue,
1085 freed);
1086 priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
1087 }
1088}
1089
1090#define IWL_FLUSH_WAIT_MS 2000
1091
1092int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
1093{
1094 struct iwl_tx_queue *txq;
1095 struct iwl_queue *q;
1096 int cnt;
1097 unsigned long now = jiffies;
1098 int ret = 0;
1099
1100 /* waiting for all the tx frames complete might take a while */
1101 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1102 if (cnt == priv->cmd_queue)
1103 continue;
1104 txq = &priv->txq[cnt];
1105 q = &txq->q;
1106 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1107 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1108 msleep(1);
1109
1110 if (q->read_ptr != q->write_ptr) {
1111 IWL_ERR(priv, "fail to flush all tx fifo queues\n");
1112 ret = -ETIMEDOUT;
1113 break;
1114 }
1115 }
1116 return ret;
1117}
1118
1119#define IWL_TX_QUEUE_MSK 0xfffff
1120
1121/** 637/**
1122 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode 638 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
1123 * 639 *
@@ -1156,22 +672,22 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1156 flush_cmd.fifo_control); 672 flush_cmd.fifo_control);
1157 flush_cmd.flush_control = cpu_to_le16(flush_control); 673 flush_cmd.flush_control = cpu_to_le16(flush_control);
1158 674
1159 return trans_send_cmd(&priv->trans, &cmd); 675 return iwl_trans_send_cmd(trans(priv), &cmd);
1160} 676}
1161 677
1162void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control) 678void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
1163{ 679{
1164 mutex_lock(&priv->mutex); 680 mutex_lock(&priv->shrd->mutex);
1165 ieee80211_stop_queues(priv->hw); 681 ieee80211_stop_queues(priv->hw);
1166 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) { 682 if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
1167 IWL_ERR(priv, "flush request fail\n"); 683 IWL_ERR(priv, "flush request fail\n");
1168 goto done; 684 goto done;
1169 } 685 }
1170 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); 686 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
1171 iwlagn_wait_tx_queue_empty(priv); 687 iwl_trans_wait_tx_queue_empty(trans(priv));
1172done: 688done:
1173 ieee80211_wake_queues(priv->hw); 689 ieee80211_wake_queues(priv->hw);
1174 mutex_unlock(&priv->mutex); 690 mutex_unlock(&priv->shrd->mutex);
1175} 691}
1176 692
1177/* 693/*
@@ -1350,12 +866,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1350 if (priv->cfg->bt_params->bt_session_2) { 866 if (priv->cfg->bt_params->bt_session_2) {
1351 memcpy(&bt_cmd_2000.basic, &basic, 867 memcpy(&bt_cmd_2000.basic, &basic,
1352 sizeof(basic)); 868 sizeof(basic));
1353 ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG, 869 ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
1354 CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000); 870 CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
1355 } else { 871 } else {
1356 memcpy(&bt_cmd_6000.basic, &basic, 872 memcpy(&bt_cmd_6000.basic, &basic,
1357 sizeof(basic)); 873 sizeof(basic));
1358 ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG, 874 ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
1359 CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000); 875 CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
1360 } 876 }
1361 if (ret) 877 if (ret)
@@ -1368,7 +884,7 @@ void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
1368 struct iwl_rxon_context *ctx, *found_ctx = NULL; 884 struct iwl_rxon_context *ctx, *found_ctx = NULL;
1369 bool found_ap = false; 885 bool found_ap = false;
1370 886
1371 lockdep_assert_held(&priv->mutex); 887 lockdep_assert_held(&priv->shrd->mutex);
1372 888
1373 /* Check whether AP or GO mode is active. */ 889 /* Check whether AP or GO mode is active. */
1374 if (rssi_ena) { 890 if (rssi_ena) {
@@ -1481,7 +997,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1481 break; 997 break;
1482 } 998 }
1483 999
1484 mutex_lock(&priv->mutex); 1000 mutex_lock(&priv->shrd->mutex);
1485 1001
1486 /* 1002 /*
1487 * We can not send command to firmware while scanning. When the scan 1003 * We can not send command to firmware while scanning. When the scan
@@ -1490,7 +1006,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1490 * STATUS_SCANNING to avoid race when queue_work two times from 1006 * STATUS_SCANNING to avoid race when queue_work two times from
1491 * different notifications, but quit and not perform any work at all. 1007 * different notifications, but quit and not perform any work at all.
1492 */ 1008 */
1493 if (test_bit(STATUS_SCAN_HW, &priv->status)) 1009 if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
1494 goto out; 1010 goto out;
1495 1011
1496 iwl_update_chain_flags(priv); 1012 iwl_update_chain_flags(priv);
@@ -1509,7 +1025,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1509 */ 1025 */
1510 iwlagn_bt_coex_rssi_monitor(priv); 1026 iwlagn_bt_coex_rssi_monitor(priv);
1511out: 1027out:
1512 mutex_unlock(&priv->mutex); 1028 mutex_unlock(&priv->shrd->mutex);
1513} 1029}
1514 1030
1515/* 1031/*
@@ -1616,7 +1132,7 @@ static void iwlagn_set_kill_msk(struct iwl_priv *priv,
1616 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk]; 1132 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
1617 1133
1618 /* schedule to send runtime bt_config */ 1134 /* schedule to send runtime bt_config */
1619 queue_work(priv->workqueue, &priv->bt_runtime_config); 1135 queue_work(priv->shrd->workqueue, &priv->bt_runtime_config);
1620 } 1136 }
1621} 1137}
1622 1138
@@ -1660,7 +1176,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1660 IWL_BT_COEX_TRAFFIC_LOAD_NONE; 1176 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
1661 } 1177 }
1662 priv->bt_status = coex->bt_status; 1178 priv->bt_status = coex->bt_status;
1663 queue_work(priv->workqueue, 1179 queue_work(priv->shrd->workqueue,
1664 &priv->bt_traffic_change_work); 1180 &priv->bt_traffic_change_work);
1665 } 1181 }
1666 } 1182 }
@@ -1669,9 +1185,9 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
1669 1185
1670 /* FIXME: based on notification, adjust the prio_boost */ 1186 /* FIXME: based on notification, adjust the prio_boost */
1671 1187
1672 spin_lock_irqsave(&priv->lock, flags); 1188 spin_lock_irqsave(&priv->shrd->lock, flags);
1673 priv->bt_ci_compliance = coex->bt_ci_compliance; 1189 priv->bt_ci_compliance = coex->bt_ci_compliance;
1674 spin_unlock_irqrestore(&priv->lock, flags); 1190 spin_unlock_irqrestore(&priv->shrd->lock, flags);
1675} 1191}
1676 1192
1677void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv) 1193void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
@@ -1771,7 +1287,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1771void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 1287void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1772{ 1288{
1773 bool is_single = is_single_rx_stream(priv); 1289 bool is_single = is_single_rx_stream(priv);
1774 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 1290 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
1775 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; 1291 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
1776 u32 active_chains; 1292 u32 active_chains;
1777 u16 rx_chain; 1293 u16 rx_chain;
@@ -1783,7 +1299,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1783 if (priv->chain_noise_data.active_chains) 1299 if (priv->chain_noise_data.active_chains)
1784 active_chains = priv->chain_noise_data.active_chains; 1300 active_chains = priv->chain_noise_data.active_chains;
1785 else 1301 else
1786 active_chains = priv->hw_params.valid_rx_ant; 1302 active_chains = hw_params(priv).valid_rx_ant;
1787 1303
1788 if (priv->cfg->bt_params && 1304 if (priv->cfg->bt_params &&
1789 priv->cfg->bt_params->advanced_bt_coexist && 1305 priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1848,136 +1364,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
1848 return ant; 1364 return ant;
1849} 1365}
1850 1366
1851static const char *get_csr_string(int cmd)
1852{
1853 switch (cmd) {
1854 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1855 IWL_CMD(CSR_INT_COALESCING);
1856 IWL_CMD(CSR_INT);
1857 IWL_CMD(CSR_INT_MASK);
1858 IWL_CMD(CSR_FH_INT_STATUS);
1859 IWL_CMD(CSR_GPIO_IN);
1860 IWL_CMD(CSR_RESET);
1861 IWL_CMD(CSR_GP_CNTRL);
1862 IWL_CMD(CSR_HW_REV);
1863 IWL_CMD(CSR_EEPROM_REG);
1864 IWL_CMD(CSR_EEPROM_GP);
1865 IWL_CMD(CSR_OTP_GP_REG);
1866 IWL_CMD(CSR_GIO_REG);
1867 IWL_CMD(CSR_GP_UCODE_REG);
1868 IWL_CMD(CSR_GP_DRIVER_REG);
1869 IWL_CMD(CSR_UCODE_DRV_GP1);
1870 IWL_CMD(CSR_UCODE_DRV_GP2);
1871 IWL_CMD(CSR_LED_REG);
1872 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1873 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1874 IWL_CMD(CSR_ANA_PLL_CFG);
1875 IWL_CMD(CSR_HW_REV_WA_REG);
1876 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1877 default:
1878 return "UNKNOWN";
1879 }
1880}
1881
1882void iwl_dump_csr(struct iwl_priv *priv)
1883{
1884 int i;
1885 static const u32 csr_tbl[] = {
1886 CSR_HW_IF_CONFIG_REG,
1887 CSR_INT_COALESCING,
1888 CSR_INT,
1889 CSR_INT_MASK,
1890 CSR_FH_INT_STATUS,
1891 CSR_GPIO_IN,
1892 CSR_RESET,
1893 CSR_GP_CNTRL,
1894 CSR_HW_REV,
1895 CSR_EEPROM_REG,
1896 CSR_EEPROM_GP,
1897 CSR_OTP_GP_REG,
1898 CSR_GIO_REG,
1899 CSR_GP_UCODE_REG,
1900 CSR_GP_DRIVER_REG,
1901 CSR_UCODE_DRV_GP1,
1902 CSR_UCODE_DRV_GP2,
1903 CSR_LED_REG,
1904 CSR_DRAM_INT_TBL_REG,
1905 CSR_GIO_CHICKEN_BITS,
1906 CSR_ANA_PLL_CFG,
1907 CSR_HW_REV_WA_REG,
1908 CSR_DBG_HPET_MEM_REG
1909 };
1910 IWL_ERR(priv, "CSR values:\n");
1911 IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
1912 "CSR_INT_PERIODIC_REG)\n");
1913 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1914 IWL_ERR(priv, " %25s: 0X%08x\n",
1915 get_csr_string(csr_tbl[i]),
1916 iwl_read32(priv, csr_tbl[i]));
1917 }
1918}
1919
1920static const char *get_fh_string(int cmd)
1921{
1922 switch (cmd) {
1923 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1924 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1925 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1926 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1927 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1928 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1929 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1930 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1931 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1932 default:
1933 return "UNKNOWN";
1934 }
1935}
1936
1937int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
1938{
1939 int i;
1940#ifdef CONFIG_IWLWIFI_DEBUG
1941 int pos = 0;
1942 size_t bufsz = 0;
1943#endif
1944 static const u32 fh_tbl[] = {
1945 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1946 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1947 FH_RSCSR_CHNL0_WPTR,
1948 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1949 FH_MEM_RSSR_SHARED_CTRL_REG,
1950 FH_MEM_RSSR_RX_STATUS_REG,
1951 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1952 FH_TSSR_TX_STATUS_REG,
1953 FH_TSSR_TX_ERROR_REG
1954 };
1955#ifdef CONFIG_IWLWIFI_DEBUG
1956 if (display) {
1957 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1958 *buf = kmalloc(bufsz, GFP_KERNEL);
1959 if (!*buf)
1960 return -ENOMEM;
1961 pos += scnprintf(*buf + pos, bufsz - pos,
1962 "FH register values:\n");
1963 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1964 pos += scnprintf(*buf + pos, bufsz - pos,
1965 " %34s: 0X%08x\n",
1966 get_fh_string(fh_tbl[i]),
1967 iwl_read_direct32(priv, fh_tbl[i]));
1968 }
1969 return pos;
1970 }
1971#endif
1972 IWL_ERR(priv, "FH register values:\n");
1973 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
1974 IWL_ERR(priv, " %34s: 0X%08x\n",
1975 get_fh_string(fh_tbl[i]),
1976 iwl_read_direct32(priv, fh_tbl[i]));
1977 }
1978 return 0;
1979}
1980
1981/* notification wait support */ 1367/* notification wait support */
1982void iwlagn_init_notification_wait(struct iwl_priv *priv, 1368void iwlagn_init_notification_wait(struct iwl_priv *priv,
1983 struct iwl_notification_wait *wait_entry, 1369 struct iwl_notification_wait *wait_entry,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1fa438e20f0a..ffee15ba06a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -297,10 +297,10 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
297 u8 *qc = ieee80211_get_qos_ctl(hdr); 297 u8 *qc = ieee80211_get_qos_ctl(hdr);
298 tid = qc[0] & 0xf; 298 tid = qc[0] & 0xf;
299 } else 299 } else
300 return MAX_TID_COUNT; 300 return IWL_MAX_TID_COUNT;
301 301
302 if (unlikely(tid >= TID_MAX_LOAD_COUNT)) 302 if (unlikely(tid >= TID_MAX_LOAD_COUNT))
303 return MAX_TID_COUNT; 303 return IWL_MAX_TID_COUNT;
304 304
305 tl = &lq_data->load[tid]; 305 tl = &lq_data->load[tid];
306 306
@@ -313,7 +313,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
313 tl->queue_count = 1; 313 tl->queue_count = 1;
314 tl->head = 0; 314 tl->head = 0;
315 tl->packet_count[0] = 1; 315 tl->packet_count[0] = 1;
316 return MAX_TID_COUNT; 316 return IWL_MAX_TID_COUNT;
317 } 317 }
318 318
319 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time); 319 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
@@ -420,7 +420,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
420 420
421 load = rs_tl_get_load(lq_data, tid); 421 load = rs_tl_get_load(lq_data, tid);
422 422
423 if (load > IWL_AGG_LOAD_THRESHOLD) { 423 if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
424 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 424 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
425 sta->addr, tid); 425 sta->addr, tid);
426 ret = ieee80211_start_tx_ba_session(sta, tid, 5000); 426 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -819,7 +819,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
819 819
820 if (num_of_ant(tbl->ant_type) > 1) 820 if (num_of_ant(tbl->ant_type) > 1)
821 tbl->ant_type = 821 tbl->ant_type =
822 first_antenna(priv->hw_params.valid_tx_ant); 822 first_antenna(hw_params(priv).valid_tx_ant);
823 823
824 tbl->is_ht40 = 0; 824 tbl->is_ht40 = 0;
825 tbl->is_SGI = 0; 825 tbl->is_SGI = 0;
@@ -877,12 +877,12 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
877 * Is there a need to switch between 877 * Is there a need to switch between
878 * full concurrency and 3-wire? 878 * full concurrency and 3-wire?
879 */ 879 */
880 spin_lock_irqsave(&priv->lock, flags); 880 spin_lock_irqsave(&priv->shrd->lock, flags);
881 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok) 881 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
882 full_concurrent = true; 882 full_concurrent = true;
883 else 883 else
884 full_concurrent = false; 884 full_concurrent = false;
885 spin_unlock_irqrestore(&priv->lock, flags); 885 spin_unlock_irqrestore(&priv->shrd->lock, flags);
886 } 886 }
887 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || 887 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
888 (priv->bt_full_concurrent != full_concurrent)) { 888 (priv->bt_full_concurrent != full_concurrent)) {
@@ -893,7 +893,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
893 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); 893 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
894 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); 894 iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
895 895
896 queue_work(priv->workqueue, &priv->bt_full_concurrency); 896 queue_work(priv->shrd->workqueue, &priv->bt_full_concurrency);
897 } 897 }
898} 898}
899 899
@@ -1293,7 +1293,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1293 return -1; 1293 return -1;
1294 1294
1295 /* Need both Tx chains/antennas to support MIMO */ 1295 /* Need both Tx chains/antennas to support MIMO */
1296 if (priv->hw_params.tx_chains_num < 2) 1296 if (hw_params(priv).tx_chains_num < 2)
1297 return -1; 1297 return -1;
1298 1298
1299 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); 1299 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
@@ -1349,7 +1349,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1349 return -1; 1349 return -1;
1350 1350
1351 /* Need both Tx chains/antennas to support MIMO */ 1351 /* Need both Tx chains/antennas to support MIMO */
1352 if (priv->hw_params.tx_chains_num < 3) 1352 if (hw_params(priv).tx_chains_num < 3)
1353 return -1; 1353 return -1;
1354 1354
1355 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); 1355 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
@@ -1448,8 +1448,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1448 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1448 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1449 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1449 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1450 u8 start_action; 1450 u8 start_action;
1451 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1451 u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
1452 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1452 u8 tx_chains_num = hw_params(priv).tx_chains_num;
1453 int ret = 0; 1453 int ret = 0;
1454 u8 update_search_tbl_counter = 0; 1454 u8 update_search_tbl_counter = 0;
1455 1455
@@ -1459,14 +1459,16 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1459 break; 1459 break;
1460 case IWL_BT_COEX_TRAFFIC_LOAD_LOW: 1460 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1461 /* avoid antenna B unless MIMO */ 1461 /* avoid antenna B unless MIMO */
1462 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant); 1462 valid_tx_ant =
1463 first_antenna(hw_params(priv).valid_tx_ant);
1463 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2) 1464 if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
1464 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; 1465 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1465 break; 1466 break;
1466 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: 1467 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1467 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1468 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1468 /* avoid antenna B and MIMO */ 1469 /* avoid antenna B and MIMO */
1469 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant); 1470 valid_tx_ant =
1471 first_antenna(hw_params(priv).valid_tx_ant);
1470 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && 1472 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1471 tbl->action != IWL_LEGACY_SWITCH_SISO) 1473 tbl->action != IWL_LEGACY_SWITCH_SISO)
1472 tbl->action = IWL_LEGACY_SWITCH_SISO; 1474 tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1489,7 +1491,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1489 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1; 1491 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
1490 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1492 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1491 tbl->action = IWL_LEGACY_SWITCH_SISO; 1493 tbl->action = IWL_LEGACY_SWITCH_SISO;
1492 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant); 1494 valid_tx_ant =
1495 first_antenna(hw_params(priv).valid_tx_ant);
1493 } 1496 }
1494 1497
1495 start_action = tbl->action; 1498 start_action = tbl->action;
@@ -1623,8 +1626,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1623 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1626 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1624 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1627 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1625 u8 start_action; 1628 u8 start_action;
1626 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1629 u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
1627 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1630 u8 tx_chains_num = hw_params(priv).tx_chains_num;
1628 u8 update_search_tbl_counter = 0; 1631 u8 update_search_tbl_counter = 0;
1629 int ret; 1632 int ret;
1630 1633
@@ -1634,14 +1637,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1634 break; 1637 break;
1635 case IWL_BT_COEX_TRAFFIC_LOAD_LOW: 1638 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1636 /* avoid antenna B unless MIMO */ 1639 /* avoid antenna B unless MIMO */
1637 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant); 1640 valid_tx_ant =
1641 first_antenna(hw_params(priv).valid_tx_ant);
1638 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2) 1642 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
1639 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1643 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1640 break; 1644 break;
1641 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH: 1645 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
1642 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1646 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1643 /* avoid antenna B and MIMO */ 1647 /* avoid antenna B and MIMO */
1644 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant); 1648 valid_tx_ant =
1649 first_antenna(hw_params(priv).valid_tx_ant);
1645 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) 1650 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1646 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1651 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1647 break; 1652 break;
@@ -1658,7 +1663,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1658 1663
1659 /* configure as 1x1 if bt full concurrency */ 1664 /* configure as 1x1 if bt full concurrency */
1660 if (priv->bt_full_concurrent) { 1665 if (priv->bt_full_concurrent) {
1661 valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant); 1666 valid_tx_ant =
1667 first_antenna(hw_params(priv).valid_tx_ant);
1662 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1668 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1663 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1669 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1664 } 1670 }
@@ -1794,8 +1800,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1794 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1800 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1795 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1801 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1796 u8 start_action; 1802 u8 start_action;
1797 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1803 u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
1798 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1804 u8 tx_chains_num = hw_params(priv).tx_chains_num;
1799 u8 update_search_tbl_counter = 0; 1805 u8 update_search_tbl_counter = 0;
1800 int ret; 1806 int ret;
1801 1807
@@ -1964,8 +1970,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1964 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1970 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1965 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1971 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1966 u8 start_action; 1972 u8 start_action;
1967 u8 valid_tx_ant = priv->hw_params.valid_tx_ant; 1973 u8 valid_tx_ant = hw_params(priv).valid_tx_ant;
1968 u8 tx_chains_num = priv->hw_params.tx_chains_num; 1974 u8 tx_chains_num = hw_params(priv).tx_chains_num;
1969 int ret; 1975 int ret;
1970 u8 update_search_tbl_counter = 0; 1976 u8 update_search_tbl_counter = 0;
1971 1977
@@ -2208,7 +2214,6 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2208 2214
2209/* 2215/*
2210 * setup rate table in uCode 2216 * setup rate table in uCode
2211 * return rate_n_flags as used in the table
2212 */ 2217 */
2213static void rs_update_rate_tbl(struct iwl_priv *priv, 2218static void rs_update_rate_tbl(struct iwl_priv *priv,
2214 struct iwl_rxon_context *ctx, 2219 struct iwl_rxon_context *ctx,
@@ -2255,7 +2260,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2255 u8 done_search = 0; 2260 u8 done_search = 0;
2256 u16 high_low; 2261 u16 high_low;
2257 s32 sr; 2262 s32 sr;
2258 u8 tid = MAX_TID_COUNT; 2263 u8 tid = IWL_MAX_TID_COUNT;
2259 struct iwl_tid_data *tid_data; 2264 struct iwl_tid_data *tid_data;
2260 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 2265 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2261 struct iwl_rxon_context *ctx = sta_priv->common.ctx; 2266 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
@@ -2274,8 +2279,9 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2274 lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; 2279 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
2275 2280
2276 tid = rs_tl_add_packet(lq_sta, hdr); 2281 tid = rs_tl_add_packet(lq_sta, hdr);
2277 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) { 2282 if ((tid != IWL_MAX_TID_COUNT) &&
2278 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid]; 2283 (lq_sta->tx_agg_tid_en & (1 << tid))) {
2284 tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
2279 if (tid_data->agg.state == IWL_AGG_OFF) 2285 if (tid_data->agg.state == IWL_AGG_OFF)
2280 lq_sta->is_agg = 0; 2286 lq_sta->is_agg = 0;
2281 else 2287 else
@@ -2645,9 +2651,10 @@ lq_update:
2645 iwl_ht_enabled(priv)) { 2651 iwl_ht_enabled(priv)) {
2646 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && 2652 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
2647 (lq_sta->tx_agg_tid_en & (1 << tid)) && 2653 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
2648 (tid != MAX_TID_COUNT)) { 2654 (tid != IWL_MAX_TID_COUNT)) {
2655 u8 sta_id = lq_sta->lq.sta_id;
2649 tid_data = 2656 tid_data =
2650 &priv->stations[lq_sta->lq.sta_id].tid[tid]; 2657 &priv->shrd->tid_data[sta_id][tid];
2651 if (tid_data->agg.state == IWL_AGG_OFF) { 2658 if (tid_data->agg.state == IWL_AGG_OFF) {
2652 IWL_DEBUG_RATE(priv, 2659 IWL_DEBUG_RATE(priv,
2653 "try to aggregate tid %d\n", 2660 "try to aggregate tid %d\n",
@@ -2703,7 +2710,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2703 2710
2704 i = lq_sta->last_txrate_idx; 2711 i = lq_sta->last_txrate_idx;
2705 2712
2706 valid_tx_ant = priv->hw_params.valid_tx_ant; 2713 valid_tx_ant = hw_params(priv).valid_tx_ant;
2707 2714
2708 if (!lq_sta->search_better_tbl) 2715 if (!lq_sta->search_better_tbl)
2709 active_tbl = lq_sta->active_tbl; 2716 active_tbl = lq_sta->active_tbl;
@@ -2886,15 +2893,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2886 2893
2887 /* These values will be overridden later */ 2894 /* These values will be overridden later */
2888 lq_sta->lq.general_params.single_stream_ant_msk = 2895 lq_sta->lq.general_params.single_stream_ant_msk =
2889 first_antenna(priv->hw_params.valid_tx_ant); 2896 first_antenna(hw_params(priv).valid_tx_ant);
2890 lq_sta->lq.general_params.dual_stream_ant_msk = 2897 lq_sta->lq.general_params.dual_stream_ant_msk =
2891 priv->hw_params.valid_tx_ant & 2898 hw_params(priv).valid_tx_ant &
2892 ~first_antenna(priv->hw_params.valid_tx_ant); 2899 ~first_antenna(hw_params(priv).valid_tx_ant);
2893 if (!lq_sta->lq.general_params.dual_stream_ant_msk) { 2900 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2894 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2901 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2895 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { 2902 } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
2896 lq_sta->lq.general_params.dual_stream_ant_msk = 2903 lq_sta->lq.general_params.dual_stream_ant_msk =
2897 priv->hw_params.valid_tx_ant; 2904 hw_params(priv).valid_tx_ant;
2898 } 2905 }
2899 2906
2900 /* as default allow aggregation for all tids */ 2907 /* as default allow aggregation for all tids */
@@ -2940,7 +2947,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2940 if (priv && priv->bt_full_concurrent) { 2947 if (priv && priv->bt_full_concurrent) {
2941 /* 1x1 only */ 2948 /* 1x1 only */
2942 tbl_type.ant_type = 2949 tbl_type.ant_type =
2943 first_antenna(priv->hw_params.valid_tx_ant); 2950 first_antenna(hw_params(priv).valid_tx_ant);
2944 } 2951 }
2945 2952
2946 /* How many times should we repeat the initial rate? */ 2953 /* How many times should we repeat the initial rate? */
@@ -2972,7 +2979,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2972 if (priv->bt_full_concurrent) 2979 if (priv->bt_full_concurrent)
2973 valid_tx_ant = ANT_A; 2980 valid_tx_ant = ANT_A;
2974 else 2981 else
2975 valid_tx_ant = priv->hw_params.valid_tx_ant; 2982 valid_tx_ant = hw_params(priv).valid_tx_ant;
2976 } 2983 }
2977 2984
2978 /* Fill rest of rate table */ 2985 /* Fill rest of rate table */
@@ -3006,7 +3013,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3006 if (priv && priv->bt_full_concurrent) { 3013 if (priv && priv->bt_full_concurrent) {
3007 /* 1x1 only */ 3014 /* 1x1 only */
3008 tbl_type.ant_type = 3015 tbl_type.ant_type =
3009 first_antenna(priv->hw_params.valid_tx_ant); 3016 first_antenna(hw_params(priv).valid_tx_ant);
3010 } 3017 }
3011 3018
3012 /* Indicate to uCode which entries might be MIMO. 3019 /* Indicate to uCode which entries might be MIMO.
@@ -3097,7 +3104,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3097 u8 ant_sel_tx; 3104 u8 ant_sel_tx;
3098 3105
3099 priv = lq_sta->drv; 3106 priv = lq_sta->drv;
3100 valid_tx_ant = priv->hw_params.valid_tx_ant; 3107 valid_tx_ant = hw_params(priv).valid_tx_ant;
3101 if (lq_sta->dbg_fixed_rate) { 3108 if (lq_sta->dbg_fixed_rate) {
3102 ant_sel_tx = 3109 ant_sel_tx =
3103 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 3110 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3168,9 +3175,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3168 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3175 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3169 lq_sta->dbg_fixed_rate); 3176 lq_sta->dbg_fixed_rate);
3170 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3177 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3171 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", 3178 (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "",
3172 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "", 3179 (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "",
3173 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3180 (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : "");
3174 desc += sprintf(buff+desc, "lq type %s\n", 3181 desc += sprintf(buff+desc, "lq type %s\n",
3175 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3182 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3176 if (is_Ht(tbl->lq_type)) { 3183 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index d562e9359d97..1af276739d87 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -31,6 +31,7 @@
31#include "iwl-agn-calib.h" 31#include "iwl-agn-calib.h"
32#include "iwl-helpers.h" 32#include "iwl-helpers.h"
33#include "iwl-trans.h" 33#include "iwl-trans.h"
34#include "iwl-shared.h"
34 35
35static int iwlagn_disable_bss(struct iwl_priv *priv, 36static int iwlagn_disable_bss(struct iwl_priv *priv,
36 struct iwl_rxon_context *ctx, 37 struct iwl_rxon_context *ctx,
@@ -40,7 +41,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
40 int ret; 41 int ret;
41 42
42 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 43 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
43 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, 44 ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
44 CMD_SYNC, sizeof(*send), send); 45 CMD_SYNC, sizeof(*send), send);
45 46
46 send->filter_flags = old_filter; 47 send->filter_flags = old_filter;
@@ -66,7 +67,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
66 67
67 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 68 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
68 send->dev_type = RXON_DEV_TYPE_P2P; 69 send->dev_type = RXON_DEV_TYPE_P2P;
69 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, 70 ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd,
70 CMD_SYNC, sizeof(*send), send); 71 CMD_SYNC, sizeof(*send), send);
71 72
72 send->filter_flags = old_filter; 73 send->filter_flags = old_filter;
@@ -92,7 +93,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
92 int ret; 93 int ret;
93 94
94 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 95 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
95 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC, 96 ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
96 sizeof(*send), send); 97 sizeof(*send), send);
97 98
98 send->filter_flags = old_filter; 99 send->filter_flags = old_filter;
@@ -121,7 +122,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
121 ctx->qos_data.qos_active, 122 ctx->qos_data.qos_active,
122 ctx->qos_data.def_qos_parm.qos_flags); 123 ctx->qos_data.def_qos_parm.qos_flags);
123 124
124 ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC, 125 ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->qos_cmd, CMD_SYNC,
125 sizeof(struct iwl_qosparam_cmd), 126 sizeof(struct iwl_qosparam_cmd),
126 &ctx->qos_data.def_qos_parm); 127 &ctx->qos_data.def_qos_parm);
127 if (ret) 128 if (ret)
@@ -131,7 +132,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
131static int iwlagn_update_beacon(struct iwl_priv *priv, 132static int iwlagn_update_beacon(struct iwl_priv *priv,
132 struct ieee80211_vif *vif) 133 struct ieee80211_vif *vif)
133{ 134{
134 lockdep_assert_held(&priv->mutex); 135 lockdep_assert_held(&priv->shrd->mutex);
135 136
136 dev_kfree_skb(priv->beacon_skb); 137 dev_kfree_skb(priv->beacon_skb);
137 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif); 138 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
@@ -180,7 +181,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
180 ctx->staging.ofdm_ht_triple_stream_basic_rates; 181 ctx->staging.ofdm_ht_triple_stream_basic_rates;
181 rxon_assoc.acquisition_data = ctx->staging.acquisition_data; 182 rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
182 183
183 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd, 184 ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_assoc_cmd,
184 CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc); 185 CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
185 return ret; 186 return ret;
186} 187}
@@ -266,7 +267,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
266 * Associated RXON doesn't clear the station table in uCode, 267 * Associated RXON doesn't clear the station table in uCode,
267 * so we don't need to restore stations etc. after this. 268 * so we don't need to restore stations etc. after this.
268 */ 269 */
269 ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC, 270 ret = iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_cmd, CMD_SYNC,
270 sizeof(struct iwl_rxon_cmd), &ctx->staging); 271 sizeof(struct iwl_rxon_cmd), &ctx->staging);
271 if (ret) { 272 if (ret) {
272 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 273 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
@@ -315,7 +316,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
315 316
316 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 317 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
317 318
318 lockdep_assert_held(&priv->mutex); 319 lockdep_assert_held(&priv->shrd->mutex);
319 320
320 ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS]; 321 ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
321 ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN]; 322 ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
@@ -362,7 +363,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
362 slot0 = bcnint / 2; 363 slot0 = bcnint / 2;
363 slot1 = bcnint - slot0; 364 slot1 = bcnint - slot0;
364 365
365 if (test_bit(STATUS_SCAN_HW, &priv->status) || 366 if (test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
366 (!ctx_bss->vif->bss_conf.idle && 367 (!ctx_bss->vif->bss_conf.idle &&
367 !ctx_bss->vif->bss_conf.assoc)) { 368 !ctx_bss->vif->bss_conf.assoc)) {
368 slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME; 369 slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
@@ -378,7 +379,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
378 ctx_pan->beacon_int; 379 ctx_pan->beacon_int;
379 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1); 380 slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
380 381
381 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 382 if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
382 slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME; 383 slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
383 slot1 = IWL_MIN_SLOT_TIME; 384 slot1 = IWL_MIN_SLOT_TIME;
384 } 385 }
@@ -387,7 +388,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
387 cmd.slots[0].width = cpu_to_le16(slot0); 388 cmd.slots[0].width = cpu_to_le16(slot0);
388 cmd.slots[1].width = cpu_to_le16(slot1); 389 cmd.slots[1].width = cpu_to_le16(slot1);
389 390
390 ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC, 391 ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WIPAN_PARAMS, CMD_SYNC,
391 sizeof(cmd), &cmd); 392 sizeof(cmd), &cmd);
392 if (ret) 393 if (ret)
393 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret); 394 IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
@@ -420,12 +421,12 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
420 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); 421 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
421 int ret; 422 int ret;
422 423
423 lockdep_assert_held(&priv->mutex); 424 lockdep_assert_held(&priv->shrd->mutex);
424 425
425 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 426 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
426 return -EINVAL; 427 return -EINVAL;
427 428
428 if (!iwl_is_alive(priv)) 429 if (!iwl_is_alive(priv->shrd))
429 return -EBUSY; 430 return -EBUSY;
430 431
431 /* This function hardcodes a bunch of dual-mode assumptions */ 432 /* This function hardcodes a bunch of dual-mode assumptions */
@@ -434,6 +435,10 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
434 if (!ctx->is_active) 435 if (!ctx->is_active)
435 return 0; 436 return 0;
436 437
438 /* override BSSID if necessary due to preauth */
439 if (ctx->preauth_bssid)
440 memcpy(ctx->staging.bssid_addr, ctx->bssid, ETH_ALEN);
441
437 /* always get timestamp with Rx frame */ 442 /* always get timestamp with Rx frame */
438 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 443 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
439 444
@@ -462,7 +467,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
462 * receive commit_rxon request 467 * receive commit_rxon request
463 * abort any previous channel switch if still in process 468 * abort any previous channel switch if still in process
464 */ 469 */
465 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) && 470 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status) &&
466 (priv->switch_channel != ctx->staging.channel)) { 471 (priv->switch_channel != ctx->staging.channel)) {
467 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 472 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
468 le16_to_cpu(priv->switch_channel)); 473 le16_to_cpu(priv->switch_channel));
@@ -536,14 +541,14 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
536 541
537 IWL_DEBUG_MAC80211(priv, "changed %#x", changed); 542 IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
538 543
539 mutex_lock(&priv->mutex); 544 mutex_lock(&priv->shrd->mutex);
540 545
541 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) { 546 if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
542 IWL_DEBUG_MAC80211(priv, "leave - scanning\n"); 547 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
543 goto out; 548 goto out;
544 } 549 }
545 550
546 if (!iwl_is_ready(priv)) { 551 if (!iwl_is_ready(priv->shrd)) {
547 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 552 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
548 goto out; 553 goto out;
549 } 554 }
@@ -575,7 +580,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
575 goto out; 580 goto out;
576 } 581 }
577 582
578 spin_lock_irqsave(&priv->lock, flags); 583 spin_lock_irqsave(&priv->shrd->lock, flags);
579 584
580 for_each_context(priv, ctx) { 585 for_each_context(priv, ctx) {
581 /* Configure HT40 channels */ 586 /* Configure HT40 channels */
@@ -619,7 +624,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
619 ctx->vif); 624 ctx->vif);
620 } 625 }
621 626
622 spin_unlock_irqrestore(&priv->lock, flags); 627 spin_unlock_irqrestore(&priv->shrd->lock, flags);
623 628
624 iwl_update_bcast_stations(priv); 629 iwl_update_bcast_stations(priv);
625 630
@@ -651,7 +656,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
651 iwlagn_commit_rxon(priv, ctx); 656 iwlagn_commit_rxon(priv, ctx);
652 } 657 }
653 out: 658 out:
654 mutex_unlock(&priv->mutex); 659 mutex_unlock(&priv->shrd->mutex);
655 return ret; 660 return ret;
656} 661}
657 662
@@ -666,7 +671,7 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
666 struct ieee80211_sta_ht_cap *ht_cap; 671 struct ieee80211_sta_ht_cap *ht_cap;
667 bool need_multiple; 672 bool need_multiple;
668 673
669 lockdep_assert_held(&priv->mutex); 674 lockdep_assert_held(&priv->shrd->mutex);
670 675
671 switch (vif->type) { 676 switch (vif->type) {
672 case NL80211_IFTYPE_STATION: 677 case NL80211_IFTYPE_STATION:
@@ -770,7 +775,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
770 memset(&cmd, 0, sizeof(cmd)); 775 memset(&cmd, 0, sizeof(cmd));
771 iwl_set_calib_hdr(&cmd.hdr, 776 iwl_set_calib_hdr(&cmd.hdr,
772 priv->phy_calib_chain_noise_reset_cmd); 777 priv->phy_calib_chain_noise_reset_cmd);
773 ret = trans_send_cmd_pdu(&priv->trans, 778 ret = iwl_trans_send_cmd_pdu(trans(priv),
774 REPLY_PHY_CALIBRATION_CMD, 779 REPLY_PHY_CALIBRATION_CMD,
775 CMD_SYNC, sizeof(cmd), &cmd); 780 CMD_SYNC, sizeof(cmd), &cmd);
776 if (ret) 781 if (ret)
@@ -791,17 +796,17 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
791 int ret; 796 int ret;
792 bool force = false; 797 bool force = false;
793 798
794 mutex_lock(&priv->mutex); 799 mutex_lock(&priv->shrd->mutex);
795 800
796 if (unlikely(!iwl_is_ready(priv))) { 801 if (unlikely(!iwl_is_ready(priv->shrd))) {
797 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 802 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
798 mutex_unlock(&priv->mutex); 803 mutex_unlock(&priv->shrd->mutex);
799 return; 804 return;
800 } 805 }
801 806
802 if (unlikely(!ctx->vif)) { 807 if (unlikely(!ctx->vif)) {
803 IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n"); 808 IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
804 mutex_unlock(&priv->mutex); 809 mutex_unlock(&priv->shrd->mutex);
805 return; 810 return;
806 } 811 }
807 812
@@ -834,7 +839,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
834 */ 839 */
835 if (ctx->last_tx_rejected) { 840 if (ctx->last_tx_rejected) {
836 ctx->last_tx_rejected = false; 841 ctx->last_tx_rejected = false;
837 iwl_wake_any_queue(priv, ctx); 842 iwl_trans_wake_any_queue(trans(priv),
843 ctx->ctxid);
838 } 844 }
839 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 845 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
840 846
@@ -895,6 +901,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
895 if (!priv->disable_chain_noise_cal) 901 if (!priv->disable_chain_noise_cal)
896 iwlagn_chain_noise_reset(priv); 902 iwlagn_chain_noise_reset(priv);
897 priv->start_calib = 1; 903 priv->start_calib = 1;
904 WARN_ON(ctx->preauth_bssid);
898 } 905 }
899 906
900 if (changes & BSS_CHANGED_IBSS) { 907 if (changes & BSS_CHANGED_IBSS) {
@@ -912,7 +919,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
912 IWL_ERR(priv, "Error sending IBSS beacon\n"); 919 IWL_ERR(priv, "Error sending IBSS beacon\n");
913 } 920 }
914 921
915 mutex_unlock(&priv->mutex); 922 mutex_unlock(&priv->shrd->mutex);
916} 923}
917 924
918void iwlagn_post_scan(struct iwl_priv *priv) 925void iwlagn_post_scan(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 37e624095e40..8f0b86de1863 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -49,7 +49,7 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
49 return NULL; 49 return NULL;
50 } 50 }
51 51
52 lockdep_assert_held(&priv->mutex); 52 lockdep_assert_held(&priv->shrd->mutex);
53 53
54 /* Set up the rate scaling to start at selected rate, fall back 54 /* Set up the rate scaling to start at selected rate, fall back
55 * all the way down to 1M in IEEE order, and then spin on 1M */ 55 * all the way down to 1M in IEEE order, and then spin on 1M */
@@ -63,23 +63,23 @@ iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, u8 sta_id)
63 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 63 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
64 rate_flags |= RATE_MCS_CCK_MSK; 64 rate_flags |= RATE_MCS_CCK_MSK;
65 65
66 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << 66 rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
67 RATE_MCS_ANT_POS; 67 RATE_MCS_ANT_POS;
68 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags); 68 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
69 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) 69 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
70 link_cmd->rs_table[i].rate_n_flags = rate_n_flags; 70 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
71 71
72 link_cmd->general_params.single_stream_ant_msk = 72 link_cmd->general_params.single_stream_ant_msk =
73 first_antenna(priv->hw_params.valid_tx_ant); 73 first_antenna(hw_params(priv).valid_tx_ant);
74 74
75 link_cmd->general_params.dual_stream_ant_msk = 75 link_cmd->general_params.dual_stream_ant_msk =
76 priv->hw_params.valid_tx_ant & 76 hw_params(priv).valid_tx_ant &
77 ~first_antenna(priv->hw_params.valid_tx_ant); 77 ~first_antenna(hw_params(priv).valid_tx_ant);
78 if (!link_cmd->general_params.dual_stream_ant_msk) { 78 if (!link_cmd->general_params.dual_stream_ant_msk) {
79 link_cmd->general_params.dual_stream_ant_msk = ANT_AB; 79 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
80 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) { 80 } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
81 link_cmd->general_params.dual_stream_ant_msk = 81 link_cmd->general_params.dual_stream_ant_msk =
82 priv->hw_params.valid_tx_ant; 82 hw_params(priv).valid_tx_ant;
83 } 83 }
84 84
85 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 85 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
@@ -116,9 +116,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
116 if (sta_id_r) 116 if (sta_id_r)
117 *sta_id_r = sta_id; 117 *sta_id_r = sta_id;
118 118
119 spin_lock_irqsave(&priv->sta_lock, flags); 119 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
120 priv->stations[sta_id].used |= IWL_STA_LOCAL; 120 priv->stations[sta_id].used |= IWL_STA_LOCAL;
121 spin_unlock_irqrestore(&priv->sta_lock, flags); 121 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
122 122
123 /* Set up default rate scaling table in device's station table */ 123 /* Set up default rate scaling table in device's station table */
124 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); 124 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
@@ -132,9 +132,9 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
132 if (ret) 132 if (ret)
133 IWL_ERR(priv, "Link quality command failed (%d)\n", ret); 133 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
134 134
135 spin_lock_irqsave(&priv->sta_lock, flags); 135 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
136 priv->stations[sta_id].lq = link_cmd; 136 priv->stations[sta_id].lq = link_cmd;
137 spin_unlock_irqrestore(&priv->sta_lock, flags); 137 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
138 138
139 return 0; 139 return 0;
140} 140}
@@ -189,7 +189,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
189 cmd.len[0] = cmd_size; 189 cmd.len[0] = cmd_size;
190 190
191 if (not_empty || send_if_empty) 191 if (not_empty || send_if_empty)
192 return trans_send_cmd(&priv->trans, &cmd); 192 return iwl_trans_send_cmd(trans(priv), &cmd);
193 else 193 else
194 return 0; 194 return 0;
195} 195}
@@ -197,7 +197,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
197int iwl_restore_default_wep_keys(struct iwl_priv *priv, 197int iwl_restore_default_wep_keys(struct iwl_priv *priv,
198 struct iwl_rxon_context *ctx) 198 struct iwl_rxon_context *ctx)
199{ 199{
200 lockdep_assert_held(&priv->mutex); 200 lockdep_assert_held(&priv->shrd->mutex);
201 201
202 return iwl_send_static_wepkey_cmd(priv, ctx, false); 202 return iwl_send_static_wepkey_cmd(priv, ctx, false);
203} 203}
@@ -208,13 +208,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
208{ 208{
209 int ret; 209 int ret;
210 210
211 lockdep_assert_held(&priv->mutex); 211 lockdep_assert_held(&priv->shrd->mutex);
212 212
213 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 213 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
214 keyconf->keyidx); 214 keyconf->keyidx);
215 215
216 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0])); 216 memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
217 if (iwl_is_rfkill(priv)) { 217 if (iwl_is_rfkill(priv->shrd)) {
218 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); 218 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
219 /* but keys in device are clear anyway so return success */ 219 /* but keys in device are clear anyway so return success */
220 return 0; 220 return 0;
@@ -232,7 +232,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
232{ 232{
233 int ret; 233 int ret;
234 234
235 lockdep_assert_held(&priv->mutex); 235 lockdep_assert_held(&priv->shrd->mutex);
236 236
237 if (keyconf->keylen != WEP_KEY_LEN_128 && 237 if (keyconf->keylen != WEP_KEY_LEN_128 &&
238 keyconf->keylen != WEP_KEY_LEN_64) { 238 keyconf->keylen != WEP_KEY_LEN_64) {
@@ -311,9 +311,9 @@ static int iwlagn_send_sta_key(struct iwl_priv *priv,
311 struct iwl_addsta_cmd sta_cmd; 311 struct iwl_addsta_cmd sta_cmd;
312 int i; 312 int i;
313 313
314 spin_lock_irqsave(&priv->sta_lock, flags); 314 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
315 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); 315 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
316 spin_unlock_irqrestore(&priv->sta_lock, flags); 316 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
317 317
318 key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); 318 key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
319 key_flags |= STA_KEY_FLG_MAP_KEY_MSK; 319 key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
@@ -388,16 +388,16 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
388 if (sta_id == IWL_INVALID_STATION) 388 if (sta_id == IWL_INVALID_STATION)
389 return -ENOENT; 389 return -ENOENT;
390 390
391 spin_lock_irqsave(&priv->sta_lock, flags); 391 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
392 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd)); 392 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
393 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) 393 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
394 sta_id = IWL_INVALID_STATION; 394 sta_id = IWL_INVALID_STATION;
395 spin_unlock_irqrestore(&priv->sta_lock, flags); 395 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
396 396
397 if (sta_id == IWL_INVALID_STATION) 397 if (sta_id == IWL_INVALID_STATION)
398 return 0; 398 return 0;
399 399
400 lockdep_assert_held(&priv->mutex); 400 lockdep_assert_held(&priv->shrd->mutex);
401 401
402 ctx->key_mapping_keys--; 402 ctx->key_mapping_keys--;
403 403
@@ -430,7 +430,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
430 if (sta_id == IWL_INVALID_STATION) 430 if (sta_id == IWL_INVALID_STATION)
431 return -EINVAL; 431 return -EINVAL;
432 432
433 lockdep_assert_held(&priv->mutex); 433 lockdep_assert_held(&priv->shrd->mutex);
434 434
435 keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv); 435 keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
436 if (keyconf->hw_key_idx == WEP_INVALID_OFFSET) 436 if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
@@ -493,18 +493,18 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
493 unsigned long flags; 493 unsigned long flags;
494 u8 sta_id; 494 u8 sta_id;
495 495
496 spin_lock_irqsave(&priv->sta_lock, flags); 496 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
497 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); 497 sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
498 if (sta_id == IWL_INVALID_STATION) { 498 if (sta_id == IWL_INVALID_STATION) {
499 IWL_ERR(priv, "Unable to prepare broadcast station\n"); 499 IWL_ERR(priv, "Unable to prepare broadcast station\n");
500 spin_unlock_irqrestore(&priv->sta_lock, flags); 500 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
501 501
502 return -EINVAL; 502 return -EINVAL;
503 } 503 }
504 504
505 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; 505 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
506 priv->stations[sta_id].used |= IWL_STA_BCAST; 506 priv->stations[sta_id].used |= IWL_STA_BCAST;
507 spin_unlock_irqrestore(&priv->sta_lock, flags); 507 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
508 508
509 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id); 509 link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
510 if (!link_cmd) { 510 if (!link_cmd) {
@@ -513,9 +513,9 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
513 return -ENOMEM; 513 return -ENOMEM;
514 } 514 }
515 515
516 spin_lock_irqsave(&priv->sta_lock, flags); 516 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
517 priv->stations[sta_id].lq = link_cmd; 517 priv->stations[sta_id].lq = link_cmd;
518 spin_unlock_irqrestore(&priv->sta_lock, flags); 518 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
519 519
520 return 0; 520 return 0;
521} 521}
@@ -539,13 +539,13 @@ int iwl_update_bcast_station(struct iwl_priv *priv,
539 return -ENOMEM; 539 return -ENOMEM;
540 } 540 }
541 541
542 spin_lock_irqsave(&priv->sta_lock, flags); 542 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
543 if (priv->stations[sta_id].lq) 543 if (priv->stations[sta_id].lq)
544 kfree(priv->stations[sta_id].lq); 544 kfree(priv->stations[sta_id].lq);
545 else 545 else
546 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n"); 546 IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
547 priv->stations[sta_id].lq = link_cmd; 547 priv->stations[sta_id].lq = link_cmd;
548 spin_unlock_irqrestore(&priv->sta_lock, flags); 548 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
549 549
550 return 0; 550 return 0;
551} 551}
@@ -572,15 +572,15 @@ int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
572 unsigned long flags; 572 unsigned long flags;
573 struct iwl_addsta_cmd sta_cmd; 573 struct iwl_addsta_cmd sta_cmd;
574 574
575 lockdep_assert_held(&priv->mutex); 575 lockdep_assert_held(&priv->shrd->mutex);
576 576
577 /* Remove "disable" flag, to enable Tx for this TID */ 577 /* Remove "disable" flag, to enable Tx for this TID */
578 spin_lock_irqsave(&priv->sta_lock, flags); 578 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
579 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX; 579 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
580 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid)); 580 priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
581 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 581 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
582 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 582 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
583 spin_unlock_irqrestore(&priv->sta_lock, flags); 583 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
584 584
585 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 585 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
586} 586}
@@ -592,20 +592,20 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
592 int sta_id; 592 int sta_id;
593 struct iwl_addsta_cmd sta_cmd; 593 struct iwl_addsta_cmd sta_cmd;
594 594
595 lockdep_assert_held(&priv->mutex); 595 lockdep_assert_held(&priv->shrd->mutex);
596 596
597 sta_id = iwl_sta_id(sta); 597 sta_id = iwl_sta_id(sta);
598 if (sta_id == IWL_INVALID_STATION) 598 if (sta_id == IWL_INVALID_STATION)
599 return -ENXIO; 599 return -ENXIO;
600 600
601 spin_lock_irqsave(&priv->sta_lock, flags); 601 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
602 priv->stations[sta_id].sta.station_flags_msk = 0; 602 priv->stations[sta_id].sta.station_flags_msk = 0;
603 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK; 603 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
604 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid; 604 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
605 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn); 605 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
606 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 606 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
607 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 607 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
608 spin_unlock_irqrestore(&priv->sta_lock, flags); 608 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
609 609
610 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 610 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
611} 611}
@@ -617,7 +617,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
617 int sta_id; 617 int sta_id;
618 struct iwl_addsta_cmd sta_cmd; 618 struct iwl_addsta_cmd sta_cmd;
619 619
620 lockdep_assert_held(&priv->mutex); 620 lockdep_assert_held(&priv->shrd->mutex);
621 621
622 sta_id = iwl_sta_id(sta); 622 sta_id = iwl_sta_id(sta);
623 if (sta_id == IWL_INVALID_STATION) { 623 if (sta_id == IWL_INVALID_STATION) {
@@ -625,13 +625,13 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
625 return -ENXIO; 625 return -ENXIO;
626 } 626 }
627 627
628 spin_lock_irqsave(&priv->sta_lock, flags); 628 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
629 priv->stations[sta_id].sta.station_flags_msk = 0; 629 priv->stations[sta_id].sta.station_flags_msk = 0;
630 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK; 630 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
631 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid; 631 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
632 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 632 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
633 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 633 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
634 spin_unlock_irqrestore(&priv->sta_lock, flags); 634 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
635 635
636 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 636 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
637} 637}
@@ -640,14 +640,14 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
640{ 640{
641 unsigned long flags; 641 unsigned long flags;
642 642
643 spin_lock_irqsave(&priv->sta_lock, flags); 643 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
644 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; 644 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
645 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; 645 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
646 priv->stations[sta_id].sta.sta.modify_mask = 0; 646 priv->stations[sta_id].sta.sta.modify_mask = 0;
647 priv->stations[sta_id].sta.sleep_tx_count = 0; 647 priv->stations[sta_id].sta.sleep_tx_count = 0;
648 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 648 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
649 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 649 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
650 spin_unlock_irqrestore(&priv->sta_lock, flags); 650 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
651 651
652} 652}
653 653
@@ -655,7 +655,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
655{ 655{
656 unsigned long flags; 656 unsigned long flags;
657 657
658 spin_lock_irqsave(&priv->sta_lock, flags); 658 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
659 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK; 659 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
660 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; 660 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
661 priv->stations[sta_id].sta.sta.modify_mask = 661 priv->stations[sta_id].sta.sta.modify_mask =
@@ -663,7 +663,7 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
663 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt); 663 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
664 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 664 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
665 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 665 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
666 spin_unlock_irqrestore(&priv->sta_lock, flags); 666 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
667 667
668} 668}
669 669
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index f501d742984c..92ba8cd0ecd5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -176,24 +176,24 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
176 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 176 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
177 unsigned long flags; 177 unsigned long flags;
178 178
179 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 179 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
180 return; 180 return;
181 181
182 if (tt->state == IWL_TI_CT_KILL) { 182 if (tt->state == IWL_TI_CT_KILL) {
183 if (priv->thermal_throttle.ct_kill_toggle) { 183 if (priv->thermal_throttle.ct_kill_toggle) {
184 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 184 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
185 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 185 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
186 priv->thermal_throttle.ct_kill_toggle = false; 186 priv->thermal_throttle.ct_kill_toggle = false;
187 } else { 187 } else {
188 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 188 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
189 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 189 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
190 priv->thermal_throttle.ct_kill_toggle = true; 190 priv->thermal_throttle.ct_kill_toggle = true;
191 } 191 }
192 iwl_read32(priv, CSR_UCODE_DRV_GP1); 192 iwl_read32(bus(priv), CSR_UCODE_DRV_GP1);
193 spin_lock_irqsave(&priv->reg_lock, flags); 193 spin_lock_irqsave(&bus(priv)->reg_lock, flags);
194 if (!iwl_grab_nic_access(priv)) 194 if (!iwl_grab_nic_access(bus(priv)))
195 iwl_release_nic_access(priv); 195 iwl_release_nic_access(bus(priv));
196 spin_unlock_irqrestore(&priv->reg_lock, flags); 196 spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
197 197
198 /* Reschedule the ct_kill timer to occur in 198 /* Reschedule the ct_kill timer to occur in
199 * CT_KILL_EXIT_DURATION seconds to ensure we get a 199 * CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -209,7 +209,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
209{ 209{
210 if (stop) { 210 if (stop) {
211 IWL_DEBUG_TEMP(priv, "Stop all queues\n"); 211 IWL_DEBUG_TEMP(priv, "Stop all queues\n");
212 if (priv->mac80211_registered) 212 if (priv->shrd->mac80211_registered)
213 ieee80211_stop_queues(priv->hw); 213 ieee80211_stop_queues(priv->hw);
214 IWL_DEBUG_TEMP(priv, 214 IWL_DEBUG_TEMP(priv,
215 "Schedule 5 seconds CT_KILL Timer\n"); 215 "Schedule 5 seconds CT_KILL Timer\n");
@@ -217,7 +217,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
217 jiffies + CT_KILL_EXIT_DURATION * HZ); 217 jiffies + CT_KILL_EXIT_DURATION * HZ);
218 } else { 218 } else {
219 IWL_DEBUG_TEMP(priv, "Wake all queues\n"); 219 IWL_DEBUG_TEMP(priv, "Wake all queues\n");
220 if (priv->mac80211_registered) 220 if (priv->shrd->mac80211_registered)
221 ieee80211_wake_queues(priv->hw); 221 ieee80211_wake_queues(priv->hw);
222 } 222 }
223} 223}
@@ -227,7 +227,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
227 struct iwl_priv *priv = (struct iwl_priv *)data; 227 struct iwl_priv *priv = (struct iwl_priv *)data;
228 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 228 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
229 229
230 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 230 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
231 return; 231 return;
232 232
233 /* temperature timer expired, ready to go into CT_KILL state */ 233 /* temperature timer expired, ready to go into CT_KILL state */
@@ -235,7 +235,7 @@ static void iwl_tt_ready_for_ct_kill(unsigned long data)
235 IWL_DEBUG_TEMP(priv, "entering CT_KILL state when " 235 IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
236 "temperature timer expired\n"); 236 "temperature timer expired\n");
237 tt->state = IWL_TI_CT_KILL; 237 tt->state = IWL_TI_CT_KILL;
238 set_bit(STATUS_CT_KILL, &priv->status); 238 set_bit(STATUS_CT_KILL, &priv->shrd->status);
239 iwl_perform_ct_kill_task(priv, true); 239 iwl_perform_ct_kill_task(priv, true);
240 } 240 }
241} 241}
@@ -313,23 +313,24 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
313 tt->tt_power_mode = IWL_POWER_INDEX_5; 313 tt->tt_power_mode = IWL_POWER_INDEX_5;
314 break; 314 break;
315 } 315 }
316 mutex_lock(&priv->mutex); 316 mutex_lock(&priv->shrd->mutex);
317 if (old_state == IWL_TI_CT_KILL) 317 if (old_state == IWL_TI_CT_KILL)
318 clear_bit(STATUS_CT_KILL, &priv->status); 318 clear_bit(STATUS_CT_KILL, &priv->shrd->status);
319 if (tt->state != IWL_TI_CT_KILL && 319 if (tt->state != IWL_TI_CT_KILL &&
320 iwl_power_update_mode(priv, true)) { 320 iwl_power_update_mode(priv, true)) {
321 /* TT state not updated 321 /* TT state not updated
322 * try again during next temperature read 322 * try again during next temperature read
323 */ 323 */
324 if (old_state == IWL_TI_CT_KILL) 324 if (old_state == IWL_TI_CT_KILL)
325 set_bit(STATUS_CT_KILL, &priv->status); 325 set_bit(STATUS_CT_KILL, &priv->shrd->status);
326 tt->state = old_state; 326 tt->state = old_state;
327 IWL_ERR(priv, "Cannot update power mode, " 327 IWL_ERR(priv, "Cannot update power mode, "
328 "TT state not updated\n"); 328 "TT state not updated\n");
329 } else { 329 } else {
330 if (tt->state == IWL_TI_CT_KILL) { 330 if (tt->state == IWL_TI_CT_KILL) {
331 if (force) { 331 if (force) {
332 set_bit(STATUS_CT_KILL, &priv->status); 332 set_bit(STATUS_CT_KILL,
333 &priv->shrd->status);
333 iwl_perform_ct_kill_task(priv, true); 334 iwl_perform_ct_kill_task(priv, true);
334 } else { 335 } else {
335 iwl_prepare_ct_kill_task(priv); 336 iwl_prepare_ct_kill_task(priv);
@@ -343,7 +344,7 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
343 IWL_DEBUG_TEMP(priv, "Power Index change to %u\n", 344 IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
344 tt->tt_power_mode); 345 tt->tt_power_mode);
345 } 346 }
346 mutex_unlock(&priv->mutex); 347 mutex_unlock(&priv->shrd->mutex);
347 } 348 }
348} 349}
349 350
@@ -453,9 +454,9 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
453 * in case get disabled before */ 454 * in case get disabled before */
454 iwl_set_rxon_ht(priv, &priv->current_ht_config); 455 iwl_set_rxon_ht(priv, &priv->current_ht_config);
455 } 456 }
456 mutex_lock(&priv->mutex); 457 mutex_lock(&priv->shrd->mutex);
457 if (old_state == IWL_TI_CT_KILL) 458 if (old_state == IWL_TI_CT_KILL)
458 clear_bit(STATUS_CT_KILL, &priv->status); 459 clear_bit(STATUS_CT_KILL, &priv->shrd->status);
459 if (tt->state != IWL_TI_CT_KILL && 460 if (tt->state != IWL_TI_CT_KILL &&
460 iwl_power_update_mode(priv, true)) { 461 iwl_power_update_mode(priv, true)) {
461 /* TT state not updated 462 /* TT state not updated
@@ -464,7 +465,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
464 IWL_ERR(priv, "Cannot update power mode, " 465 IWL_ERR(priv, "Cannot update power mode, "
465 "TT state not updated\n"); 466 "TT state not updated\n");
466 if (old_state == IWL_TI_CT_KILL) 467 if (old_state == IWL_TI_CT_KILL)
467 set_bit(STATUS_CT_KILL, &priv->status); 468 set_bit(STATUS_CT_KILL, &priv->shrd->status);
468 tt->state = old_state; 469 tt->state = old_state;
469 } else { 470 } else {
470 IWL_DEBUG_TEMP(priv, 471 IWL_DEBUG_TEMP(priv,
@@ -475,7 +476,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
475 if (force) { 476 if (force) {
476 IWL_DEBUG_TEMP(priv, 477 IWL_DEBUG_TEMP(priv,
477 "Enter IWL_TI_CT_KILL\n"); 478 "Enter IWL_TI_CT_KILL\n");
478 set_bit(STATUS_CT_KILL, &priv->status); 479 set_bit(STATUS_CT_KILL,
480 &priv->shrd->status);
479 iwl_perform_ct_kill_task(priv, true); 481 iwl_perform_ct_kill_task(priv, true);
480 } else { 482 } else {
481 iwl_prepare_ct_kill_task(priv); 483 iwl_prepare_ct_kill_task(priv);
@@ -487,7 +489,7 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
487 iwl_perform_ct_kill_task(priv, false); 489 iwl_perform_ct_kill_task(priv, false);
488 } 490 }
489 } 491 }
490 mutex_unlock(&priv->mutex); 492 mutex_unlock(&priv->shrd->mutex);
491 } 493 }
492} 494}
493 495
@@ -506,10 +508,10 @@ static void iwl_bg_ct_enter(struct work_struct *work)
506 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter); 508 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
507 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 509 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
508 510
509 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 511 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
510 return; 512 return;
511 513
512 if (!iwl_is_ready(priv)) 514 if (!iwl_is_ready(priv->shrd))
513 return; 515 return;
514 516
515 if (tt->state != IWL_TI_CT_KILL) { 517 if (tt->state != IWL_TI_CT_KILL) {
@@ -535,10 +537,10 @@ static void iwl_bg_ct_exit(struct work_struct *work)
535 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit); 537 struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
536 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 538 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
537 539
538 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 540 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
539 return; 541 return;
540 542
541 if (!iwl_is_ready(priv)) 543 if (!iwl_is_ready(priv->shrd))
542 return; 544 return;
543 545
544 /* stop ct_kill_exit_tm timer */ 546 /* stop ct_kill_exit_tm timer */
@@ -565,20 +567,20 @@ static void iwl_bg_ct_exit(struct work_struct *work)
565 567
566void iwl_tt_enter_ct_kill(struct iwl_priv *priv) 568void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
567{ 569{
568 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 570 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
569 return; 571 return;
570 572
571 IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n"); 573 IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
572 queue_work(priv->workqueue, &priv->ct_enter); 574 queue_work(priv->shrd->workqueue, &priv->ct_enter);
573} 575}
574 576
575void iwl_tt_exit_ct_kill(struct iwl_priv *priv) 577void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
576{ 578{
577 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 579 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
578 return; 580 return;
579 581
580 IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n"); 582 IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
581 queue_work(priv->workqueue, &priv->ct_exit); 583 queue_work(priv->shrd->workqueue, &priv->ct_exit);
582} 584}
583 585
584static void iwl_bg_tt_work(struct work_struct *work) 586static void iwl_bg_tt_work(struct work_struct *work)
@@ -586,7 +588,7 @@ static void iwl_bg_tt_work(struct work_struct *work)
586 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); 588 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
587 s32 temp = priv->temperature; /* degrees CELSIUS except specified */ 589 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
588 590
589 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 591 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
590 return; 592 return;
591 593
592 if (priv->cfg->base_params->temperature_kelvin) 594 if (priv->cfg->base_params->temperature_kelvin)
@@ -600,11 +602,11 @@ static void iwl_bg_tt_work(struct work_struct *work)
600 602
601void iwl_tt_handler(struct iwl_priv *priv) 603void iwl_tt_handler(struct iwl_priv *priv)
602{ 604{
603 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 605 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
604 return; 606 return;
605 607
606 IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n"); 608 IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
607 queue_work(priv->workqueue, &priv->tt_work); 609 queue_work(priv->shrd->workqueue, &priv->tt_work);
608} 610}
609 611
610/* Thermal throttling initialization 612/* Thermal throttling initialization
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 9bc26da62768..f8a4bcf0a34b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -31,6 +31,7 @@
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/sched.h> 33#include <linux/sched.h>
34#include <linux/ieee80211.h>
34 35
35#include "iwl-dev.h" 36#include "iwl-dev.h"
36#include "iwl-core.h" 37#include "iwl-core.h"
@@ -41,79 +42,6 @@
41#include "iwl-agn.h" 42#include "iwl-agn.h"
42#include "iwl-trans.h" 43#include "iwl-trans.h"
43 44
44/*
45 * mac80211 queues, ACs, hardware queues, FIFOs.
46 *
47 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
48 *
49 * Mac80211 uses the following numbers, which we get as from it
50 * by way of skb_get_queue_mapping(skb):
51 *
52 * VO 0
53 * VI 1
54 * BE 2
55 * BK 3
56 *
57 *
58 * Regular (not A-MPDU) frames are put into hardware queues corresponding
59 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
60 * own queue per aggregation session (RA/TID combination), such queues are
61 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
62 * order to map frames to the right queue, we also need an AC->hw queue
63 * mapping. This is implemented here.
64 *
65 * Due to the way hw queues are set up (by the hw specific modules like
66 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
67 * mapping.
68 */
69
70static const u8 tid_to_ac[] = {
71 IEEE80211_AC_BE,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BK,
74 IEEE80211_AC_BE,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VI,
77 IEEE80211_AC_VO,
78 IEEE80211_AC_VO
79};
80
81static inline int get_ac_from_tid(u16 tid)
82{
83 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
84 return tid_to_ac[tid];
85
86 /* no support for TIDs 8-15 yet */
87 return -EINVAL;
88}
89
90static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
91{
92 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
93 return ctx->ac_to_fifo[tid_to_ac[tid]];
94
95 /* no support for TIDs 8-15 yet */
96 return -EINVAL;
97}
98
99static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
100 int tid)
101{
102 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
103 (IWLAGN_FIRST_AMPDU_QUEUE +
104 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
105 IWL_WARN(priv,
106 "queue number out of range: %d, must be %d to %d\n",
107 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
108 IWLAGN_FIRST_AMPDU_QUEUE +
109 priv->cfg->base_params->num_of_ampdu_queues - 1);
110 return -EINVAL;
111 }
112
113 /* Modify device's station table to Tx this TID */
114 return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
115}
116
117static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, 45static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
118 struct ieee80211_tx_info *info, 46 struct ieee80211_tx_info *info,
119 __le16 fc, __le32 *tx_flags) 47 __le16 fc, __le32 *tx_flags)
@@ -260,10 +188,10 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
260 priv->bt_full_concurrent) { 188 priv->bt_full_concurrent) {
261 /* operated as 1x1 in full concurrency mode */ 189 /* operated as 1x1 in full concurrency mode */
262 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 190 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
263 first_antenna(priv->hw_params.valid_tx_ant)); 191 first_antenna(hw_params(priv).valid_tx_ant));
264 } else 192 } else
265 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 193 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
266 priv->hw_params.valid_tx_ant); 194 hw_params(priv).valid_tx_ant);
267 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 195 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
268 196
269 /* Set the rate in the TX cmd */ 197 /* Set the rate in the TX cmd */
@@ -321,23 +249,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
321 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 249 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
322 struct iwl_station_priv *sta_priv = NULL; 250 struct iwl_station_priv *sta_priv = NULL;
323 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 251 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
252 struct iwl_device_cmd *dev_cmd = NULL;
324 struct iwl_tx_cmd *tx_cmd; 253 struct iwl_tx_cmd *tx_cmd;
325 int txq_id;
326 254
327 u16 seq_number = 0;
328 __le16 fc; 255 __le16 fc;
329 u8 hdr_len; 256 u8 hdr_len;
330 u16 len; 257 u16 len;
331 u8 sta_id; 258 u8 sta_id;
332 u8 tid = 0;
333 unsigned long flags; 259 unsigned long flags;
334 bool is_agg = false; 260 bool is_agg = false;
335 261
336 if (info->control.vif) 262 if (info->control.vif)
337 ctx = iwl_rxon_ctx_from_vif(info->control.vif); 263 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
338 264
339 spin_lock_irqsave(&priv->lock, flags); 265 spin_lock_irqsave(&priv->shrd->lock, flags);
340 if (iwl_is_rfkill(priv)) { 266 if (iwl_is_rfkill(priv->shrd)) {
341 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); 267 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
342 goto drop_unlock_priv; 268 goto drop_unlock_priv;
343 } 269 }
@@ -387,52 +313,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
387 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); 313 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
388 } 314 }
389 315
390 /* 316 /* irqs already disabled/saved above when locking priv->shrd->lock */
391 * Send this frame after DTIM -- there's a special queue 317 spin_lock(&priv->shrd->sta_lock);
392 * reserved for this for contexts that support AP mode.
393 */
394 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
395 txq_id = ctx->mcast_queue;
396 /*
397 * The microcode will clear the more data
398 * bit in the last frame it transmits.
399 */
400 hdr->frame_control |=
401 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
402 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
403 txq_id = IWL_AUX_QUEUE;
404 else
405 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
406
407 /* irqs already disabled/saved above when locking priv->lock */
408 spin_lock(&priv->sta_lock);
409 318
410 if (ieee80211_is_data_qos(fc)) { 319 dev_cmd = kmem_cache_alloc(priv->tx_cmd_pool, GFP_ATOMIC);
411 u8 *qc = NULL;
412 qc = ieee80211_get_qos_ctl(hdr);
413 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
414
415 if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
416 goto drop_unlock_sta;
417
418 seq_number = priv->stations[sta_id].tid[tid].seq_number;
419 seq_number &= IEEE80211_SCTL_SEQ;
420 hdr->seq_ctrl = hdr->seq_ctrl &
421 cpu_to_le16(IEEE80211_SCTL_FRAG);
422 hdr->seq_ctrl |= cpu_to_le16(seq_number);
423 seq_number += 0x10;
424 /* aggregation is on for this <sta,tid> */
425 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
426 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
427 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
428 is_agg = true;
429 }
430 }
431 320
432 tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id); 321 if (unlikely(!dev_cmd))
433 if (unlikely(!tx_cmd))
434 goto drop_unlock_sta; 322 goto drop_unlock_sta;
435 323
324 memset(dev_cmd, 0, sizeof(*dev_cmd));
325 tx_cmd = &dev_cmd->cmd.tx;
326
436 /* Copy MAC header from skb into command buffer */ 327 /* Copy MAC header from skb into command buffer */
437 memcpy(tx_cmd->hdr, hdr, hdr_len); 328 memcpy(tx_cmd->hdr, hdr, hdr_len);
438 329
@@ -451,17 +342,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
451 342
452 iwl_update_stats(priv, true, fc, len); 343 iwl_update_stats(priv, true, fc, len);
453 344
454 if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx)) 345 info->driver_data[0] = ctx;
455 goto drop_unlock_sta; 346 info->driver_data[1] = dev_cmd;
456 347
457 if (ieee80211_is_data_qos(fc)) { 348 if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
458 priv->stations[sta_id].tid[tid].tfds_in_queue++; 349 goto drop_unlock_sta;
459 if (!ieee80211_has_morefrags(fc))
460 priv->stations[sta_id].tid[tid].seq_number = seq_number;
461 }
462 350
463 spin_unlock(&priv->sta_lock); 351 spin_unlock(&priv->shrd->sta_lock);
464 spin_unlock_irqrestore(&priv->lock, flags); 352 spin_unlock_irqrestore(&priv->shrd->lock, flags);
465 353
466 /* 354 /*
467 * Avoid atomic ops if it isn't an associated client. 355 * Avoid atomic ops if it isn't an associated client.
@@ -476,41 +364,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
476 return 0; 364 return 0;
477 365
478drop_unlock_sta: 366drop_unlock_sta:
479 spin_unlock(&priv->sta_lock); 367 if (dev_cmd)
368 kmem_cache_free(priv->tx_cmd_pool, dev_cmd);
369 spin_unlock(&priv->shrd->sta_lock);
480drop_unlock_priv: 370drop_unlock_priv:
481 spin_unlock_irqrestore(&priv->lock, flags); 371 spin_unlock_irqrestore(&priv->shrd->lock, flags);
482 return -1;
483}
484
485/*
486 * Find first available (lowest unused) Tx Queue, mark it "active".
487 * Called only when finding queue for aggregation.
488 * Should never return anything < 7, because they should already
489 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
490 */
491static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
492{
493 int txq_id;
494
495 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
496 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
497 return txq_id;
498 return -1; 372 return -1;
499} 373}
500 374
501int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 375int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
502 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 376 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
503{ 377{
378 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
504 int sta_id; 379 int sta_id;
505 int tx_fifo;
506 int txq_id;
507 int ret; 380 int ret;
508 unsigned long flags;
509 struct iwl_tid_data *tid_data;
510
511 tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
512 if (unlikely(tx_fifo < 0))
513 return tx_fifo;
514 381
515 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", 382 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
516 sta->addr, tid); 383 sta->addr, tid);
@@ -520,58 +387,29 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
520 IWL_ERR(priv, "Start AGG on invalid station\n"); 387 IWL_ERR(priv, "Start AGG on invalid station\n");
521 return -ENXIO; 388 return -ENXIO;
522 } 389 }
523 if (unlikely(tid >= MAX_TID_COUNT)) 390 if (unlikely(tid >= IWL_MAX_TID_COUNT))
524 return -EINVAL; 391 return -EINVAL;
525 392
526 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) { 393 if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
527 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n"); 394 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
528 return -ENXIO; 395 return -ENXIO;
529 } 396 }
530 397
531 txq_id = iwlagn_txq_ctx_activate_free(priv); 398 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
532 if (txq_id == -1) {
533 IWL_ERR(priv, "No free aggregation queue available\n");
534 return -ENXIO;
535 }
536
537 spin_lock_irqsave(&priv->sta_lock, flags);
538 tid_data = &priv->stations[sta_id].tid[tid];
539 *ssn = SEQ_TO_SN(tid_data->seq_number);
540 tid_data->agg.txq_id = txq_id;
541 tid_data->agg.tx_fifo = tx_fifo;
542 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
543 spin_unlock_irqrestore(&priv->sta_lock, flags);
544
545 ret = iwlagn_txq_agg_enable(priv, txq_id, sta_id, tid);
546 if (ret) 399 if (ret)
547 return ret; 400 return ret;
548 401
549 spin_lock_irqsave(&priv->sta_lock, flags); 402 ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
550 tid_data = &priv->stations[sta_id].tid[tid]; 403 tid, ssn);
551 if (tid_data->tfds_in_queue == 0) { 404
552 IWL_DEBUG_HT(priv, "HW queue is empty\n");
553 tid_data->agg.state = IWL_AGG_ON;
554 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
555 } else {
556 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
557 tid_data->tfds_in_queue);
558 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
559 }
560 spin_unlock_irqrestore(&priv->sta_lock, flags);
561 return ret; 405 return ret;
562} 406}
563 407
564int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 408int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
565 struct ieee80211_sta *sta, u16 tid) 409 struct ieee80211_sta *sta, u16 tid)
566{ 410{
567 int tx_fifo_id, txq_id, sta_id, ssn; 411 int sta_id;
568 struct iwl_tid_data *tid_data; 412 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
569 int write_ptr, read_ptr;
570 unsigned long flags;
571
572 tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
573 if (unlikely(tx_fifo_id < 0))
574 return tx_fifo_id;
575 413
576 sta_id = iwl_sta_id(sta); 414 sta_id = iwl_sta_id(sta);
577 415
@@ -580,101 +418,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
580 return -ENXIO; 418 return -ENXIO;
581 } 419 }
582 420
583 spin_lock_irqsave(&priv->sta_lock, flags); 421 return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
584 422 sta_id, tid);
585 tid_data = &priv->stations[sta_id].tid[tid];
586 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
587 txq_id = tid_data->agg.txq_id;
588
589 switch (priv->stations[sta_id].tid[tid].agg.state) {
590 case IWL_EMPTYING_HW_QUEUE_ADDBA:
591 /*
592 * This can happen if the peer stops aggregation
593 * again before we've had a chance to drain the
594 * queue we selected previously, i.e. before the
595 * session was really started completely.
596 */
597 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
598 goto turn_off;
599 case IWL_AGG_ON:
600 break;
601 default:
602 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
603 }
604
605 write_ptr = priv->txq[txq_id].q.write_ptr;
606 read_ptr = priv->txq[txq_id].q.read_ptr;
607
608 /* The queue is not empty */
609 if (write_ptr != read_ptr) {
610 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
611 priv->stations[sta_id].tid[tid].agg.state =
612 IWL_EMPTYING_HW_QUEUE_DELBA;
613 spin_unlock_irqrestore(&priv->sta_lock, flags);
614 return 0;
615 }
616
617 IWL_DEBUG_HT(priv, "HW queue is empty\n");
618 turn_off:
619 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
620
621 /* do not restore/save irqs */
622 spin_unlock(&priv->sta_lock);
623 spin_lock(&priv->lock);
624
625 /*
626 * the only reason this call can fail is queue number out of range,
627 * which can happen if uCode is reloaded and all the station
628 * information are lost. if it is outside the range, there is no need
629 * to deactivate the uCode queue, just return "success" to allow
630 * mac80211 to clean up it own data.
631 */
632 trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
633 spin_unlock_irqrestore(&priv->lock, flags);
634
635 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
636
637 return 0;
638}
639
640int iwlagn_txq_check_empty(struct iwl_priv *priv,
641 int sta_id, u8 tid, int txq_id)
642{
643 struct iwl_queue *q = &priv->txq[txq_id].q;
644 u8 *addr = priv->stations[sta_id].sta.sta.addr;
645 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
646 struct iwl_rxon_context *ctx;
647
648 ctx = &priv->contexts[priv->stations[sta_id].ctxid];
649
650 lockdep_assert_held(&priv->sta_lock);
651
652 switch (priv->stations[sta_id].tid[tid].agg.state) {
653 case IWL_EMPTYING_HW_QUEUE_DELBA:
654 /* We are reclaiming the last packet of the */
655 /* aggregated HW queue */
656 if ((txq_id == tid_data->agg.txq_id) &&
657 (q->read_ptr == q->write_ptr)) {
658 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
659 int tx_fifo = get_fifo_from_tid(ctx, tid);
660 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
661 trans_txq_agg_disable(&priv->trans, txq_id,
662 ssn, tx_fifo);
663 tid_data->agg.state = IWL_AGG_OFF;
664 ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
665 }
666 break;
667 case IWL_EMPTYING_HW_QUEUE_ADDBA:
668 /* We are reclaiming the last packet of the queue */
669 if (tid_data->tfds_in_queue == 0) {
670 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
671 tid_data->agg.state = IWL_AGG_ON;
672 ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
673 }
674 break;
675 }
676
677 return 0;
678} 423}
679 424
680static void iwlagn_non_agg_tx_status(struct iwl_priv *priv, 425static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -696,147 +441,389 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
696 rcu_read_unlock(); 441 rcu_read_unlock();
697} 442}
698 443
699static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info, 444/**
700 bool is_agg) 445 * translate ucode response to mac80211 tx status control values
446 */
447static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
448 struct ieee80211_tx_info *info)
701{ 449{
702 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data; 450 struct ieee80211_tx_rate *r = &info->control.rates[0];
451
452 info->antenna_sel_tx =
453 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
454 if (rate_n_flags & RATE_MCS_HT_MSK)
455 r->flags |= IEEE80211_TX_RC_MCS;
456 if (rate_n_flags & RATE_MCS_GF_MSK)
457 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
458 if (rate_n_flags & RATE_MCS_HT40_MSK)
459 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
460 if (rate_n_flags & RATE_MCS_DUP_MSK)
461 r->flags |= IEEE80211_TX_RC_DUP_DATA;
462 if (rate_n_flags & RATE_MCS_SGI_MSK)
463 r->flags |= IEEE80211_TX_RC_SHORT_GI;
464 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
465}
466
467#ifdef CONFIG_IWLWIFI_DEBUG
468const char *iwl_get_tx_fail_reason(u32 status)
469{
470#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
471#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
472
473 switch (status & TX_STATUS_MSK) {
474 case TX_STATUS_SUCCESS:
475 return "SUCCESS";
476 TX_STATUS_POSTPONE(DELAY);
477 TX_STATUS_POSTPONE(FEW_BYTES);
478 TX_STATUS_POSTPONE(BT_PRIO);
479 TX_STATUS_POSTPONE(QUIET_PERIOD);
480 TX_STATUS_POSTPONE(CALC_TTAK);
481 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
482 TX_STATUS_FAIL(SHORT_LIMIT);
483 TX_STATUS_FAIL(LONG_LIMIT);
484 TX_STATUS_FAIL(FIFO_UNDERRUN);
485 TX_STATUS_FAIL(DRAIN_FLOW);
486 TX_STATUS_FAIL(RFKILL_FLUSH);
487 TX_STATUS_FAIL(LIFE_EXPIRE);
488 TX_STATUS_FAIL(DEST_PS);
489 TX_STATUS_FAIL(HOST_ABORTED);
490 TX_STATUS_FAIL(BT_RETRY);
491 TX_STATUS_FAIL(STA_INVALID);
492 TX_STATUS_FAIL(FRAG_DROPPED);
493 TX_STATUS_FAIL(TID_DISABLE);
494 TX_STATUS_FAIL(FIFO_FLUSHED);
495 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
496 TX_STATUS_FAIL(PASSIVE_NO_RX);
497 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
498 }
703 499
704 if (!is_agg) 500 return "UNKNOWN";
705 iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
706 501
707 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); 502#undef TX_STATUS_FAIL
503#undef TX_STATUS_POSTPONE
708} 504}
505#endif /* CONFIG_IWLWIFI_DEBUG */
709 506
710int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) 507static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
711{ 508{
712 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 509 status &= AGG_TX_STATUS_MSK;
713 struct iwl_queue *q = &txq->q;
714 struct iwl_tx_info *tx_info;
715 int nfreed = 0;
716 struct ieee80211_hdr *hdr;
717 510
718 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) { 511 switch (status) {
719 IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), " 512 case AGG_TX_STATE_UNDERRUN_MSK:
720 "index %d is out of range [0-%d] %d %d.\n", __func__, 513 priv->reply_agg_tx_stats.underrun++;
721 txq_id, index, q->n_bd, q->write_ptr, q->read_ptr); 514 break;
722 return 0; 515 case AGG_TX_STATE_BT_PRIO_MSK:
516 priv->reply_agg_tx_stats.bt_prio++;
517 break;
518 case AGG_TX_STATE_FEW_BYTES_MSK:
519 priv->reply_agg_tx_stats.few_bytes++;
520 break;
521 case AGG_TX_STATE_ABORT_MSK:
522 priv->reply_agg_tx_stats.abort++;
523 break;
524 case AGG_TX_STATE_LAST_SENT_TTL_MSK:
525 priv->reply_agg_tx_stats.last_sent_ttl++;
526 break;
527 case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
528 priv->reply_agg_tx_stats.last_sent_try++;
529 break;
530 case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
531 priv->reply_agg_tx_stats.last_sent_bt_kill++;
532 break;
533 case AGG_TX_STATE_SCD_QUERY_MSK:
534 priv->reply_agg_tx_stats.scd_query++;
535 break;
536 case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
537 priv->reply_agg_tx_stats.bad_crc32++;
538 break;
539 case AGG_TX_STATE_RESPONSE_MSK:
540 priv->reply_agg_tx_stats.response++;
541 break;
542 case AGG_TX_STATE_DUMP_TX_MSK:
543 priv->reply_agg_tx_stats.dump_tx++;
544 break;
545 case AGG_TX_STATE_DELAY_TX_MSK:
546 priv->reply_agg_tx_stats.delay_tx++;
547 break;
548 default:
549 priv->reply_agg_tx_stats.unknown++;
550 break;
723 } 551 }
552}
724 553
725 for (index = iwl_queue_inc_wrap(index, q->n_bd); 554static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
726 q->read_ptr != index; 555 struct iwlagn_tx_resp *tx_resp)
727 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 556{
557 struct agg_tx_status *frame_status = &tx_resp->status;
558 int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
559 IWLAGN_TX_RES_TID_POS;
560 int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
561 IWLAGN_TX_RES_RA_POS;
562 struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
563 u32 status = le16_to_cpu(tx_resp->status.status);
564 int i;
565
566 if (agg->wait_for_ba)
567 IWL_DEBUG_TX_REPLY(priv,
568 "got tx response w/o block-ack\n");
728 569
729 tx_info = &txq->txb[txq->q.read_ptr]; 570 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
571 agg->wait_for_ba = (tx_resp->frame_count > 1);
730 572
731 if (WARN_ON_ONCE(tx_info->skb == NULL)) 573 /*
732 continue; 574 * If the BT kill count is non-zero, we'll get this
575 * notification again.
576 */
577 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
578 priv->cfg->bt_params &&
579 priv->cfg->bt_params->advanced_bt_coexist) {
580 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
581 }
733 582
734 hdr = (struct ieee80211_hdr *)tx_info->skb->data; 583 /* Construct bit-map of pending frames within Tx window */
735 if (ieee80211_is_data_qos(hdr->frame_control)) 584 for (i = 0; i < tx_resp->frame_count; i++) {
736 nfreed++; 585 u16 fstatus = le16_to_cpu(frame_status[i].status);
737 586
738 iwlagn_tx_status(priv, tx_info, 587 if (status & AGG_TX_STATUS_MSK)
739 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE); 588 iwlagn_count_agg_tx_err_status(priv, fstatus);
740 tx_info->skb = NULL;
741 589
742 iwlagn_txq_inval_byte_cnt_tbl(priv, txq); 590 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
591 AGG_TX_STATE_ABORT_MSK))
592 continue;
743 593
744 iwlagn_txq_free_tfd(priv, txq, txq->q.read_ptr); 594 IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
595 "try-count (0x%08x)\n",
596 iwl_get_agg_tx_fail_reason(fstatus),
597 fstatus & AGG_TX_STATUS_MSK,
598 fstatus & AGG_TX_TRY_MSK);
745 } 599 }
746 return nfreed;
747} 600}
748 601
749/** 602#ifdef CONFIG_IWLWIFI_DEBUG
750 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack 603#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
751 *
752 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
753 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
754 */
755static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
756 struct iwl_ht_agg *agg,
757 struct iwl_compressed_ba_resp *ba_resp)
758 604
605const char *iwl_get_agg_tx_fail_reason(u16 status)
759{ 606{
760 int sh; 607 status &= AGG_TX_STATUS_MSK;
761 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); 608 switch (status) {
762 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 609 case AGG_TX_STATE_TRANSMITTED:
763 struct ieee80211_tx_info *info; 610 return "SUCCESS";
764 u64 bitmap, sent_bitmap; 611 AGG_TX_STATE_FAIL(UNDERRUN_MSK);
765 612 AGG_TX_STATE_FAIL(BT_PRIO_MSK);
766 if (unlikely(!agg->wait_for_ba)) { 613 AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
767 if (unlikely(ba_resp->bitmap)) 614 AGG_TX_STATE_FAIL(ABORT_MSK);
768 IWL_ERR(priv, "Received BA when not expected\n"); 615 AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
769 return -EINVAL; 616 AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
617 AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
618 AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
619 AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
620 AGG_TX_STATE_FAIL(RESPONSE_MSK);
621 AGG_TX_STATE_FAIL(DUMP_TX_MSK);
622 AGG_TX_STATE_FAIL(DELAY_TX_MSK);
770 } 623 }
771 624
772 /* Mark that the expected block-ack response arrived */ 625 return "UNKNOWN";
773 agg->wait_for_ba = 0; 626}
774 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl); 627#endif /* CONFIG_IWLWIFI_DEBUG */
775
776 /* Calculate shift to align block-ack bits with our Tx window bits */
777 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
778 if (sh < 0)
779 sh += 0x100;
780 628
781 /* 629static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
782 * Check for success or failure according to the 630{
783 * transmitted bitmap and block-ack bitmap 631 return le32_to_cpup((__le32 *)&tx_resp->status +
784 */ 632 tx_resp->frame_count) & MAX_SN;
785 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; 633}
786 sent_bitmap = bitmap & agg->bitmap;
787 634
788 /* Sanity check values reported by uCode */ 635static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
789 if (ba_resp->txed_2_done > ba_resp->txed) { 636{
790 IWL_DEBUG_TX_REPLY(priv, 637 status &= TX_STATUS_MSK;
791 "bogus sent(%d) and ack(%d) count\n",
792 ba_resp->txed, ba_resp->txed_2_done);
793 /*
794 * set txed_2_done = txed,
795 * so it won't impact rate scale
796 */
797 ba_resp->txed = ba_resp->txed_2_done;
798 }
799 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
800 ba_resp->txed, ba_resp->txed_2_done);
801 638
802 /* Find the first ACKed frame to store the TX status */ 639 switch (status) {
803 while (sent_bitmap && !(sent_bitmap & 1)) { 640 case TX_STATUS_POSTPONE_DELAY:
804 agg->start_idx = (agg->start_idx + 1) & 0xff; 641 priv->reply_tx_stats.pp_delay++;
805 sent_bitmap >>= 1; 642 break;
643 case TX_STATUS_POSTPONE_FEW_BYTES:
644 priv->reply_tx_stats.pp_few_bytes++;
645 break;
646 case TX_STATUS_POSTPONE_BT_PRIO:
647 priv->reply_tx_stats.pp_bt_prio++;
648 break;
649 case TX_STATUS_POSTPONE_QUIET_PERIOD:
650 priv->reply_tx_stats.pp_quiet_period++;
651 break;
652 case TX_STATUS_POSTPONE_CALC_TTAK:
653 priv->reply_tx_stats.pp_calc_ttak++;
654 break;
655 case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
656 priv->reply_tx_stats.int_crossed_retry++;
657 break;
658 case TX_STATUS_FAIL_SHORT_LIMIT:
659 priv->reply_tx_stats.short_limit++;
660 break;
661 case TX_STATUS_FAIL_LONG_LIMIT:
662 priv->reply_tx_stats.long_limit++;
663 break;
664 case TX_STATUS_FAIL_FIFO_UNDERRUN:
665 priv->reply_tx_stats.fifo_underrun++;
666 break;
667 case TX_STATUS_FAIL_DRAIN_FLOW:
668 priv->reply_tx_stats.drain_flow++;
669 break;
670 case TX_STATUS_FAIL_RFKILL_FLUSH:
671 priv->reply_tx_stats.rfkill_flush++;
672 break;
673 case TX_STATUS_FAIL_LIFE_EXPIRE:
674 priv->reply_tx_stats.life_expire++;
675 break;
676 case TX_STATUS_FAIL_DEST_PS:
677 priv->reply_tx_stats.dest_ps++;
678 break;
679 case TX_STATUS_FAIL_HOST_ABORTED:
680 priv->reply_tx_stats.host_abort++;
681 break;
682 case TX_STATUS_FAIL_BT_RETRY:
683 priv->reply_tx_stats.bt_retry++;
684 break;
685 case TX_STATUS_FAIL_STA_INVALID:
686 priv->reply_tx_stats.sta_invalid++;
687 break;
688 case TX_STATUS_FAIL_FRAG_DROPPED:
689 priv->reply_tx_stats.frag_drop++;
690 break;
691 case TX_STATUS_FAIL_TID_DISABLE:
692 priv->reply_tx_stats.tid_disable++;
693 break;
694 case TX_STATUS_FAIL_FIFO_FLUSHED:
695 priv->reply_tx_stats.fifo_flush++;
696 break;
697 case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
698 priv->reply_tx_stats.insuff_cf_poll++;
699 break;
700 case TX_STATUS_FAIL_PASSIVE_NO_RX:
701 priv->reply_tx_stats.fail_hw_drop++;
702 break;
703 case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
704 priv->reply_tx_stats.sta_color_mismatch++;
705 break;
706 default:
707 priv->reply_tx_stats.unknown++;
708 break;
806 } 709 }
710}
807 711
808 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); 712static void iwlagn_set_tx_status(struct iwl_priv *priv,
809 memset(&info->status, 0, sizeof(info->status)); 713 struct ieee80211_tx_info *info,
810 info->flags |= IEEE80211_TX_STAT_ACK; 714 struct iwlagn_tx_resp *tx_resp,
811 info->flags |= IEEE80211_TX_STAT_AMPDU; 715 bool is_agg)
812 info->status.ampdu_ack_len = ba_resp->txed_2_done; 716{
813 info->status.ampdu_len = ba_resp->txed; 717 u16 status = le16_to_cpu(tx_resp->status.status);
814 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 718
719 info->status.rates[0].count = tx_resp->failure_frame + 1;
720 if (is_agg)
721 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
722 info->flags |= iwl_tx_status_to_mac80211(status);
723 iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
724 info);
725 if (!iwl_is_tx_success(status))
726 iwlagn_count_tx_err_status(priv, status);
727}
815 728
816 return 0; 729static void iwl_check_abort_status(struct iwl_priv *priv,
730 u8 frame_count, u32 status)
731{
732 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
733 IWL_ERR(priv, "Tx flush command to flush out all frames\n");
734 if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
735 queue_work(priv->shrd->workqueue, &priv->tx_flush);
736 }
817} 737}
818 738
819/** 739void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
820 * translate ucode response to mac80211 tx status control values
821 */
822void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
823 struct ieee80211_tx_info *info)
824{ 740{
825 struct ieee80211_tx_rate *r = &info->control.rates[0]; 741 struct iwl_rx_packet *pkt = rxb_addr(rxb);
742 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
743 int txq_id = SEQ_TO_QUEUE(sequence);
744 int cmd_index = SEQ_TO_INDEX(sequence);
745 struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
746 struct ieee80211_hdr *hdr;
747 u32 status = le16_to_cpu(tx_resp->status.status);
748 u32 ssn = iwlagn_get_scd_ssn(tx_resp);
749 int tid;
750 int sta_id;
751 int freed;
752 struct ieee80211_tx_info *info;
753 unsigned long flags;
754 struct sk_buff_head skbs;
755 struct sk_buff *skb;
756 struct iwl_rxon_context *ctx;
757 bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
758
759 tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
760 IWLAGN_TX_RES_TID_POS;
761 sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
762 IWLAGN_TX_RES_RA_POS;
763
764 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
765
766 if (is_agg)
767 iwl_rx_reply_tx_agg(priv, tx_resp);
768
769 if (tx_resp->frame_count == 1) {
770 __skb_queue_head_init(&skbs);
771 /*we can free until ssn % q.n_bd not inclusive */
772 iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
773 ssn, status, &skbs);
774 freed = 0;
775 while (!skb_queue_empty(&skbs)) {
776 skb = __skb_dequeue(&skbs);
777 hdr = (struct ieee80211_hdr *)skb->data;
778
779 if (!ieee80211_is_data_qos(hdr->frame_control))
780 priv->last_seq_ctl = tx_resp->seq_ctl;
781
782 info = IEEE80211_SKB_CB(skb);
783 ctx = info->driver_data[0];
784 kmem_cache_free(priv->tx_cmd_pool,
785 (info->driver_data[1]));
786
787 memset(&info->status, 0, sizeof(info->status));
788
789 if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
790 iwl_is_associated_ctx(ctx) && ctx->vif &&
791 ctx->vif->type == NL80211_IFTYPE_STATION) {
792 ctx->last_tx_rejected = true;
793 iwl_trans_stop_queue(trans(priv), txq_id);
794
795 IWL_DEBUG_TX_REPLY(priv,
796 "TXQ %d status %s (0x%08x) "
797 "rate_n_flags 0x%x retries %d\n",
798 txq_id,
799 iwl_get_tx_fail_reason(status),
800 status,
801 le32_to_cpu(tx_resp->rate_n_flags),
802 tx_resp->failure_frame);
803
804 IWL_DEBUG_TX_REPLY(priv,
805 "FrameCnt = %d, idx=%d\n",
806 tx_resp->frame_count, cmd_index);
807 }
808
809 /* check if BAR is needed */
810 if (is_agg && !iwl_is_tx_success(status))
811 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
812 iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
813 tx_resp, is_agg);
814 if (!is_agg)
815 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
816
817 ieee80211_tx_status_irqsafe(priv->hw, skb);
818
819 freed++;
820 }
826 821
827 info->antenna_sel_tx = 822 WARN_ON(!is_agg && freed != 1);
828 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); 823 }
829 if (rate_n_flags & RATE_MCS_HT_MSK) 824
830 r->flags |= IEEE80211_TX_RC_MCS; 825 iwl_check_abort_status(priv, tx_resp->frame_count, status);
831 if (rate_n_flags & RATE_MCS_GF_MSK) 826 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
832 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
833 if (rate_n_flags & RATE_MCS_HT40_MSK)
834 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
835 if (rate_n_flags & RATE_MCS_DUP_MSK)
836 r->flags |= IEEE80211_TX_RC_DUP_DATA;
837 if (rate_n_flags & RATE_MCS_SGI_MSK)
838 r->flags |= IEEE80211_TX_RC_SHORT_GI;
839 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
840} 827}
841 828
842/** 829/**
@@ -850,12 +837,15 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
850{ 837{
851 struct iwl_rx_packet *pkt = rxb_addr(rxb); 838 struct iwl_rx_packet *pkt = rxb_addr(rxb);
852 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 839 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
853 struct iwl_tx_queue *txq = NULL;
854 struct iwl_ht_agg *agg; 840 struct iwl_ht_agg *agg;
855 int index; 841 struct sk_buff_head reclaimed_skbs;
842 struct ieee80211_tx_info *info;
843 struct ieee80211_hdr *hdr;
844 struct sk_buff *skb;
845 unsigned long flags;
856 int sta_id; 846 int sta_id;
857 int tid; 847 int tid;
858 unsigned long flags; 848 int freed;
859 849
860 /* "flow" corresponds to Tx queue */ 850 /* "flow" corresponds to Tx queue */
861 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 851 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
@@ -864,16 +854,18 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
864 * (in Tx queue's circular buffer) of first TFD/frame in window */ 854 * (in Tx queue's circular buffer) of first TFD/frame in window */
865 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 855 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
866 856
867 if (scd_flow >= priv->hw_params.max_txq_num) { 857 if (scd_flow >= hw_params(priv).max_txq_num) {
868 IWL_ERR(priv, 858 IWL_ERR(priv,
869 "BUG_ON scd_flow is bigger than number of queues\n"); 859 "BUG_ON scd_flow is bigger than number of queues\n");
870 return; 860 return;
871 } 861 }
872 862
873 txq = &priv->txq[scd_flow];
874 sta_id = ba_resp->sta_id; 863 sta_id = ba_resp->sta_id;
875 tid = ba_resp->tid; 864 tid = ba_resp->tid;
876 agg = &priv->stations[sta_id].tid[tid].agg; 865 agg = &priv->shrd->tid_data[sta_id][tid].agg;
866
867 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
868
877 if (unlikely(agg->txq_id != scd_flow)) { 869 if (unlikely(agg->txq_id != scd_flow)) {
878 /* 870 /*
879 * FIXME: this is a uCode bug which need to be addressed, 871 * FIXME: this is a uCode bug which need to be addressed,
@@ -884,88 +876,83 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
884 IWL_DEBUG_TX_REPLY(priv, 876 IWL_DEBUG_TX_REPLY(priv,
885 "BA scd_flow %d does not match txq_id %d\n", 877 "BA scd_flow %d does not match txq_id %d\n",
886 scd_flow, agg->txq_id); 878 scd_flow, agg->txq_id);
879 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
887 return; 880 return;
888 } 881 }
889 882
890 /* Find index just before block-ack window */ 883 if (unlikely(!agg->wait_for_ba)) {
891 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 884 if (unlikely(ba_resp->bitmap))
892 885 IWL_ERR(priv, "Received BA when not expected\n");
893 spin_lock_irqsave(&priv->sta_lock, flags); 886 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
887 return;
888 }
894 889
895 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, " 890 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
896 "sta_id = %d\n", 891 "sta_id = %d\n",
897 agg->wait_for_ba, 892 agg->wait_for_ba,
898 (u8 *) &ba_resp->sta_addr_lo32, 893 (u8 *) &ba_resp->sta_addr_lo32,
899 ba_resp->sta_id); 894 ba_resp->sta_id);
900 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = " 895 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
901 "%d, scd_ssn = %d\n", 896 "scd_flow = %d, scd_ssn = %d\n",
902 ba_resp->tid, 897 ba_resp->tid,
903 ba_resp->seq_ctl, 898 ba_resp->seq_ctl,
904 (unsigned long long)le64_to_cpu(ba_resp->bitmap), 899 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
905 ba_resp->scd_flow, 900 ba_resp->scd_flow,
906 ba_resp->scd_ssn); 901 ba_resp->scd_ssn);
907 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
908 agg->start_idx,
909 (unsigned long long)agg->bitmap);
910 902
911 /* Update driver's record of ACK vs. not for each frame in window */ 903 /* Mark that the expected block-ack response arrived */
912 iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp); 904 agg->wait_for_ba = 0;
905
906 /* Sanity check values reported by uCode */
907 if (ba_resp->txed_2_done > ba_resp->txed) {
908 IWL_DEBUG_TX_REPLY(priv,
909 "bogus sent(%d) and ack(%d) count\n",
910 ba_resp->txed, ba_resp->txed_2_done);
911 /*
912 * set txed_2_done = txed,
913 * so it won't impact rate scale
914 */
915 ba_resp->txed = ba_resp->txed_2_done;
916 }
917 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
918 ba_resp->txed, ba_resp->txed_2_done);
919
920 __skb_queue_head_init(&reclaimed_skbs);
913 921
914 /* Release all TFDs before the SSN, i.e. all TFDs in front of 922 /* Release all TFDs before the SSN, i.e. all TFDs in front of
915 * block-ack window (we assume that they've been successfully 923 * block-ack window (we assume that they've been successfully
916 * transmitted ... if not, it's too late anyway). */ 924 * transmitted ... if not, it's too late anyway). */
917 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) { 925 iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
918 /* calculate mac80211 ampdu sw queue to wake */ 926 0, &reclaimed_skbs);
919 int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index); 927 freed = 0;
920 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 928 while (!skb_queue_empty(&reclaimed_skbs)) {
921 929
922 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 930 skb = __skb_dequeue(&reclaimed_skbs);
923 priv->mac80211_registered && 931 hdr = (struct ieee80211_hdr *)skb->data;
924 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
925 iwl_wake_queue(priv, txq);
926 932
927 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow); 933 if (ieee80211_is_data_qos(hdr->frame_control))
928 } 934 freed++;
929 935 else
930 spin_unlock_irqrestore(&priv->sta_lock, flags); 936 WARN_ON_ONCE(1);
931} 937
938 if (freed == 0) {
939 /* this is the first skb we deliver in this batch */
940 /* put the rate scaling data there */
941 info = IEEE80211_SKB_CB(skb);
942 memset(&info->status, 0, sizeof(info->status));
943 info->flags |= IEEE80211_TX_STAT_ACK;
944 info->flags |= IEEE80211_TX_STAT_AMPDU;
945 info->status.ampdu_ack_len = ba_resp->txed_2_done;
946 info->status.ampdu_len = ba_resp->txed;
947 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
948 info);
949 }
932 950
933#ifdef CONFIG_IWLWIFI_DEBUG 951 info = IEEE80211_SKB_CB(skb);
934const char *iwl_get_tx_fail_reason(u32 status) 952 kmem_cache_free(priv->tx_cmd_pool, (info->driver_data[1]));
935{
936#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
937#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
938 953
939 switch (status & TX_STATUS_MSK) { 954 ieee80211_tx_status_irqsafe(priv->hw, skb);
940 case TX_STATUS_SUCCESS:
941 return "SUCCESS";
942 TX_STATUS_POSTPONE(DELAY);
943 TX_STATUS_POSTPONE(FEW_BYTES);
944 TX_STATUS_POSTPONE(BT_PRIO);
945 TX_STATUS_POSTPONE(QUIET_PERIOD);
946 TX_STATUS_POSTPONE(CALC_TTAK);
947 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
948 TX_STATUS_FAIL(SHORT_LIMIT);
949 TX_STATUS_FAIL(LONG_LIMIT);
950 TX_STATUS_FAIL(FIFO_UNDERRUN);
951 TX_STATUS_FAIL(DRAIN_FLOW);
952 TX_STATUS_FAIL(RFKILL_FLUSH);
953 TX_STATUS_FAIL(LIFE_EXPIRE);
954 TX_STATUS_FAIL(DEST_PS);
955 TX_STATUS_FAIL(HOST_ABORTED);
956 TX_STATUS_FAIL(BT_RETRY);
957 TX_STATUS_FAIL(STA_INVALID);
958 TX_STATUS_FAIL(FRAG_DROPPED);
959 TX_STATUS_FAIL(TID_DISABLE);
960 TX_STATUS_FAIL(FIFO_FLUSHED);
961 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
962 TX_STATUS_FAIL(PASSIVE_NO_RX);
963 TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
964 } 955 }
965 956
966 return "UNKNOWN"; 957 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
967
968#undef TX_STATUS_FAIL
969#undef TX_STATUS_POSTPONE
970} 958}
971#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index a895a099d086..ddb255a575df 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -40,6 +40,7 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-agn-calib.h" 41#include "iwl-agn-calib.h"
42#include "iwl-trans.h" 42#include "iwl-trans.h"
43#include "iwl-fh.h"
43 44
44static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { 45static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
45 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, 46 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@@ -84,29 +85,29 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
84 85
85 priv->ucode_write_complete = 0; 86 priv->ucode_write_complete = 0;
86 87
87 iwl_write_direct32(priv, 88 iwl_write_direct32(bus(priv),
88 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 89 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
89 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 90 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
90 91
91 iwl_write_direct32(priv, 92 iwl_write_direct32(bus(priv),
92 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr); 93 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
93 94
94 iwl_write_direct32(priv, 95 iwl_write_direct32(bus(priv),
95 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 96 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
96 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 97 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
97 98
98 iwl_write_direct32(priv, 99 iwl_write_direct32(bus(priv),
99 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 100 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
100 (iwl_get_dma_hi_addr(phy_addr) 101 (iwl_get_dma_hi_addr(phy_addr)
101 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 102 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
102 103
103 iwl_write_direct32(priv, 104 iwl_write_direct32(bus(priv),
104 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 105 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
105 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 106 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
106 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 107 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
107 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 108 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
108 109
109 iwl_write_direct32(priv, 110 iwl_write_direct32(bus(priv),
110 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 111 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
111 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 112 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
112 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 113 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
@@ -193,7 +194,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
193 calib_cfg_cmd.ucd_calib_cfg.flags = 194 calib_cfg_cmd.ucd_calib_cfg.flags =
194 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK; 195 IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
195 196
196 return trans_send_cmd(&priv->trans, &cmd); 197 return iwl_trans_send_cmd(trans(priv), &cmd);
197} 198}
198 199
199void iwlagn_rx_calib_result(struct iwl_priv *priv, 200void iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -291,7 +292,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
291 /* coexistence is disabled */ 292 /* coexistence is disabled */
292 memset(&coex_cmd, 0, sizeof(coex_cmd)); 293 memset(&coex_cmd, 0, sizeof(coex_cmd));
293 } 294 }
294 return trans_send_cmd_pdu(&priv->trans, 295 return iwl_trans_send_cmd_pdu(trans(priv),
295 COEX_PRIORITY_TABLE_CMD, CMD_SYNC, 296 COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
296 sizeof(coex_cmd), &coex_cmd); 297 sizeof(coex_cmd), &coex_cmd);
297} 298}
@@ -324,7 +325,7 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
324 325
325 memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl, 326 memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
326 sizeof(iwlagn_bt_prio_tbl)); 327 sizeof(iwlagn_bt_prio_tbl));
327 if (trans_send_cmd_pdu(&priv->trans, 328 if (iwl_trans_send_cmd_pdu(trans(priv),
328 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC, 329 REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
329 sizeof(prio_tbl_cmd), &prio_tbl_cmd)) 330 sizeof(prio_tbl_cmd), &prio_tbl_cmd))
330 IWL_ERR(priv, "failed to send BT prio tbl command\n"); 331 IWL_ERR(priv, "failed to send BT prio tbl command\n");
@@ -337,7 +338,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
337 338
338 env_cmd.action = action; 339 env_cmd.action = action;
339 env_cmd.type = type; 340 env_cmd.type = type;
340 ret = trans_send_cmd_pdu(&priv->trans, 341 ret = iwl_trans_send_cmd_pdu(trans(priv),
341 REPLY_BT_COEX_PROT_ENV, CMD_SYNC, 342 REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
342 sizeof(env_cmd), &env_cmd); 343 sizeof(env_cmd), &env_cmd);
343 if (ret) 344 if (ret)
@@ -350,7 +351,16 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
350{ 351{
351 int ret; 352 int ret;
352 353
353 trans_tx_start(&priv->trans); 354 if (!priv->tx_cmd_pool)
355 priv->tx_cmd_pool =
356 kmem_cache_create("iwlagn_dev_cmd",
357 sizeof(struct iwl_device_cmd),
358 sizeof(void *), 0, NULL);
359
360 if (!priv->tx_cmd_pool)
361 return -ENOMEM;
362
363 iwl_trans_tx_start(trans(priv));
354 364
355 ret = iwlagn_send_wimax_coex(priv); 365 ret = iwlagn_send_wimax_coex(priv);
356 if (ret) 366 if (ret)
@@ -369,7 +379,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
369 * using sample data 100 bytes apart. If these sample points are good, 379 * using sample data 100 bytes apart. If these sample points are good,
370 * it's a pretty good bet that everything between them is good, too. 380 * it's a pretty good bet that everything between them is good, too.
371 */ 381 */
372static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, 382static int iwl_verify_inst_sparse(struct iwl_priv *priv,
373 struct fw_desc *fw_desc) 383 struct fw_desc *fw_desc)
374{ 384{
375 __le32 *image = (__le32 *)fw_desc->v_addr; 385 __le32 *image = (__le32 *)fw_desc->v_addr;
@@ -383,9 +393,9 @@ static int iwlcore_verify_inst_sparse(struct iwl_priv *priv,
383 /* read data comes through single port, auto-incr addr */ 393 /* read data comes through single port, auto-incr addr */
384 /* NOTE: Use the debugless read so we don't flood kernel log 394 /* NOTE: Use the debugless read so we don't flood kernel log
385 * if IWL_DL_IO is set */ 395 * if IWL_DL_IO is set */
386 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 396 iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
387 i + IWLAGN_RTC_INST_LOWER_BOUND); 397 i + IWLAGN_RTC_INST_LOWER_BOUND);
388 val = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 398 val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
389 if (val != le32_to_cpu(*image)) 399 if (val != le32_to_cpu(*image))
390 return -EIO; 400 return -EIO;
391 } 401 }
@@ -404,14 +414,14 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
404 414
405 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); 415 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
406 416
407 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, 417 iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
408 IWLAGN_RTC_INST_LOWER_BOUND); 418 IWLAGN_RTC_INST_LOWER_BOUND);
409 419
410 for (offs = 0; 420 for (offs = 0;
411 offs < len && errors < 20; 421 offs < len && errors < 20;
412 offs += sizeof(u32), image++) { 422 offs += sizeof(u32), image++) {
413 /* read data comes through single port, auto-incr addr */ 423 /* read data comes through single port, auto-incr addr */
414 val = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 424 val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
415 if (val != le32_to_cpu(*image)) { 425 if (val != le32_to_cpu(*image)) {
416 IWL_ERR(priv, "uCode INST section at " 426 IWL_ERR(priv, "uCode INST section at "
417 "offset 0x%x, is 0x%x, s/b 0x%x\n", 427 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -427,7 +437,7 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
427 */ 437 */
428static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img) 438static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
429{ 439{
430 if (!iwlcore_verify_inst_sparse(priv, &img->code)) { 440 if (!iwl_verify_inst_sparse(priv, &img->code)) {
431 IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n"); 441 IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
432 return 0; 442 return 0;
433 } 443 }
@@ -478,7 +488,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
478 int ret; 488 int ret;
479 enum iwlagn_ucode_type old_type; 489 enum iwlagn_ucode_type old_type;
480 490
481 ret = trans_start_device(&priv->trans); 491 ret = iwl_trans_start_device(trans(priv));
482 if (ret) 492 if (ret)
483 return ret; 493 return ret;
484 494
@@ -495,7 +505,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
495 return ret; 505 return ret;
496 } 506 }
497 507
498 trans_kick_nic(&priv->trans); 508 iwl_trans_kick_nic(trans(priv));
499 509
500 /* 510 /*
501 * Some things may run in the background now, but we 511 * Some things may run in the background now, but we
@@ -545,7 +555,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
545 struct iwl_notification_wait calib_wait; 555 struct iwl_notification_wait calib_wait;
546 int ret; 556 int ret;
547 557
548 lockdep_assert_held(&priv->mutex); 558 lockdep_assert_held(&priv->shrd->mutex);
549 559
550 /* No init ucode required? Curious, but maybe ok */ 560 /* No init ucode required? Curious, but maybe ok */
551 if (!priv->ucode_init.code.len) 561 if (!priv->ucode_init.code.len)
@@ -580,6 +590,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
580 iwlagn_remove_notification(priv, &calib_wait); 590 iwlagn_remove_notification(priv, &calib_wait);
581 out: 591 out:
582 /* Whatever happened, stop the device */ 592 /* Whatever happened, stop the device */
583 trans_stop_device(&priv->trans); 593 iwl_trans_stop_device(trans(priv));
584 return ret; 594 return ret;
585} 595}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 33894dde1ae3..8113fbe770a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -51,6 +51,7 @@
51#include "iwl-sta.h" 51#include "iwl-sta.h"
52#include "iwl-agn-calib.h" 52#include "iwl-agn-calib.h"
53#include "iwl-agn.h" 53#include "iwl-agn.h"
54#include "iwl-shared.h"
54#include "iwl-bus.h" 55#include "iwl-bus.h"
55#include "iwl-trans.h" 56#include "iwl-trans.h"
56 57
@@ -79,9 +80,6 @@ MODULE_VERSION(DRV_VERSION);
79MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 80MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
80MODULE_LICENSE("GPL"); 81MODULE_LICENSE("GPL");
81 82
82static int iwlagn_ant_coupling;
83static bool iwlagn_bt_ch_announce = 1;
84
85void iwl_update_chain_flags(struct iwl_priv *priv) 83void iwl_update_chain_flags(struct iwl_priv *priv)
86{ 84{
87 struct iwl_rxon_context *ctx; 85 struct iwl_rxon_context *ctx;
@@ -137,7 +135,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
137 * beacon contents. 135 * beacon contents.
138 */ 136 */
139 137
140 lockdep_assert_held(&priv->mutex); 138 lockdep_assert_held(&priv->shrd->mutex);
141 139
142 if (!priv->beacon_ctx) { 140 if (!priv->beacon_ctx) {
143 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n"); 141 IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
@@ -182,7 +180,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
182 rate = info->control.rates[0].idx; 180 rate = info->control.rates[0].idx;
183 181
184 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 182 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
185 priv->hw_params.valid_tx_ant); 183 hw_params(priv).valid_tx_ant);
186 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 184 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
187 185
188 /* In mac80211, rates for 5 GHz start at 0 */ 186 /* In mac80211, rates for 5 GHz start at 0 */
@@ -202,7 +200,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
202 cmd.data[1] = priv->beacon_skb->data; 200 cmd.data[1] = priv->beacon_skb->data;
203 cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; 201 cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
204 202
205 return trans_send_cmd(&priv->trans, &cmd); 203 return iwl_trans_send_cmd(trans(priv), &cmd);
206} 204}
207 205
208static void iwl_bg_beacon_update(struct work_struct *work) 206static void iwl_bg_beacon_update(struct work_struct *work)
@@ -211,7 +209,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
211 container_of(work, struct iwl_priv, beacon_update); 209 container_of(work, struct iwl_priv, beacon_update);
212 struct sk_buff *beacon; 210 struct sk_buff *beacon;
213 211
214 mutex_lock(&priv->mutex); 212 mutex_lock(&priv->shrd->mutex);
215 if (!priv->beacon_ctx) { 213 if (!priv->beacon_ctx) {
216 IWL_ERR(priv, "updating beacon w/o beacon context!\n"); 214 IWL_ERR(priv, "updating beacon w/o beacon context!\n");
217 goto out; 215 goto out;
@@ -241,7 +239,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
241 239
242 iwlagn_send_beacon_cmd(priv); 240 iwlagn_send_beacon_cmd(priv);
243 out: 241 out:
244 mutex_unlock(&priv->mutex); 242 mutex_unlock(&priv->shrd->mutex);
245} 243}
246 244
247static void iwl_bg_bt_runtime_config(struct work_struct *work) 245static void iwl_bg_bt_runtime_config(struct work_struct *work)
@@ -249,11 +247,11 @@ static void iwl_bg_bt_runtime_config(struct work_struct *work)
249 struct iwl_priv *priv = 247 struct iwl_priv *priv =
250 container_of(work, struct iwl_priv, bt_runtime_config); 248 container_of(work, struct iwl_priv, bt_runtime_config);
251 249
252 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 250 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
253 return; 251 return;
254 252
255 /* dont send host command if rf-kill is on */ 253 /* dont send host command if rf-kill is on */
256 if (!iwl_is_ready_rf(priv)) 254 if (!iwl_is_ready_rf(priv->shrd))
257 return; 255 return;
258 iwlagn_send_advance_bt_config(priv); 256 iwlagn_send_advance_bt_config(priv);
259} 257}
@@ -264,13 +262,13 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
264 container_of(work, struct iwl_priv, bt_full_concurrency); 262 container_of(work, struct iwl_priv, bt_full_concurrency);
265 struct iwl_rxon_context *ctx; 263 struct iwl_rxon_context *ctx;
266 264
267 mutex_lock(&priv->mutex); 265 mutex_lock(&priv->shrd->mutex);
268 266
269 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 267 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
270 goto out; 268 goto out;
271 269
272 /* dont send host command if rf-kill is on */ 270 /* dont send host command if rf-kill is on */
273 if (!iwl_is_ready_rf(priv)) 271 if (!iwl_is_ready_rf(priv->shrd))
274 goto out; 272 goto out;
275 273
276 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n", 274 IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
@@ -288,7 +286,7 @@ static void iwl_bg_bt_full_concurrency(struct work_struct *work)
288 286
289 iwlagn_send_advance_bt_config(priv); 287 iwlagn_send_advance_bt_config(priv);
290out: 288out:
291 mutex_unlock(&priv->mutex); 289 mutex_unlock(&priv->shrd->mutex);
292} 290}
293 291
294/** 292/**
@@ -305,11 +303,11 @@ static void iwl_bg_statistics_periodic(unsigned long data)
305{ 303{
306 struct iwl_priv *priv = (struct iwl_priv *)data; 304 struct iwl_priv *priv = (struct iwl_priv *)data;
307 305
308 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 306 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
309 return; 307 return;
310 308
311 /* dont send host command if rf-kill is on */ 309 /* dont send host command if rf-kill is on */
312 if (!iwl_is_ready_rf(priv)) 310 if (!iwl_is_ready_rf(priv->shrd))
313 return; 311 return;
314 312
315 iwl_send_statistics_request(priv, CMD_ASYNC, false); 313 iwl_send_statistics_request(priv, CMD_ASYNC, false);
@@ -331,14 +329,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
331 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); 329 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
332 330
333 /* Make sure device is powered up for SRAM reads */ 331 /* Make sure device is powered up for SRAM reads */
334 spin_lock_irqsave(&priv->reg_lock, reg_flags); 332 spin_lock_irqsave(&bus(priv)->reg_lock, reg_flags);
335 if (iwl_grab_nic_access(priv)) { 333 if (iwl_grab_nic_access(bus(priv))) {
336 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 334 spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
337 return; 335 return;
338 } 336 }
339 337
340 /* Set starting address; reads will auto-increment */ 338 /* Set starting address; reads will auto-increment */
341 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr); 339 iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, ptr);
342 rmb(); 340 rmb();
343 341
344 /* 342 /*
@@ -346,20 +344,20 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
346 * place event id # at far right for easier visual parsing. 344 * place event id # at far right for easier visual parsing.
347 */ 345 */
348 for (i = 0; i < num_events; i++) { 346 for (i = 0; i < num_events; i++) {
349 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 347 ev = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
350 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 348 time = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
351 if (mode == 0) { 349 if (mode == 0) {
352 trace_iwlwifi_dev_ucode_cont_event(priv, 350 trace_iwlwifi_dev_ucode_cont_event(priv,
353 0, time, ev); 351 0, time, ev);
354 } else { 352 } else {
355 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 353 data = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
356 trace_iwlwifi_dev_ucode_cont_event(priv, 354 trace_iwlwifi_dev_ucode_cont_event(priv,
357 time, data, ev); 355 time, data, ev);
358 } 356 }
359 } 357 }
360 /* Allow device to power down */ 358 /* Allow device to power down */
361 iwl_release_nic_access(priv); 359 iwl_release_nic_access(bus(priv));
362 spin_unlock_irqrestore(&priv->reg_lock, reg_flags); 360 spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
363} 361}
364 362
365static void iwl_continuous_event_trace(struct iwl_priv *priv) 363static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -372,10 +370,12 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
372 370
373 base = priv->device_pointers.error_event_table; 371 base = priv->device_pointers.error_event_table;
374 if (iwlagn_hw_valid_rtc_data_addr(base)) { 372 if (iwlagn_hw_valid_rtc_data_addr(base)) {
375 capacity = iwl_read_targ_mem(priv, base); 373 capacity = iwl_read_targ_mem(bus(priv), base);
376 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 374 num_wraps = iwl_read_targ_mem(bus(priv),
377 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); 375 base + (2 * sizeof(u32)));
378 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 376 mode = iwl_read_targ_mem(bus(priv), base + (1 * sizeof(u32)));
377 next_entry = iwl_read_targ_mem(bus(priv),
378 base + (3 * sizeof(u32)));
379 } else 379 } else
380 return; 380 return;
381 381
@@ -426,7 +426,7 @@ static void iwl_bg_ucode_trace(unsigned long data)
426{ 426{
427 struct iwl_priv *priv = (struct iwl_priv *)data; 427 struct iwl_priv *priv = (struct iwl_priv *)data;
428 428
429 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 429 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
430 return; 430 return;
431 431
432 if (priv->event_log.ucode_trace) { 432 if (priv->event_log.ucode_trace) {
@@ -442,11 +442,11 @@ static void iwl_bg_tx_flush(struct work_struct *work)
442 struct iwl_priv *priv = 442 struct iwl_priv *priv =
443 container_of(work, struct iwl_priv, tx_flush); 443 container_of(work, struct iwl_priv, tx_flush);
444 444
445 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 445 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
446 return; 446 return;
447 447
448 /* do nothing if rf-kill is on */ 448 /* do nothing if rf-kill is on */
449 if (!iwl_is_ready_rf(priv)) 449 if (!iwl_is_ready_rf(priv->shrd))
450 return; 450 return;
451 451
452 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); 452 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
@@ -475,14 +475,15 @@ static void iwl_bg_tx_flush(struct work_struct *work)
475static ssize_t show_debug_level(struct device *d, 475static ssize_t show_debug_level(struct device *d,
476 struct device_attribute *attr, char *buf) 476 struct device_attribute *attr, char *buf)
477{ 477{
478 struct iwl_priv *priv = dev_get_drvdata(d); 478 struct iwl_shared *shrd = dev_get_drvdata(d);
479 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); 479 return sprintf(buf, "0x%08X\n", iwl_get_debug_level(shrd));
480} 480}
481static ssize_t store_debug_level(struct device *d, 481static ssize_t store_debug_level(struct device *d,
482 struct device_attribute *attr, 482 struct device_attribute *attr,
483 const char *buf, size_t count) 483 const char *buf, size_t count)
484{ 484{
485 struct iwl_priv *priv = dev_get_drvdata(d); 485 struct iwl_shared *shrd = dev_get_drvdata(d);
486 struct iwl_priv *priv = shrd->priv;
486 unsigned long val; 487 unsigned long val;
487 int ret; 488 int ret;
488 489
@@ -490,9 +491,9 @@ static ssize_t store_debug_level(struct device *d,
490 if (ret) 491 if (ret)
491 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf); 492 IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
492 else { 493 else {
493 priv->debug_level = val; 494 shrd->dbg_level_dev = val;
494 if (iwl_alloc_traffic_mem(priv)) 495 if (iwl_alloc_traffic_mem(priv))
495 IWL_ERR(priv, 496 IWL_ERR(shrd->priv,
496 "Not enough memory to generate traffic log\n"); 497 "Not enough memory to generate traffic log\n");
497 } 498 }
498 return strnlen(buf, count); 499 return strnlen(buf, count);
@@ -508,9 +509,10 @@ static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
508static ssize_t show_temperature(struct device *d, 509static ssize_t show_temperature(struct device *d,
509 struct device_attribute *attr, char *buf) 510 struct device_attribute *attr, char *buf)
510{ 511{
511 struct iwl_priv *priv = dev_get_drvdata(d); 512 struct iwl_shared *shrd = dev_get_drvdata(d);
513 struct iwl_priv *priv = shrd->priv;
512 514
513 if (!iwl_is_alive(priv)) 515 if (!iwl_is_alive(priv->shrd))
514 return -EAGAIN; 516 return -EAGAIN;
515 517
516 return sprintf(buf, "%d\n", priv->temperature); 518 return sprintf(buf, "%d\n", priv->temperature);
@@ -523,7 +525,7 @@ static ssize_t show_tx_power(struct device *d,
523{ 525{
524 struct iwl_priv *priv = dev_get_drvdata(d); 526 struct iwl_priv *priv = dev_get_drvdata(d);
525 527
526 if (!iwl_is_ready_rf(priv)) 528 if (!iwl_is_ready_rf(priv->shrd))
527 return sprintf(buf, "off\n"); 529 return sprintf(buf, "off\n");
528 else 530 else
529 return sprintf(buf, "%d\n", priv->tx_power_user_lmt); 531 return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
@@ -615,24 +617,6 @@ static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
615 617
616static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) 618static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
617{ 619{
618 static const u8 iwlagn_bss_ac_to_fifo[] = {
619 IWL_TX_FIFO_VO,
620 IWL_TX_FIFO_VI,
621 IWL_TX_FIFO_BE,
622 IWL_TX_FIFO_BK,
623 };
624 static const u8 iwlagn_bss_ac_to_queue[] = {
625 0, 1, 2, 3,
626 };
627 static const u8 iwlagn_pan_ac_to_fifo[] = {
628 IWL_TX_FIFO_VO_IPAN,
629 IWL_TX_FIFO_VI_IPAN,
630 IWL_TX_FIFO_BE_IPAN,
631 IWL_TX_FIFO_BK_IPAN,
632 };
633 static const u8 iwlagn_pan_ac_to_queue[] = {
634 7, 6, 5, 4,
635 };
636 int i; 620 int i;
637 621
638 /* 622 /*
@@ -654,8 +638,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
654 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; 638 priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
655 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; 639 priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
656 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; 640 priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
657 priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
658 priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
659 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes = 641 priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
660 BIT(NL80211_IFTYPE_ADHOC); 642 BIT(NL80211_IFTYPE_ADHOC);
661 priv->contexts[IWL_RXON_CTX_BSS].interface_modes = 643 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
@@ -675,9 +657,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
675 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY; 657 priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
676 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID; 658 priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
677 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION; 659 priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
678 priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
679 priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
680 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
681 priv->contexts[IWL_RXON_CTX_PAN].interface_modes = 660 priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
682 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP); 661 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
683 662
@@ -818,8 +797,6 @@ static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
818 return 0; 797 return 0;
819} 798}
820 799
821static int iwlagn_wanted_ucode_alternative = 1;
822
823static int iwlagn_load_firmware(struct iwl_priv *priv, 800static int iwlagn_load_firmware(struct iwl_priv *priv,
824 const struct firmware *ucode_raw, 801 const struct firmware *ucode_raw,
825 struct iwlagn_firmware_pieces *pieces, 802 struct iwlagn_firmware_pieces *pieces,
@@ -829,7 +806,8 @@ static int iwlagn_load_firmware(struct iwl_priv *priv,
829 struct iwl_ucode_tlv *tlv; 806 struct iwl_ucode_tlv *tlv;
830 size_t len = ucode_raw->size; 807 size_t len = ucode_raw->size;
831 const u8 *data; 808 const u8 *data;
832 int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp; 809 int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
810 int tmp;
833 u64 alternatives; 811 u64 alternatives;
834 u32 tlv_len; 812 u32 tlv_len;
835 enum iwl_ucode_tlv_type tlv_type; 813 enum iwl_ucode_tlv_type tlv_type;
@@ -1150,25 +1128,25 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1150 pieces.init_data_size); 1128 pieces.init_data_size);
1151 1129
1152 /* Verify that uCode images will fit in card's SRAM */ 1130 /* Verify that uCode images will fit in card's SRAM */
1153 if (pieces.inst_size > priv->hw_params.max_inst_size) { 1131 if (pieces.inst_size > hw_params(priv).max_inst_size) {
1154 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n", 1132 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1155 pieces.inst_size); 1133 pieces.inst_size);
1156 goto try_again; 1134 goto try_again;
1157 } 1135 }
1158 1136
1159 if (pieces.data_size > priv->hw_params.max_data_size) { 1137 if (pieces.data_size > hw_params(priv).max_data_size) {
1160 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n", 1138 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1161 pieces.data_size); 1139 pieces.data_size);
1162 goto try_again; 1140 goto try_again;
1163 } 1141 }
1164 1142
1165 if (pieces.init_size > priv->hw_params.max_inst_size) { 1143 if (pieces.init_size > hw_params(priv).max_inst_size) {
1166 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n", 1144 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1167 pieces.init_size); 1145 pieces.init_size);
1168 goto try_again; 1146 goto try_again;
1169 } 1147 }
1170 1148
1171 if (pieces.init_data_size > priv->hw_params.max_data_size) { 1149 if (pieces.init_data_size > hw_params(priv).max_data_size) {
1172 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n", 1150 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1173 pieces.init_data_size); 1151 pieces.init_data_size);
1174 goto try_again; 1152 goto try_again;
@@ -1245,10 +1223,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1245 1223
1246 if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) { 1224 if (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN) {
1247 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; 1225 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1248 priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; 1226 priv->shrd->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1249 } else { 1227 } else {
1250 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1228 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1251 priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1229 priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1252 } 1230 }
1253 1231
1254 /* 1232 /*
@@ -1309,364 +1287,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1309 release_firmware(ucode_raw); 1287 release_firmware(ucode_raw);
1310} 1288}
1311 1289
1312static const char * const desc_lookup_text[] = {
1313 "OK",
1314 "FAIL",
1315 "BAD_PARAM",
1316 "BAD_CHECKSUM",
1317 "NMI_INTERRUPT_WDG",
1318 "SYSASSERT",
1319 "FATAL_ERROR",
1320 "BAD_COMMAND",
1321 "HW_ERROR_TUNE_LOCK",
1322 "HW_ERROR_TEMPERATURE",
1323 "ILLEGAL_CHAN_FREQ",
1324 "VCC_NOT_STABLE",
1325 "FH_ERROR",
1326 "NMI_INTERRUPT_HOST",
1327 "NMI_INTERRUPT_ACTION_PT",
1328 "NMI_INTERRUPT_UNKNOWN",
1329 "UCODE_VERSION_MISMATCH",
1330 "HW_ERROR_ABS_LOCK",
1331 "HW_ERROR_CAL_LOCK_FAIL",
1332 "NMI_INTERRUPT_INST_ACTION_PT",
1333 "NMI_INTERRUPT_DATA_ACTION_PT",
1334 "NMI_TRM_HW_ER",
1335 "NMI_INTERRUPT_TRM",
1336 "NMI_INTERRUPT_BREAK_POINT",
1337 "DEBUG_0",
1338 "DEBUG_1",
1339 "DEBUG_2",
1340 "DEBUG_3",
1341};
1342
1343static struct { char *name; u8 num; } advanced_lookup[] = {
1344 { "NMI_INTERRUPT_WDG", 0x34 },
1345 { "SYSASSERT", 0x35 },
1346 { "UCODE_VERSION_MISMATCH", 0x37 },
1347 { "BAD_COMMAND", 0x38 },
1348 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1349 { "FATAL_ERROR", 0x3D },
1350 { "NMI_TRM_HW_ERR", 0x46 },
1351 { "NMI_INTERRUPT_TRM", 0x4C },
1352 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1353 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1354 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1355 { "NMI_INTERRUPT_HOST", 0x66 },
1356 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1357 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1358 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1359 { "ADVANCED_SYSASSERT", 0 },
1360};
1361
1362static const char *desc_lookup(u32 num)
1363{
1364 int i;
1365 int max = ARRAY_SIZE(desc_lookup_text);
1366
1367 if (num < max)
1368 return desc_lookup_text[num];
1369
1370 max = ARRAY_SIZE(advanced_lookup) - 1;
1371 for (i = 0; i < max; i++) {
1372 if (advanced_lookup[i].num == num)
1373 break;
1374 }
1375 return advanced_lookup[i].name;
1376}
1377
1378#define ERROR_START_OFFSET (1 * sizeof(u32))
1379#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1380
1381void iwl_dump_nic_error_log(struct iwl_priv *priv)
1382{
1383 u32 base;
1384 struct iwl_error_event_table table;
1385
1386 base = priv->device_pointers.error_event_table;
1387 if (priv->ucode_type == IWL_UCODE_INIT) {
1388 if (!base)
1389 base = priv->init_errlog_ptr;
1390 } else {
1391 if (!base)
1392 base = priv->inst_errlog_ptr;
1393 }
1394
1395 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1396 IWL_ERR(priv,
1397 "Not valid error log pointer 0x%08X for %s uCode\n",
1398 base,
1399 (priv->ucode_type == IWL_UCODE_INIT)
1400 ? "Init" : "RT");
1401 return;
1402 }
1403
1404 iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
1405
1406 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1407 IWL_ERR(priv, "Start IWL Error Log Dump:\n");
1408 IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
1409 priv->status, table.valid);
1410 }
1411
1412 priv->isr_stats.err_code = table.error_id;
1413
1414 trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
1415 table.data1, table.data2, table.line,
1416 table.blink1, table.blink2, table.ilink1,
1417 table.ilink2, table.bcon_time, table.gp1,
1418 table.gp2, table.gp3, table.ucode_ver,
1419 table.hw_ver, table.brd_ver);
1420 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1421 desc_lookup(table.error_id));
1422 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1423 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1424 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1425 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1426 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1427 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1428 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1429 IWL_ERR(priv, "0x%08X | line\n", table.line);
1430 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1431 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1432 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1433 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1434 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1435 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1436 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1437 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1438 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1439 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1440}
1441
1442#define EVENT_START_OFFSET (4 * sizeof(u32))
1443
1444/**
1445 * iwl_print_event_log - Dump error event log to syslog
1446 *
1447 */
1448static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1449 u32 num_events, u32 mode,
1450 int pos, char **buf, size_t bufsz)
1451{
1452 u32 i;
1453 u32 base; /* SRAM byte address of event log header */
1454 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1455 u32 ptr; /* SRAM byte address of log data */
1456 u32 ev, time, data; /* event log data */
1457 unsigned long reg_flags;
1458
1459 if (num_events == 0)
1460 return pos;
1461
1462 base = priv->device_pointers.log_event_table;
1463 if (priv->ucode_type == IWL_UCODE_INIT) {
1464 if (!base)
1465 base = priv->init_evtlog_ptr;
1466 } else {
1467 if (!base)
1468 base = priv->inst_evtlog_ptr;
1469 }
1470
1471 if (mode == 0)
1472 event_size = 2 * sizeof(u32);
1473 else
1474 event_size = 3 * sizeof(u32);
1475
1476 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1477
1478 /* Make sure device is powered up for SRAM reads */
1479 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1480 iwl_grab_nic_access(priv);
1481
1482 /* Set starting address; reads will auto-increment */
1483 iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);
1484 rmb();
1485
1486 /* "time" is actually "data" for mode 0 (no timestamp).
1487 * place event id # at far right for easier visual parsing. */
1488 for (i = 0; i < num_events; i++) {
1489 ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1490 time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1491 if (mode == 0) {
1492 /* data, ev */
1493 if (bufsz) {
1494 pos += scnprintf(*buf + pos, bufsz - pos,
1495 "EVT_LOG:0x%08x:%04u\n",
1496 time, ev);
1497 } else {
1498 trace_iwlwifi_dev_ucode_event(priv, 0,
1499 time, ev);
1500 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1501 time, ev);
1502 }
1503 } else {
1504 data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
1505 if (bufsz) {
1506 pos += scnprintf(*buf + pos, bufsz - pos,
1507 "EVT_LOGT:%010u:0x%08x:%04u\n",
1508 time, data, ev);
1509 } else {
1510 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1511 time, data, ev);
1512 trace_iwlwifi_dev_ucode_event(priv, time,
1513 data, ev);
1514 }
1515 }
1516 }
1517
1518 /* Allow device to power down */
1519 iwl_release_nic_access(priv);
1520 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1521 return pos;
1522}
1523
1524/**
1525 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1526 */
1527static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1528 u32 num_wraps, u32 next_entry,
1529 u32 size, u32 mode,
1530 int pos, char **buf, size_t bufsz)
1531{
1532 /*
1533 * display the newest DEFAULT_LOG_ENTRIES entries
1534 * i.e the entries just before the next ont that uCode would fill.
1535 */
1536 if (num_wraps) {
1537 if (next_entry < size) {
1538 pos = iwl_print_event_log(priv,
1539 capacity - (size - next_entry),
1540 size - next_entry, mode,
1541 pos, buf, bufsz);
1542 pos = iwl_print_event_log(priv, 0,
1543 next_entry, mode,
1544 pos, buf, bufsz);
1545 } else
1546 pos = iwl_print_event_log(priv, next_entry - size,
1547 size, mode, pos, buf, bufsz);
1548 } else {
1549 if (next_entry < size) {
1550 pos = iwl_print_event_log(priv, 0, next_entry,
1551 mode, pos, buf, bufsz);
1552 } else {
1553 pos = iwl_print_event_log(priv, next_entry - size,
1554 size, mode, pos, buf, bufsz);
1555 }
1556 }
1557 return pos;
1558}
1559
1560#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1561
1562int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1563 char **buf, bool display)
1564{
1565 u32 base; /* SRAM byte address of event log header */
1566 u32 capacity; /* event log capacity in # entries */
1567 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
1568 u32 num_wraps; /* # times uCode wrapped to top of log */
1569 u32 next_entry; /* index of next entry to be written by uCode */
1570 u32 size; /* # entries that we'll print */
1571 u32 logsize;
1572 int pos = 0;
1573 size_t bufsz = 0;
1574
1575 base = priv->device_pointers.log_event_table;
1576 if (priv->ucode_type == IWL_UCODE_INIT) {
1577 logsize = priv->init_evtlog_size;
1578 if (!base)
1579 base = priv->init_evtlog_ptr;
1580 } else {
1581 logsize = priv->inst_evtlog_size;
1582 if (!base)
1583 base = priv->inst_evtlog_ptr;
1584 }
1585
1586 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1587 IWL_ERR(priv,
1588 "Invalid event log pointer 0x%08X for %s uCode\n",
1589 base,
1590 (priv->ucode_type == IWL_UCODE_INIT)
1591 ? "Init" : "RT");
1592 return -EINVAL;
1593 }
1594
1595 /* event log header */
1596 capacity = iwl_read_targ_mem(priv, base);
1597 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
1598 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1599 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1600
1601 if (capacity > logsize) {
1602 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1603 capacity, logsize);
1604 capacity = logsize;
1605 }
1606
1607 if (next_entry > logsize) {
1608 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1609 next_entry, logsize);
1610 next_entry = logsize;
1611 }
1612
1613 size = num_wraps ? capacity : next_entry;
1614
1615 /* bail out if nothing in log */
1616 if (size == 0) {
1617 IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
1618 return pos;
1619 }
1620
1621 /* enable/disable bt channel inhibition */
1622 priv->bt_ch_announce = iwlagn_bt_ch_announce;
1623
1624#ifdef CONFIG_IWLWIFI_DEBUG
1625 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
1626 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1627 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1628#else
1629 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1630 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1631#endif
1632 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1633 size);
1634
1635#ifdef CONFIG_IWLWIFI_DEBUG
1636 if (display) {
1637 if (full_log)
1638 bufsz = capacity * 48;
1639 else
1640 bufsz = size * 48;
1641 *buf = kmalloc(bufsz, GFP_KERNEL);
1642 if (!*buf)
1643 return -ENOMEM;
1644 }
1645 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1646 /*
1647 * if uCode has wrapped back to top of log,
1648 * start at the oldest entry,
1649 * i.e the next one that uCode would fill.
1650 */
1651 if (num_wraps)
1652 pos = iwl_print_event_log(priv, next_entry,
1653 capacity - next_entry, mode,
1654 pos, buf, bufsz);
1655 /* (then/else) start at top of log */
1656 pos = iwl_print_event_log(priv, 0,
1657 next_entry, mode, pos, buf, bufsz);
1658 } else
1659 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1660 next_entry, size, mode,
1661 pos, buf, bufsz);
1662#else
1663 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
1664 next_entry, size, mode,
1665 pos, buf, bufsz);
1666#endif
1667 return pos;
1668}
1669
1670static void iwl_rf_kill_ct_config(struct iwl_priv *priv) 1290static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1671{ 1291{
1672 struct iwl_ct_kill_config cmd; 1292 struct iwl_ct_kill_config cmd;
@@ -1674,44 +1294,43 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
1674 unsigned long flags; 1294 unsigned long flags;
1675 int ret = 0; 1295 int ret = 0;
1676 1296
1677 spin_lock_irqsave(&priv->lock, flags); 1297 spin_lock_irqsave(&priv->shrd->lock, flags);
1678 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 1298 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
1679 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 1299 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
1680 spin_unlock_irqrestore(&priv->lock, flags); 1300 spin_unlock_irqrestore(&priv->shrd->lock, flags);
1681 priv->thermal_throttle.ct_kill_toggle = false; 1301 priv->thermal_throttle.ct_kill_toggle = false;
1682 1302
1683 if (priv->cfg->base_params->support_ct_kill_exit) { 1303 if (priv->cfg->base_params->support_ct_kill_exit) {
1684 adv_cmd.critical_temperature_enter = 1304 adv_cmd.critical_temperature_enter =
1685 cpu_to_le32(priv->hw_params.ct_kill_threshold); 1305 cpu_to_le32(hw_params(priv).ct_kill_threshold);
1686 adv_cmd.critical_temperature_exit = 1306 adv_cmd.critical_temperature_exit =
1687 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold); 1307 cpu_to_le32(hw_params(priv).ct_kill_exit_threshold);
1688 1308
1689 ret = trans_send_cmd_pdu(&priv->trans, 1309 ret = iwl_trans_send_cmd_pdu(trans(priv),
1690 REPLY_CT_KILL_CONFIG_CMD, 1310 REPLY_CT_KILL_CONFIG_CMD,
1691 CMD_SYNC, sizeof(adv_cmd), &adv_cmd); 1311 CMD_SYNC, sizeof(adv_cmd), &adv_cmd);
1692 if (ret) 1312 if (ret)
1693 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); 1313 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1694 else 1314 else
1695 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " 1315 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1696 "succeeded, " 1316 "succeeded, critical temperature enter is %d,"
1697 "critical temperature enter is %d," 1317 "exit is %d\n",
1698 "exit is %d\n", 1318 hw_params(priv).ct_kill_threshold,
1699 priv->hw_params.ct_kill_threshold, 1319 hw_params(priv).ct_kill_exit_threshold);
1700 priv->hw_params.ct_kill_exit_threshold);
1701 } else { 1320 } else {
1702 cmd.critical_temperature_R = 1321 cmd.critical_temperature_R =
1703 cpu_to_le32(priv->hw_params.ct_kill_threshold); 1322 cpu_to_le32(hw_params(priv).ct_kill_threshold);
1704 1323
1705 ret = trans_send_cmd_pdu(&priv->trans, 1324 ret = iwl_trans_send_cmd_pdu(trans(priv),
1706 REPLY_CT_KILL_CONFIG_CMD, 1325 REPLY_CT_KILL_CONFIG_CMD,
1707 CMD_SYNC, sizeof(cmd), &cmd); 1326 CMD_SYNC, sizeof(cmd), &cmd);
1708 if (ret) 1327 if (ret)
1709 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n"); 1328 IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
1710 else 1329 else
1711 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " 1330 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
1712 "succeeded, " 1331 "succeeded, "
1713 "critical temperature is %d\n", 1332 "critical temperature is %d\n",
1714 priv->hw_params.ct_kill_threshold); 1333 hw_params(priv).ct_kill_threshold);
1715 } 1334 }
1716} 1335}
1717 1336
@@ -1728,7 +1347,7 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
1728 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL; 1347 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
1729 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg); 1348 calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
1730 1349
1731 return trans_send_cmd(&priv->trans, &cmd); 1350 return iwl_trans_send_cmd(trans(priv), &cmd);
1732} 1351}
1733 1352
1734 1353
@@ -1740,7 +1359,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1740 1359
1741 if (IWL_UCODE_API(priv->ucode_ver) > 1) { 1360 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1742 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant); 1361 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1743 return trans_send_cmd_pdu(&priv->trans, 1362 return iwl_trans_send_cmd_pdu(trans(priv),
1744 TX_ANT_CONFIGURATION_CMD, 1363 TX_ANT_CONFIGURATION_CMD,
1745 CMD_SYNC, 1364 CMD_SYNC,
1746 sizeof(struct iwl_tx_ant_config_cmd), 1365 sizeof(struct iwl_tx_ant_config_cmd),
@@ -1762,17 +1381,17 @@ int iwl_alive_start(struct iwl_priv *priv)
1762 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1381 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1763 1382
1764 /*TODO: this should go to the transport layer */ 1383 /*TODO: this should go to the transport layer */
1765 iwl_reset_ict(priv); 1384 iwl_reset_ict(trans(priv));
1766 1385
1767 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 1386 IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
1768 1387
1769 /* After the ALIVE response, we can send host commands to the uCode */ 1388 /* After the ALIVE response, we can send host commands to the uCode */
1770 set_bit(STATUS_ALIVE, &priv->status); 1389 set_bit(STATUS_ALIVE, &priv->shrd->status);
1771 1390
1772 /* Enable watchdog to monitor the driver tx queues */ 1391 /* Enable watchdog to monitor the driver tx queues */
1773 iwl_setup_watchdog(priv); 1392 iwl_setup_watchdog(priv);
1774 1393
1775 if (iwl_is_rfkill(priv)) 1394 if (iwl_is_rfkill(priv->shrd))
1776 return -ERFKILL; 1395 return -ERFKILL;
1777 1396
1778 /* download priority table before any calibration request */ 1397 /* download priority table before any calibration request */
@@ -1809,8 +1428,9 @@ int iwl_alive_start(struct iwl_priv *priv)
1809 iwl_send_bt_config(priv); 1428 iwl_send_bt_config(priv);
1810 } 1429 }
1811 1430
1812 if (priv->hw_params.calib_rt_cfg) 1431 if (hw_params(priv).calib_rt_cfg)
1813 iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg); 1432 iwlagn_send_calib_cfg_rt(priv,
1433 hw_params(priv).calib_rt_cfg);
1814 1434
1815 ieee80211_wake_queues(priv->hw); 1435 ieee80211_wake_queues(priv->hw);
1816 1436
@@ -1819,7 +1439,7 @@ int iwl_alive_start(struct iwl_priv *priv)
1819 /* Configure Tx antenna selection based on H/W config */ 1439 /* Configure Tx antenna selection based on H/W config */
1820 iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant); 1440 iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
1821 1441
1822 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { 1442 if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
1823 struct iwl_rxon_cmd *active_rxon = 1443 struct iwl_rxon_cmd *active_rxon =
1824 (struct iwl_rxon_cmd *)&ctx->active; 1444 (struct iwl_rxon_cmd *)&ctx->active;
1825 /* apply any changes in staging */ 1445 /* apply any changes in staging */
@@ -1834,12 +1454,12 @@ int iwl_alive_start(struct iwl_priv *priv)
1834 iwlagn_set_rxon_chain(priv, ctx); 1454 iwlagn_set_rxon_chain(priv, ctx);
1835 } 1455 }
1836 1456
1837 if (!priv->wowlan) { 1457 if (!priv->shrd->wowlan) {
1838 /* WoWLAN ucode will not reply in the same way, skip it */ 1458 /* WoWLAN ucode will not reply in the same way, skip it */
1839 iwl_reset_run_time_calib(priv); 1459 iwl_reset_run_time_calib(priv);
1840 } 1460 }
1841 1461
1842 set_bit(STATUS_READY, &priv->status); 1462 set_bit(STATUS_READY, &priv->shrd->status);
1843 1463
1844 /* Configure the adapter for unassociated operation */ 1464 /* Configure the adapter for unassociated operation */
1845 ret = iwlagn_commit_rxon(priv, ctx); 1465 ret = iwlagn_commit_rxon(priv, ctx);
@@ -1871,7 +1491,8 @@ static void __iwl_down(struct iwl_priv *priv)
1871 */ 1491 */
1872 ieee80211_remain_on_channel_expired(priv->hw); 1492 ieee80211_remain_on_channel_expired(priv->hw);
1873 1493
1874 exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 1494 exit_pending =
1495 test_and_set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
1875 1496
1876 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set 1497 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
1877 * to prevent rearm timer */ 1498 * to prevent rearm timer */
@@ -1896,22 +1517,23 @@ static void __iwl_down(struct iwl_priv *priv)
1896 /* Wipe out the EXIT_PENDING status bit if we are not actually 1517 /* Wipe out the EXIT_PENDING status bit if we are not actually
1897 * exiting the module */ 1518 * exiting the module */
1898 if (!exit_pending) 1519 if (!exit_pending)
1899 clear_bit(STATUS_EXIT_PENDING, &priv->status); 1520 clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
1900 1521
1901 if (priv->mac80211_registered) 1522 if (priv->shrd->mac80211_registered)
1902 ieee80211_stop_queues(priv->hw); 1523 ieee80211_stop_queues(priv->hw);
1903 1524
1904 /* Clear out all status bits but a few that are stable across reset */ 1525 /* Clear out all status bits but a few that are stable across reset */
1905 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 1526 priv->shrd->status &=
1527 test_bit(STATUS_RF_KILL_HW, &priv->shrd->status) <<
1906 STATUS_RF_KILL_HW | 1528 STATUS_RF_KILL_HW |
1907 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 1529 test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) <<
1908 STATUS_GEO_CONFIGURED | 1530 STATUS_GEO_CONFIGURED |
1909 test_bit(STATUS_FW_ERROR, &priv->status) << 1531 test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
1910 STATUS_FW_ERROR | 1532 STATUS_FW_ERROR |
1911 test_bit(STATUS_EXIT_PENDING, &priv->status) << 1533 test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) <<
1912 STATUS_EXIT_PENDING; 1534 STATUS_EXIT_PENDING;
1913 1535
1914 trans_stop_device(&priv->trans); 1536 iwl_trans_stop_device(trans(priv));
1915 1537
1916 dev_kfree_skb(priv->beacon_skb); 1538 dev_kfree_skb(priv->beacon_skb);
1917 priv->beacon_skb = NULL; 1539 priv->beacon_skb = NULL;
@@ -1919,9 +1541,9 @@ static void __iwl_down(struct iwl_priv *priv)
1919 1541
1920static void iwl_down(struct iwl_priv *priv) 1542static void iwl_down(struct iwl_priv *priv)
1921{ 1543{
1922 mutex_lock(&priv->mutex); 1544 mutex_lock(&priv->shrd->mutex);
1923 __iwl_down(priv); 1545 __iwl_down(priv);
1924 mutex_unlock(&priv->mutex); 1546 mutex_unlock(&priv->shrd->mutex);
1925 1547
1926 iwl_cancel_deferred_work(priv); 1548 iwl_cancel_deferred_work(priv);
1927} 1549}
@@ -1933,9 +1555,9 @@ static int __iwl_up(struct iwl_priv *priv)
1933 struct iwl_rxon_context *ctx; 1555 struct iwl_rxon_context *ctx;
1934 int ret; 1556 int ret;
1935 1557
1936 lockdep_assert_held(&priv->mutex); 1558 lockdep_assert_held(&priv->shrd->mutex);
1937 1559
1938 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 1560 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
1939 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); 1561 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
1940 return -EIO; 1562 return -EIO;
1941 } 1563 }
@@ -1968,9 +1590,9 @@ static int __iwl_up(struct iwl_priv *priv)
1968 return 0; 1590 return 0;
1969 1591
1970 error: 1592 error:
1971 set_bit(STATUS_EXIT_PENDING, &priv->status); 1593 set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
1972 __iwl_down(priv); 1594 __iwl_down(priv);
1973 clear_bit(STATUS_EXIT_PENDING, &priv->status); 1595 clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
1974 1596
1975 IWL_ERR(priv, "Unable to initialize device.\n"); 1597 IWL_ERR(priv, "Unable to initialize device.\n");
1976 return ret; 1598 return ret;
@@ -1988,11 +1610,11 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
1988 struct iwl_priv *priv = container_of(work, struct iwl_priv, 1610 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1989 run_time_calib_work); 1611 run_time_calib_work);
1990 1612
1991 mutex_lock(&priv->mutex); 1613 mutex_lock(&priv->shrd->mutex);
1992 1614
1993 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 1615 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
1994 test_bit(STATUS_SCANNING, &priv->status)) { 1616 test_bit(STATUS_SCANNING, &priv->shrd->status)) {
1995 mutex_unlock(&priv->mutex); 1617 mutex_unlock(&priv->shrd->mutex);
1996 return; 1618 return;
1997 } 1619 }
1998 1620
@@ -2001,7 +1623,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
2001 iwl_sensitivity_calibration(priv); 1623 iwl_sensitivity_calibration(priv);
2002 } 1624 }
2003 1625
2004 mutex_unlock(&priv->mutex); 1626 mutex_unlock(&priv->shrd->mutex);
2005} 1627}
2006 1628
2007static void iwlagn_prepare_restart(struct iwl_priv *priv) 1629static void iwlagn_prepare_restart(struct iwl_priv *priv)
@@ -2013,7 +1635,7 @@ static void iwlagn_prepare_restart(struct iwl_priv *priv)
2013 u8 bt_status; 1635 u8 bt_status;
2014 bool bt_is_sco; 1636 bool bt_is_sco;
2015 1637
2016 lockdep_assert_held(&priv->mutex); 1638 lockdep_assert_held(&priv->shrd->mutex);
2017 1639
2018 for_each_context(priv, ctx) 1640 for_each_context(priv, ctx)
2019 ctx->vif = NULL; 1641 ctx->vif = NULL;
@@ -2047,13 +1669,13 @@ static void iwl_bg_restart(struct work_struct *data)
2047{ 1669{
2048 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); 1670 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
2049 1671
2050 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1672 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
2051 return; 1673 return;
2052 1674
2053 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { 1675 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) {
2054 mutex_lock(&priv->mutex); 1676 mutex_lock(&priv->shrd->mutex);
2055 iwlagn_prepare_restart(priv); 1677 iwlagn_prepare_restart(priv);
2056 mutex_unlock(&priv->mutex); 1678 mutex_unlock(&priv->shrd->mutex);
2057 iwl_cancel_deferred_work(priv); 1679 iwl_cancel_deferred_work(priv);
2058 ieee80211_restart_hw(priv->hw); 1680 ieee80211_restart_hw(priv->hw);
2059 } else { 1681 } else {
@@ -2241,7 +1863,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
2241 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 1863 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2242 return ret; 1864 return ret;
2243 } 1865 }
2244 priv->mac80211_registered = 1; 1866 priv->shrd->mac80211_registered = 1;
2245 1867
2246 return 0; 1868 return 0;
2247} 1869}
@@ -2255,16 +1877,16 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
2255 IWL_DEBUG_MAC80211(priv, "enter\n"); 1877 IWL_DEBUG_MAC80211(priv, "enter\n");
2256 1878
2257 /* we should be verifying the device is ready to be opened */ 1879 /* we should be verifying the device is ready to be opened */
2258 mutex_lock(&priv->mutex); 1880 mutex_lock(&priv->shrd->mutex);
2259 ret = __iwl_up(priv); 1881 ret = __iwl_up(priv);
2260 mutex_unlock(&priv->mutex); 1882 mutex_unlock(&priv->shrd->mutex);
2261 if (ret) 1883 if (ret)
2262 return ret; 1884 return ret;
2263 1885
2264 IWL_DEBUG_INFO(priv, "Start UP work done.\n"); 1886 IWL_DEBUG_INFO(priv, "Start UP work done.\n");
2265 1887
2266 /* Now we should be done, and the READY bit should be set. */ 1888 /* Now we should be done, and the READY bit should be set. */
2267 if (WARN_ON(!test_bit(STATUS_READY, &priv->status))) 1889 if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
2268 ret = -EIO; 1890 ret = -EIO;
2269 1891
2270 iwlagn_led_enable(priv); 1892 iwlagn_led_enable(priv);
@@ -2287,11 +1909,11 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
2287 1909
2288 iwl_down(priv); 1910 iwl_down(priv);
2289 1911
2290 flush_workqueue(priv->workqueue); 1912 flush_workqueue(priv->shrd->workqueue);
2291 1913
2292 /* User space software may expect getting rfkill changes 1914 /* User space software may expect getting rfkill changes
2293 * even if interface is down */ 1915 * even if interface is down */
2294 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 1916 iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
2295 iwl_enable_rfkill_int(priv); 1917 iwl_enable_rfkill_int(priv);
2296 1918
2297 IWL_DEBUG_MAC80211(priv, "leave\n"); 1919 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2335,7 +1957,7 @@ static int iwlagn_send_patterns(struct iwl_priv *priv,
2335 } 1957 }
2336 1958
2337 cmd.data[0] = pattern_cmd; 1959 cmd.data[0] = pattern_cmd;
2338 err = trans_send_cmd(&priv->trans, &cmd); 1960 err = iwl_trans_send_cmd(trans(priv), &cmd);
2339 kfree(pattern_cmd); 1961 kfree(pattern_cmd);
2340 return err; 1962 return err;
2341} 1963}
@@ -2350,7 +1972,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
2350 if (iwlagn_mod_params.sw_crypto) 1972 if (iwlagn_mod_params.sw_crypto)
2351 return; 1973 return;
2352 1974
2353 mutex_lock(&priv->mutex); 1975 mutex_lock(&priv->shrd->mutex);
2354 1976
2355 if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif) 1977 if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
2356 goto out; 1978 goto out;
@@ -2361,7 +1983,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
2361 priv->have_rekey_data = true; 1983 priv->have_rekey_data = true;
2362 1984
2363 out: 1985 out:
2364 mutex_unlock(&priv->mutex); 1986 mutex_unlock(&priv->shrd->mutex);
2365} 1987}
2366 1988
2367struct wowlan_key_data { 1989struct wowlan_key_data {
@@ -2399,7 +2021,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
2399 u16 p1k[IWLAGN_P1K_SIZE]; 2021 u16 p1k[IWLAGN_P1K_SIZE];
2400 int ret, i; 2022 int ret, i;
2401 2023
2402 mutex_lock(&priv->mutex); 2024 mutex_lock(&priv->shrd->mutex);
2403 2025
2404 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || 2026 if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
2405 key->cipher == WLAN_CIPHER_SUITE_WEP104) && 2027 key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
@@ -2504,7 +2126,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
2504 break; 2126 break;
2505 } 2127 }
2506 2128
2507 mutex_unlock(&priv->mutex); 2129 mutex_unlock(&priv->shrd->mutex);
2508} 2130}
2509 2131
2510static int iwlagn_mac_suspend(struct ieee80211_hw *hw, 2132static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
@@ -2529,7 +2151,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2529 if (WARN_ON(!wowlan)) 2151 if (WARN_ON(!wowlan))
2530 return -EINVAL; 2152 return -EINVAL;
2531 2153
2532 mutex_lock(&priv->mutex); 2154 mutex_lock(&priv->shrd->mutex);
2533 2155
2534 /* Don't attempt WoWLAN when not associated, tear down instead. */ 2156 /* Don't attempt WoWLAN when not associated, tear down instead. */
2535 if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION || 2157 if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
@@ -2558,7 +2180,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2558 * since the uCode will add 0x10 before using the value. 2180 * since the uCode will add 0x10 before using the value.
2559 */ 2181 */
2560 for (i = 0; i < 8; i++) { 2182 for (i = 0; i < 8; i++) {
2561 seq = priv->stations[IWL_AP_ID].tid[i].seq_number; 2183 seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
2562 seq -= 0x10; 2184 seq -= 0x10;
2563 wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq); 2185 wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
2564 } 2186 }
@@ -2590,9 +2212,9 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2590 2212
2591 memcpy(&rxon, &ctx->active, sizeof(rxon)); 2213 memcpy(&rxon, &ctx->active, sizeof(rxon));
2592 2214
2593 trans_stop_device(&priv->trans); 2215 iwl_trans_stop_device(trans(priv));
2594 2216
2595 priv->wowlan = true; 2217 priv->shrd->wowlan = true;
2596 2218
2597 ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan, 2219 ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
2598 IWL_UCODE_WOWLAN); 2220 IWL_UCODE_WOWLAN);
@@ -2623,11 +2245,11 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2623 * constraints. Since we're in the suspend path 2245 * constraints. Since we're in the suspend path
2624 * that isn't really a problem though. 2246 * that isn't really a problem though.
2625 */ 2247 */
2626 mutex_unlock(&priv->mutex); 2248 mutex_unlock(&priv->shrd->mutex);
2627 ieee80211_iter_keys(priv->hw, ctx->vif, 2249 ieee80211_iter_keys(priv->hw, ctx->vif,
2628 iwlagn_wowlan_program_keys, 2250 iwlagn_wowlan_program_keys,
2629 &key_data); 2251 &key_data);
2630 mutex_lock(&priv->mutex); 2252 mutex_lock(&priv->shrd->mutex);
2631 if (key_data.error) { 2253 if (key_data.error) {
2632 ret = -EIO; 2254 ret = -EIO;
2633 goto error; 2255 goto error;
@@ -2642,13 +2264,13 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2642 .len[0] = sizeof(*key_data.rsc_tsc), 2264 .len[0] = sizeof(*key_data.rsc_tsc),
2643 }; 2265 };
2644 2266
2645 ret = trans_send_cmd(&priv->trans, &rsc_tsc_cmd); 2267 ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
2646 if (ret) 2268 if (ret)
2647 goto error; 2269 goto error;
2648 } 2270 }
2649 2271
2650 if (key_data.use_tkip) { 2272 if (key_data.use_tkip) {
2651 ret = trans_send_cmd_pdu(&priv->trans, 2273 ret = iwl_trans_send_cmd_pdu(trans(priv),
2652 REPLY_WOWLAN_TKIP_PARAMS, 2274 REPLY_WOWLAN_TKIP_PARAMS,
2653 CMD_SYNC, sizeof(tkip_cmd), 2275 CMD_SYNC, sizeof(tkip_cmd),
2654 &tkip_cmd); 2276 &tkip_cmd);
@@ -2664,7 +2286,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2664 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN); 2286 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
2665 kek_kck_cmd.replay_ctr = priv->replay_ctr; 2287 kek_kck_cmd.replay_ctr = priv->replay_ctr;
2666 2288
2667 ret = trans_send_cmd_pdu(&priv->trans, 2289 ret = iwl_trans_send_cmd_pdu(trans(priv),
2668 REPLY_WOWLAN_KEK_KCK_MATERIAL, 2290 REPLY_WOWLAN_KEK_KCK_MATERIAL,
2669 CMD_SYNC, sizeof(kek_kck_cmd), 2291 CMD_SYNC, sizeof(kek_kck_cmd),
2670 &kek_kck_cmd); 2292 &kek_kck_cmd);
@@ -2673,7 +2295,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2673 } 2295 }
2674 } 2296 }
2675 2297
2676 ret = trans_send_cmd_pdu(&priv->trans, REPLY_WOWLAN_WAKEUP_FILTER, 2298 ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
2677 CMD_SYNC, sizeof(wakeup_filter_cmd), 2299 CMD_SYNC, sizeof(wakeup_filter_cmd),
2678 &wakeup_filter_cmd); 2300 &wakeup_filter_cmd);
2679 if (ret) 2301 if (ret)
@@ -2686,17 +2308,17 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
2686 device_set_wakeup_enable(priv->bus->dev, true); 2308 device_set_wakeup_enable(priv->bus->dev, true);
2687 2309
2688 /* Now let the ucode operate on its own */ 2310 /* Now let the ucode operate on its own */
2689 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 2311 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
2690 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); 2312 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
2691 2313
2692 goto out; 2314 goto out;
2693 2315
2694 error: 2316 error:
2695 priv->wowlan = false; 2317 priv->shrd->wowlan = false;
2696 iwlagn_prepare_restart(priv); 2318 iwlagn_prepare_restart(priv);
2697 ieee80211_restart_hw(priv->hw); 2319 ieee80211_restart_hw(priv->hw);
2698 out: 2320 out:
2699 mutex_unlock(&priv->mutex); 2321 mutex_unlock(&priv->shrd->mutex);
2700 kfree(key_data.rsc_tsc); 2322 kfree(key_data.rsc_tsc);
2701 return ret; 2323 return ret;
2702} 2324}
@@ -2710,21 +2332,21 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
2710 u32 base, status = 0xffffffff; 2332 u32 base, status = 0xffffffff;
2711 int ret = -EIO; 2333 int ret = -EIO;
2712 2334
2713 mutex_lock(&priv->mutex); 2335 mutex_lock(&priv->shrd->mutex);
2714 2336
2715 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 2337 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
2716 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); 2338 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
2717 2339
2718 base = priv->device_pointers.error_event_table; 2340 base = priv->device_pointers.error_event_table;
2719 if (iwlagn_hw_valid_rtc_data_addr(base)) { 2341 if (iwlagn_hw_valid_rtc_data_addr(base)) {
2720 spin_lock_irqsave(&priv->reg_lock, flags); 2342 spin_lock_irqsave(&bus(priv)->reg_lock, flags);
2721 ret = iwl_grab_nic_access_silent(priv); 2343 ret = iwl_grab_nic_access_silent(bus(priv));
2722 if (ret == 0) { 2344 if (ret == 0) {
2723 iwl_write32(priv, HBUS_TARG_MEM_RADDR, base); 2345 iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
2724 status = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 2346 status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
2725 iwl_release_nic_access(priv); 2347 iwl_release_nic_access(bus(priv));
2726 } 2348 }
2727 spin_unlock_irqrestore(&priv->reg_lock, flags); 2349 spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
2728 2350
2729#ifdef CONFIG_IWLWIFI_DEBUGFS 2351#ifdef CONFIG_IWLWIFI_DEBUGFS
2730 if (ret == 0) { 2352 if (ret == 0) {
@@ -2735,7 +2357,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
2735 2357
2736 if (priv->wowlan_sram) 2358 if (priv->wowlan_sram)
2737 _iwl_read_targ_mem_words( 2359 _iwl_read_targ_mem_words(
2738 priv, 0x800000, priv->wowlan_sram, 2360 bus(priv), 0x800000, priv->wowlan_sram,
2739 priv->ucode_wowlan.data.len / 4); 2361 priv->ucode_wowlan.data.len / 4);
2740 } 2362 }
2741#endif 2363#endif
@@ -2744,7 +2366,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
2744 /* we'll clear ctx->vif during iwlagn_prepare_restart() */ 2366 /* we'll clear ctx->vif during iwlagn_prepare_restart() */
2745 vif = ctx->vif; 2367 vif = ctx->vif;
2746 2368
2747 priv->wowlan = false; 2369 priv->shrd->wowlan = false;
2748 2370
2749 device_set_wakeup_enable(priv->bus->dev, false); 2371 device_set_wakeup_enable(priv->bus->dev, false);
2750 2372
@@ -2754,7 +2376,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
2754 iwl_connection_init_rx_config(priv, ctx); 2376 iwl_connection_init_rx_config(priv, ctx);
2755 iwlagn_set_rxon_chain(priv, ctx); 2377 iwlagn_set_rxon_chain(priv, ctx);
2756 2378
2757 mutex_unlock(&priv->mutex); 2379 mutex_unlock(&priv->shrd->mutex);
2758 2380
2759 ieee80211_resume_disconnect(vif); 2381 ieee80211_resume_disconnect(vif);
2760 2382
@@ -2823,7 +2445,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2823 if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET) 2445 if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
2824 return 0; 2446 return 0;
2825 2447
2826 mutex_lock(&priv->mutex); 2448 mutex_lock(&priv->shrd->mutex);
2827 iwl_scan_cancel_timeout(priv, 100); 2449 iwl_scan_cancel_timeout(priv, 100);
2828 2450
2829 BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT); 2451 BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
@@ -2874,7 +2496,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2874 ret = -EINVAL; 2496 ret = -EINVAL;
2875 } 2497 }
2876 2498
2877 mutex_unlock(&priv->mutex); 2499 mutex_unlock(&priv->shrd->mutex);
2878 IWL_DEBUG_MAC80211(priv, "leave\n"); 2500 IWL_DEBUG_MAC80211(priv, "leave\n");
2879 2501
2880 return ret; 2502 return ret;
@@ -2889,6 +2511,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2889 struct iwl_priv *priv = hw->priv; 2511 struct iwl_priv *priv = hw->priv;
2890 int ret = -EINVAL; 2512 int ret = -EINVAL;
2891 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; 2513 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
2514 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
2892 2515
2893 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 2516 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
2894 sta->addr, tid); 2517 sta->addr, tid);
@@ -2896,7 +2519,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2896 if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)) 2519 if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
2897 return -EACCES; 2520 return -EACCES;
2898 2521
2899 mutex_lock(&priv->mutex); 2522 mutex_lock(&priv->shrd->mutex);
2900 2523
2901 switch (action) { 2524 switch (action) {
2902 case IEEE80211_AMPDU_RX_START: 2525 case IEEE80211_AMPDU_RX_START:
@@ -2906,7 +2529,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2906 case IEEE80211_AMPDU_RX_STOP: 2529 case IEEE80211_AMPDU_RX_STOP:
2907 IWL_DEBUG_HT(priv, "stop Rx\n"); 2530 IWL_DEBUG_HT(priv, "stop Rx\n");
2908 ret = iwl_sta_rx_agg_stop(priv, sta, tid); 2531 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
2909 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2532 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
2910 ret = 0; 2533 ret = 0;
2911 break; 2534 break;
2912 case IEEE80211_AMPDU_TX_START: 2535 case IEEE80211_AMPDU_TX_START:
@@ -2926,7 +2549,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2926 IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n", 2549 IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
2927 priv->agg_tids_count); 2550 priv->agg_tids_count);
2928 } 2551 }
2929 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2552 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
2930 ret = 0; 2553 ret = 0;
2931 if (priv->cfg->ht_params && 2554 if (priv->cfg->ht_params &&
2932 priv->cfg->ht_params->use_rts_for_aggregation) { 2555 priv->cfg->ht_params->use_rts_for_aggregation) {
@@ -2942,8 +2565,8 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2942 case IEEE80211_AMPDU_TX_OPERATIONAL: 2565 case IEEE80211_AMPDU_TX_OPERATIONAL:
2943 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); 2566 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
2944 2567
2945 trans_txq_agg_setup(&priv->trans, iwl_sta_id(sta), tid, 2568 iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
2946 buf_size); 2569 tid, buf_size);
2947 2570
2948 /* 2571 /*
2949 * If the limit is 0, then it wasn't initialised yet, 2572 * If the limit is 0, then it wasn't initialised yet,
@@ -2987,7 +2610,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
2987 ret = 0; 2610 ret = 0;
2988 break; 2611 break;
2989 } 2612 }
2990 mutex_unlock(&priv->mutex); 2613 mutex_unlock(&priv->shrd->mutex);
2991 2614
2992 return ret; 2615 return ret;
2993} 2616}
@@ -3005,7 +2628,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3005 2628
3006 IWL_DEBUG_INFO(priv, "received request to add station %pM\n", 2629 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3007 sta->addr); 2630 sta->addr);
3008 mutex_lock(&priv->mutex); 2631 mutex_lock(&priv->shrd->mutex);
3009 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", 2632 IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
3010 sta->addr); 2633 sta->addr);
3011 sta_priv->common.sta_id = IWL_INVALID_STATION; 2634 sta_priv->common.sta_id = IWL_INVALID_STATION;
@@ -3020,7 +2643,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3020 IWL_ERR(priv, "Unable to add station %pM (%d)\n", 2643 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3021 sta->addr, ret); 2644 sta->addr, ret);
3022 /* Should we return success if return code is EEXIST ? */ 2645 /* Should we return success if return code is EEXIST ? */
3023 mutex_unlock(&priv->mutex); 2646 mutex_unlock(&priv->shrd->mutex);
3024 return ret; 2647 return ret;
3025 } 2648 }
3026 2649
@@ -3030,7 +2653,7 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3030 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", 2653 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3031 sta->addr); 2654 sta->addr);
3032 iwl_rs_rate_init(priv, sta, sta_id); 2655 iwl_rs_rate_init(priv, sta, sta_id);
3033 mutex_unlock(&priv->mutex); 2656 mutex_unlock(&priv->shrd->mutex);
3034 2657
3035 return 0; 2658 return 0;
3036} 2659}
@@ -3056,14 +2679,14 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3056 2679
3057 IWL_DEBUG_MAC80211(priv, "enter\n"); 2680 IWL_DEBUG_MAC80211(priv, "enter\n");
3058 2681
3059 mutex_lock(&priv->mutex); 2682 mutex_lock(&priv->shrd->mutex);
3060 2683
3061 if (iwl_is_rfkill(priv)) 2684 if (iwl_is_rfkill(priv->shrd))
3062 goto out; 2685 goto out;
3063 2686
3064 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 2687 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
3065 test_bit(STATUS_SCANNING, &priv->status) || 2688 test_bit(STATUS_SCANNING, &priv->shrd->status) ||
3066 test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 2689 test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
3067 goto out; 2690 goto out;
3068 2691
3069 if (!iwl_is_associated_ctx(ctx)) 2692 if (!iwl_is_associated_ctx(ctx))
@@ -3082,7 +2705,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3082 goto out; 2705 goto out;
3083 } 2706 }
3084 2707
3085 spin_lock_irq(&priv->lock); 2708 spin_lock_irq(&priv->shrd->lock);
3086 2709
3087 priv->current_ht_config.smps = conf->smps_mode; 2710 priv->current_ht_config.smps = conf->smps_mode;
3088 2711
@@ -3112,23 +2735,23 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3112 iwl_set_rxon_ht(priv, ht_conf); 2735 iwl_set_rxon_ht(priv, ht_conf);
3113 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); 2736 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
3114 2737
3115 spin_unlock_irq(&priv->lock); 2738 spin_unlock_irq(&priv->shrd->lock);
3116 2739
3117 iwl_set_rate(priv); 2740 iwl_set_rate(priv);
3118 /* 2741 /*
3119 * at this point, staging_rxon has the 2742 * at this point, staging_rxon has the
3120 * configuration for channel switch 2743 * configuration for channel switch
3121 */ 2744 */
3122 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); 2745 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
3123 priv->switch_channel = cpu_to_le16(ch); 2746 priv->switch_channel = cpu_to_le16(ch);
3124 if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) { 2747 if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
3125 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); 2748 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
3126 priv->switch_channel = 0; 2749 priv->switch_channel = 0;
3127 ieee80211_chswitch_done(ctx->vif, false); 2750 ieee80211_chswitch_done(ctx->vif, false);
3128 } 2751 }
3129 2752
3130out: 2753out:
3131 mutex_unlock(&priv->mutex); 2754 mutex_unlock(&priv->shrd->mutex);
3132 IWL_DEBUG_MAC80211(priv, "leave\n"); 2755 IWL_DEBUG_MAC80211(priv, "leave\n");
3133} 2756}
3134 2757
@@ -3158,7 +2781,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3158 2781
3159#undef CHK 2782#undef CHK
3160 2783
3161 mutex_lock(&priv->mutex); 2784 mutex_lock(&priv->shrd->mutex);
3162 2785
3163 for_each_context(priv, ctx) { 2786 for_each_context(priv, ctx) {
3164 ctx->staging.filter_flags &= ~filter_nand; 2787 ctx->staging.filter_flags &= ~filter_nand;
@@ -3170,7 +2793,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3170 */ 2793 */
3171 } 2794 }
3172 2795
3173 mutex_unlock(&priv->mutex); 2796 mutex_unlock(&priv->shrd->mutex);
3174 2797
3175 /* 2798 /*
3176 * Receiving all multicast frames is always enabled by the 2799 * Receiving all multicast frames is always enabled by the
@@ -3186,14 +2809,14 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
3186{ 2809{
3187 struct iwl_priv *priv = hw->priv; 2810 struct iwl_priv *priv = hw->priv;
3188 2811
3189 mutex_lock(&priv->mutex); 2812 mutex_lock(&priv->shrd->mutex);
3190 IWL_DEBUG_MAC80211(priv, "enter\n"); 2813 IWL_DEBUG_MAC80211(priv, "enter\n");
3191 2814
3192 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2815 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
3193 IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n"); 2816 IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
3194 goto done; 2817 goto done;
3195 } 2818 }
3196 if (iwl_is_rfkill(priv)) { 2819 if (iwl_is_rfkill(priv->shrd)) {
3197 IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n"); 2820 IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
3198 goto done; 2821 goto done;
3199 } 2822 }
@@ -3210,9 +2833,9 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
3210 } 2833 }
3211 } 2834 }
3212 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 2835 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
3213 iwlagn_wait_tx_queue_empty(priv); 2836 iwl_trans_wait_tx_queue_empty(trans(priv));
3214done: 2837done:
3215 mutex_unlock(&priv->mutex); 2838 mutex_unlock(&priv->shrd->mutex);
3216 IWL_DEBUG_MAC80211(priv, "leave\n"); 2839 IWL_DEBUG_MAC80211(priv, "leave\n");
3217} 2840}
3218 2841
@@ -3220,7 +2843,7 @@ void iwlagn_disable_roc(struct iwl_priv *priv)
3220{ 2843{
3221 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; 2844 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
3222 2845
3223 lockdep_assert_held(&priv->mutex); 2846 lockdep_assert_held(&priv->shrd->mutex);
3224 2847
3225 if (!priv->hw_roc_setup) 2848 if (!priv->hw_roc_setup)
3226 return; 2849 return;
@@ -3243,9 +2866,9 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
3243 struct iwl_priv *priv = container_of(work, struct iwl_priv, 2866 struct iwl_priv *priv = container_of(work, struct iwl_priv,
3244 hw_roc_disable_work.work); 2867 hw_roc_disable_work.work);
3245 2868
3246 mutex_lock(&priv->mutex); 2869 mutex_lock(&priv->shrd->mutex);
3247 iwlagn_disable_roc(priv); 2870 iwlagn_disable_roc(priv);
3248 mutex_unlock(&priv->mutex); 2871 mutex_unlock(&priv->shrd->mutex);
3249} 2872}
3250 2873
3251static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw, 2874static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
@@ -3263,7 +2886,7 @@ static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3263 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) 2886 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
3264 return -EOPNOTSUPP; 2887 return -EOPNOTSUPP;
3265 2888
3266 mutex_lock(&priv->mutex); 2889 mutex_lock(&priv->shrd->mutex);
3267 2890
3268 /* 2891 /*
3269 * TODO: Remove this hack! Firmware needs to be updated 2892 * TODO: Remove this hack! Firmware needs to be updated
@@ -3274,7 +2897,7 @@ static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3274 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && duration > 80) 2897 if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && duration > 80)
3275 duration = 80; 2898 duration = 80;
3276 2899
3277 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 2900 if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
3278 err = -EBUSY; 2901 err = -EBUSY;
3279 goto out; 2902 goto out;
3280 } 2903 }
@@ -3313,7 +2936,7 @@ static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
3313 iwlagn_disable_roc(priv); 2936 iwlagn_disable_roc(priv);
3314 2937
3315 out: 2938 out:
3316 mutex_unlock(&priv->mutex); 2939 mutex_unlock(&priv->shrd->mutex);
3317 2940
3318 return err; 2941 return err;
3319} 2942}
@@ -3325,14 +2948,80 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3325 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) 2948 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
3326 return -EOPNOTSUPP; 2949 return -EOPNOTSUPP;
3327 2950
3328 mutex_lock(&priv->mutex); 2951 mutex_lock(&priv->shrd->mutex);
3329 iwl_scan_cancel_timeout(priv, priv->hw_roc_duration); 2952 iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
3330 iwlagn_disable_roc(priv); 2953 iwlagn_disable_roc(priv);
3331 mutex_unlock(&priv->mutex); 2954 mutex_unlock(&priv->shrd->mutex);
3332 2955
3333 return 0; 2956 return 0;
3334} 2957}
3335 2958
2959static int iwl_mac_tx_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2960 const u8 *bssid, enum ieee80211_tx_sync_type type)
2961{
2962 struct iwl_priv *priv = hw->priv;
2963 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2964 struct iwl_rxon_context *ctx = vif_priv->ctx;
2965 int ret;
2966 u8 sta_id;
2967
2968 mutex_lock(&priv->shrd->mutex);
2969
2970 if (iwl_is_associated_ctx(ctx)) {
2971 ret = 0;
2972 goto out;
2973 }
2974
2975 if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
2976 ret = -EBUSY;
2977 goto out;
2978 }
2979
2980 ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
2981 if (ret)
2982 goto out;
2983
2984 if (WARN_ON(sta_id != ctx->ap_sta_id)) {
2985 ret = -EIO;
2986 goto out_remove_sta;
2987 }
2988
2989 memcpy(ctx->bssid, bssid, ETH_ALEN);
2990 ctx->preauth_bssid = true;
2991
2992 ret = iwlagn_commit_rxon(priv, ctx);
2993
2994 if (ret == 0)
2995 goto out;
2996
2997 out_remove_sta:
2998 iwl_remove_station(priv, sta_id, bssid);
2999 out:
3000 mutex_unlock(&priv->shrd->mutex);
3001 return ret;
3002}
3003
3004static void iwl_mac_finish_tx_sync(struct ieee80211_hw *hw,
3005 struct ieee80211_vif *vif,
3006 const u8 *bssid,
3007 enum ieee80211_tx_sync_type type)
3008{
3009 struct iwl_priv *priv = hw->priv;
3010 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3011 struct iwl_rxon_context *ctx = vif_priv->ctx;
3012
3013 mutex_lock(&priv->shrd->mutex);
3014
3015 if (iwl_is_associated_ctx(ctx))
3016 goto out;
3017
3018 iwl_remove_station(priv, ctx->ap_sta_id, bssid);
3019 ctx->preauth_bssid = false;
3020 /* no need to commit */
3021 out:
3022 mutex_unlock(&priv->shrd->mutex);
3023}
3024
3336/***************************************************************************** 3025/*****************************************************************************
3337 * 3026 *
3338 * driver setup and teardown 3027 * driver setup and teardown
@@ -3341,7 +3030,7 @@ static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
3341 3030
3342static void iwl_setup_deferred_work(struct iwl_priv *priv) 3031static void iwl_setup_deferred_work(struct iwl_priv *priv)
3343{ 3032{
3344 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 3033 priv->shrd->workqueue = create_singlethread_workqueue(DRV_NAME);
3345 3034
3346 init_waitqueue_head(&priv->wait_command_queue); 3035 init_waitqueue_head(&priv->wait_command_queue);
3347 3036
@@ -3415,10 +3104,9 @@ static int iwl_init_drv(struct iwl_priv *priv)
3415{ 3104{
3416 int ret; 3105 int ret;
3417 3106
3418 spin_lock_init(&priv->sta_lock); 3107 spin_lock_init(&priv->shrd->sta_lock);
3419 spin_lock_init(&priv->hcmd_lock);
3420 3108
3421 mutex_init(&priv->mutex); 3109 mutex_init(&priv->shrd->mutex);
3422 3110
3423 priv->ieee_channels = NULL; 3111 priv->ieee_channels = NULL;
3424 priv->ieee_rates = NULL; 3112 priv->ieee_rates = NULL;
@@ -3459,7 +3147,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3459 goto err; 3147 goto err;
3460 } 3148 }
3461 3149
3462 ret = iwlcore_init_geos(priv); 3150 ret = iwl_init_geos(priv);
3463 if (ret) { 3151 if (ret) {
3464 IWL_ERR(priv, "initializing geos failed: %d\n", ret); 3152 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3465 goto err_free_channel_map; 3153 goto err_free_channel_map;
@@ -3477,8 +3165,10 @@ err:
3477static void iwl_uninit_drv(struct iwl_priv *priv) 3165static void iwl_uninit_drv(struct iwl_priv *priv)
3478{ 3166{
3479 iwl_calib_free_results(priv); 3167 iwl_calib_free_results(priv);
3480 iwlcore_free_geos(priv); 3168 iwl_free_geos(priv);
3481 iwl_free_channel_map(priv); 3169 iwl_free_channel_map(priv);
3170 if (priv->tx_cmd_pool)
3171 kmem_cache_destroy(priv->tx_cmd_pool);
3482 kfree(priv->scan_cmd); 3172 kfree(priv->scan_cmd);
3483 kfree(priv->beacon_cmd); 3173 kfree(priv->beacon_cmd);
3484#ifdef CONFIG_IWLWIFI_DEBUGFS 3174#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -3491,7 +3181,7 @@ static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
3491{ 3181{
3492 struct iwl_priv *priv = hw->priv; 3182 struct iwl_priv *priv = hw->priv;
3493 3183
3494 mutex_lock(&priv->mutex); 3184 mutex_lock(&priv->shrd->mutex);
3495 3185
3496 if (priv->cfg->bt_params && 3186 if (priv->cfg->bt_params &&
3497 priv->cfg->bt_params->advanced_bt_coexist) { 3187 priv->cfg->bt_params->advanced_bt_coexist) {
@@ -3506,7 +3196,7 @@ static void iwl_mac_rssi_callback(struct ieee80211_hw *hw,
3506 "ignoring RSSI callback\n"); 3196 "ignoring RSSI callback\n");
3507 } 3197 }
3508 3198
3509 mutex_unlock(&priv->mutex); 3199 mutex_unlock(&priv->shrd->mutex);
3510} 3200}
3511 3201
3512struct ieee80211_ops iwlagn_hw_ops = { 3202struct ieee80211_ops iwlagn_hw_ops = {
@@ -3540,27 +3230,38 @@ struct ieee80211_ops iwlagn_hw_ops = {
3540 .rssi_callback = iwl_mac_rssi_callback, 3230 .rssi_callback = iwl_mac_rssi_callback,
3541 CFG80211_TESTMODE_CMD(iwl_testmode_cmd) 3231 CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
3542 CFG80211_TESTMODE_DUMP(iwl_testmode_dump) 3232 CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
3233 .tx_sync = iwl_mac_tx_sync,
3234 .finish_tx_sync = iwl_mac_finish_tx_sync,
3543}; 3235};
3544 3236
3545static u32 iwl_hw_detect(struct iwl_priv *priv) 3237static u32 iwl_hw_detect(struct iwl_priv *priv)
3546{ 3238{
3547 return iwl_read32(priv, CSR_HW_REV); 3239 return iwl_read32(bus(priv), CSR_HW_REV);
3548} 3240}
3549 3241
3242/* Size of one Rx buffer in host DRAM */
3243#define IWL_RX_BUF_SIZE_4K (4 * 1024)
3244#define IWL_RX_BUF_SIZE_8K (8 * 1024)
3245
3550static int iwl_set_hw_params(struct iwl_priv *priv) 3246static int iwl_set_hw_params(struct iwl_priv *priv)
3551{ 3247{
3552 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
3553 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
3554 if (iwlagn_mod_params.amsdu_size_8K) 3248 if (iwlagn_mod_params.amsdu_size_8K)
3555 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K); 3249 hw_params(priv).rx_page_order =
3250 get_order(IWL_RX_BUF_SIZE_8K);
3556 else 3251 else
3557 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K); 3252 hw_params(priv).rx_page_order =
3558 3253 get_order(IWL_RX_BUF_SIZE_4K);
3559 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
3560 3254
3561 if (iwlagn_mod_params.disable_11n) 3255 if (iwlagn_mod_params.disable_11n)
3562 priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE; 3256 priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
3563 3257
3258 hw_params(priv).num_ampdu_queues =
3259 priv->cfg->base_params->num_of_ampdu_queues;
3260 hw_params(priv).shadow_reg_enable =
3261 priv->cfg->base_params->shadow_reg_enable;
3262 hw_params(priv).sku = priv->cfg->sku;
3263 hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
3264
3564 /* Device-specific setup */ 3265 /* Device-specific setup */
3565 return priv->cfg->lib->set_hw_params(priv); 3266 return priv->cfg->lib->set_hw_params(priv);
3566} 3267}
@@ -3587,7 +3288,8 @@ out:
3587 return hw; 3288 return hw;
3588} 3289}
3589 3290
3590int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg) 3291int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
3292 struct iwl_cfg *cfg)
3591{ 3293{
3592 int err = 0; 3294 int err = 0;
3593 struct iwl_priv *priv; 3295 struct iwl_priv *priv;
@@ -3606,7 +3308,17 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3606 3308
3607 priv = hw->priv; 3309 priv = hw->priv;
3608 priv->bus = bus; 3310 priv->bus = bus;
3609 bus_set_drv_data(priv->bus, priv); 3311 priv->shrd = &priv->_shrd;
3312 priv->shrd->bus = bus;
3313 priv->shrd->priv = priv;
3314 priv->shrd->hw = hw;
3315 bus_set_drv_data(priv->bus, priv->shrd);
3316
3317 priv->shrd->trans = trans_ops->alloc(priv->shrd);
3318 if (priv->shrd->trans == NULL) {
3319 err = -ENOMEM;
3320 goto out_free_traffic_mem;
3321 }
3610 3322
3611 /* At this point both hw and priv are allocated. */ 3323 /* At this point both hw and priv are allocated. */
3612 3324
@@ -3614,15 +3326,15 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3614 3326
3615 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); 3327 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
3616 priv->cfg = cfg; 3328 priv->cfg = cfg;
3617 priv->inta_mask = CSR_INI_SET_MASK;
3618 3329
3619 /* is antenna coupling more than 35dB ? */ 3330 /* is antenna coupling more than 35dB ? */
3620 priv->bt_ant_couple_ok = 3331 priv->bt_ant_couple_ok =
3621 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? 3332 (iwlagn_mod_params.ant_coupling >
3622 true : false; 3333 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
3334 true : false;
3623 3335
3624 /* enable/disable bt channel inhibition */ 3336 /* enable/disable bt channel inhibition */
3625 priv->bt_ch_announce = iwlagn_bt_ch_announce; 3337 priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
3626 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", 3338 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
3627 (priv->bt_ch_announce) ? "On" : "Off"); 3339 (priv->bt_ch_announce) ? "On" : "Off");
3628 3340
@@ -3632,15 +3344,15 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3632 /* these spin locks will be used in apm_ops.init and EEPROM access 3344 /* these spin locks will be used in apm_ops.init and EEPROM access
3633 * we should init now 3345 * we should init now
3634 */ 3346 */
3635 spin_lock_init(&priv->reg_lock); 3347 spin_lock_init(&bus(priv)->reg_lock);
3636 spin_lock_init(&priv->lock); 3348 spin_lock_init(&priv->shrd->lock);
3637 3349
3638 /* 3350 /*
3639 * stop and reset the on-board processor just in case it is in a 3351 * stop and reset the on-board processor just in case it is in a
3640 * strange state ... like being left stranded by a primary kernel 3352 * strange state ... like being left stranded by a primary kernel
3641 * and this is now the kdump kernel trying to start up 3353 * and this is now the kdump kernel trying to start up
3642 */ 3354 */
3643 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3355 iwl_write32(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3644 3356
3645 /*********************** 3357 /***********************
3646 * 3. Read REV register 3358 * 3. Read REV register
@@ -3649,11 +3361,11 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3649 IWL_INFO(priv, "Detected %s, REV=0x%X\n", 3361 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3650 priv->cfg->name, hw_rev); 3362 priv->cfg->name, hw_rev);
3651 3363
3652 err = iwl_trans_register(&priv->trans, priv); 3364 err = iwl_trans_request_irq(trans(priv));
3653 if (err) 3365 if (err)
3654 goto out_free_traffic_mem; 3366 goto out_free_trans;
3655 3367
3656 if (trans_prepare_card_hw(&priv->trans)) { 3368 if (iwl_trans_prepare_card_hw(trans(priv))) {
3657 err = -EIO; 3369 err = -EIO;
3658 IWL_WARN(priv, "Failed, HW not ready\n"); 3370 IWL_WARN(priv, "Failed, HW not ready\n");
3659 goto out_free_trans; 3371 goto out_free_trans;
@@ -3721,13 +3433,14 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3721 iwl_enable_rfkill_int(priv); 3433 iwl_enable_rfkill_int(priv);
3722 3434
3723 /* If platform's RF_KILL switch is NOT set to KILL */ 3435 /* If platform's RF_KILL switch is NOT set to KILL */
3724 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 3436 if (iwl_read32(bus(priv),
3725 clear_bit(STATUS_RF_KILL_HW, &priv->status); 3437 CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
3438 clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
3726 else 3439 else
3727 set_bit(STATUS_RF_KILL_HW, &priv->status); 3440 set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
3728 3441
3729 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 3442 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
3730 test_bit(STATUS_RF_KILL_HW, &priv->status)); 3443 test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
3731 3444
3732 iwl_power_initialize(priv); 3445 iwl_power_initialize(priv);
3733 iwl_tt_initialize(priv); 3446 iwl_tt_initialize(priv);
@@ -3741,13 +3454,13 @@ int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg)
3741 return 0; 3454 return 0;
3742 3455
3743out_destroy_workqueue: 3456out_destroy_workqueue:
3744 destroy_workqueue(priv->workqueue); 3457 destroy_workqueue(priv->shrd->workqueue);
3745 priv->workqueue = NULL; 3458 priv->shrd->workqueue = NULL;
3746 iwl_uninit_drv(priv); 3459 iwl_uninit_drv(priv);
3747out_free_eeprom: 3460out_free_eeprom:
3748 iwl_eeprom_free(priv); 3461 iwl_eeprom_free(priv);
3749out_free_trans: 3462out_free_trans:
3750 trans_free(&priv->trans); 3463 iwl_trans_free(trans(priv));
3751out_free_traffic_mem: 3464out_free_traffic_mem:
3752 iwl_free_traffic_mem(priv); 3465 iwl_free_traffic_mem(priv);
3753 ieee80211_free_hw(priv->hw); 3466 ieee80211_free_hw(priv->hw);
@@ -3757,8 +3470,6 @@ out:
3757 3470
3758void __devexit iwl_remove(struct iwl_priv * priv) 3471void __devexit iwl_remove(struct iwl_priv * priv)
3759{ 3472{
3760 unsigned long flags;
3761
3762 wait_for_completion(&priv->firmware_loading_complete); 3473 wait_for_completion(&priv->firmware_loading_complete);
3763 3474
3764 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 3475 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
@@ -3771,48 +3482,36 @@ void __devexit iwl_remove(struct iwl_priv * priv)
3771 * to be called and iwl_down since we are removing the device 3482 * to be called and iwl_down since we are removing the device
3772 * we need to set STATUS_EXIT_PENDING bit. 3483 * we need to set STATUS_EXIT_PENDING bit.
3773 */ 3484 */
3774 set_bit(STATUS_EXIT_PENDING, &priv->status); 3485 set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
3775 3486
3776 iwl_testmode_cleanup(priv); 3487 iwl_testmode_cleanup(priv);
3777 iwl_leds_exit(priv); 3488 iwl_leds_exit(priv);
3778 3489
3779 if (priv->mac80211_registered) { 3490 if (priv->shrd->mac80211_registered) {
3780 ieee80211_unregister_hw(priv->hw); 3491 ieee80211_unregister_hw(priv->hw);
3781 priv->mac80211_registered = 0; 3492 priv->shrd->mac80211_registered = 0;
3782 } 3493 }
3783 3494
3784 /* Reset to low power before unloading driver. */
3785 iwl_apm_stop(priv);
3786
3787 iwl_tt_exit(priv); 3495 iwl_tt_exit(priv);
3788 3496
3789 /* make sure we flush any pending irq or 3497 /*This will stop the queues, move the device to low power state */
3790 * tasklet for the driver 3498 iwl_trans_stop_device(trans(priv));
3791 */
3792 spin_lock_irqsave(&priv->lock, flags);
3793 iwl_disable_interrupts(priv);
3794 spin_unlock_irqrestore(&priv->lock, flags);
3795
3796 trans_sync_irq(&priv->trans);
3797 3499
3798 iwl_dealloc_ucode(priv); 3500 iwl_dealloc_ucode(priv);
3799 3501
3800 trans_rx_free(&priv->trans);
3801 trans_tx_free(&priv->trans);
3802
3803 iwl_eeprom_free(priv); 3502 iwl_eeprom_free(priv);
3804 3503
3805 /*netif_stop_queue(dev); */ 3504 /*netif_stop_queue(dev); */
3806 flush_workqueue(priv->workqueue); 3505 flush_workqueue(priv->shrd->workqueue);
3807 3506
3808 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes 3507 /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
3809 * priv->workqueue... so we can't take down the workqueue 3508 * priv->shrd->workqueue... so we can't take down the workqueue
3810 * until now... */ 3509 * until now... */
3811 destroy_workqueue(priv->workqueue); 3510 destroy_workqueue(priv->shrd->workqueue);
3812 priv->workqueue = NULL; 3511 priv->shrd->workqueue = NULL;
3813 iwl_free_traffic_mem(priv); 3512 iwl_free_traffic_mem(priv);
3814 3513
3815 trans_free(&priv->trans); 3514 iwl_trans_free(trans(priv));
3816 3515
3817 bus_set_drv_data(priv->bus, NULL); 3516 bus_set_drv_data(priv->bus, NULL);
3818 3517
@@ -3863,7 +3562,8 @@ module_exit(iwl_exit);
3863module_init(iwl_init); 3562module_init(iwl_init);
3864 3563
3865#ifdef CONFIG_IWLWIFI_DEBUG 3564#ifdef CONFIG_IWLWIFI_DEBUG
3866module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); 3565module_param_named(debug, iwlagn_mod_params.debug_level, uint,
3566 S_IRUGO | S_IWUSR);
3867MODULE_PARM_DESC(debug, "debug output mask"); 3567MODULE_PARM_DESC(debug, "debug output mask");
3868#endif 3568#endif
3869 3569
@@ -3879,18 +3579,21 @@ MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3879module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO); 3579module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
3880MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 3580MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
3881 3581
3882module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int, 3582module_param_named(ucode_alternative,
3883 S_IRUGO); 3583 iwlagn_mod_params.wanted_ucode_alternative,
3584 int, S_IRUGO);
3884MODULE_PARM_DESC(ucode_alternative, 3585MODULE_PARM_DESC(ucode_alternative,
3885 "specify ucode alternative to use from ucode file"); 3586 "specify ucode alternative to use from ucode file");
3886 3587
3887module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO); 3588module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
3589 int, S_IRUGO);
3888MODULE_PARM_DESC(antenna_coupling, 3590MODULE_PARM_DESC(antenna_coupling,
3889 "specify antenna coupling in dB (defualt: 0 dB)"); 3591 "specify antenna coupling in dB (defualt: 0 dB)");
3890 3592
3891module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO); 3593module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
3594 bool, S_IRUGO);
3892MODULE_PARM_DESC(bt_ch_inhibition, 3595MODULE_PARM_DESC(bt_ch_inhibition,
3893 "Disable BT channel inhibition (default: enable)"); 3596 "Enable BT channel inhibition (default: enable)");
3894 3597
3895module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO); 3598module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
3896MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])"); 3599MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
@@ -3936,6 +3639,11 @@ module_param_named(power_level, iwlagn_mod_params.power_level,
3936MODULE_PARM_DESC(power_level, 3639MODULE_PARM_DESC(power_level,
3937 "default power save level (range from 1 - 5, default: 1)"); 3640 "default power save level (range from 1 - 5, default: 1)");
3938 3641
3642module_param_named(auto_agg, iwlagn_mod_params.auto_agg,
3643 bool, S_IRUGO);
3644MODULE_PARM_DESC(auto_agg,
3645 "enable agg w/o check traffic load (default: enable)");
3646
3939/* 3647/*
3940 * For now, keep using power level 1 instead of automatically 3648 * For now, keep using power level 1 instead of automatically
3941 * adjusting ... 3649 * adjusting ...
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index df2960ae92aa..a7b4948e43da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,54 +65,9 @@
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67 67
68/* configuration for the _agn devices */
69extern struct iwl_cfg iwl5300_agn_cfg;
70extern struct iwl_cfg iwl5100_agn_cfg;
71extern struct iwl_cfg iwl5350_agn_cfg;
72extern struct iwl_cfg iwl5100_bgn_cfg;
73extern struct iwl_cfg iwl5100_abg_cfg;
74extern struct iwl_cfg iwl5150_agn_cfg;
75extern struct iwl_cfg iwl5150_abg_cfg;
76extern struct iwl_cfg iwl6005_2agn_cfg;
77extern struct iwl_cfg iwl6005_2abg_cfg;
78extern struct iwl_cfg iwl6005_2bg_cfg;
79extern struct iwl_cfg iwl1030_bgn_cfg;
80extern struct iwl_cfg iwl1030_bg_cfg;
81extern struct iwl_cfg iwl6030_2agn_cfg;
82extern struct iwl_cfg iwl6030_2abg_cfg;
83extern struct iwl_cfg iwl6030_2bgn_cfg;
84extern struct iwl_cfg iwl6030_2bg_cfg;
85extern struct iwl_cfg iwl6000i_2agn_cfg;
86extern struct iwl_cfg iwl6000i_2abg_cfg;
87extern struct iwl_cfg iwl6000i_2bg_cfg;
88extern struct iwl_cfg iwl6000_3agn_cfg;
89extern struct iwl_cfg iwl6050_2agn_cfg;
90extern struct iwl_cfg iwl6050_2abg_cfg;
91extern struct iwl_cfg iwl6150_bgn_cfg;
92extern struct iwl_cfg iwl6150_bg_cfg;
93extern struct iwl_cfg iwl1000_bgn_cfg;
94extern struct iwl_cfg iwl1000_bg_cfg;
95extern struct iwl_cfg iwl100_bgn_cfg;
96extern struct iwl_cfg iwl100_bg_cfg;
97extern struct iwl_cfg iwl130_bgn_cfg;
98extern struct iwl_cfg iwl130_bg_cfg;
99extern struct iwl_cfg iwl2000_2bgn_cfg;
100extern struct iwl_cfg iwl2000_2bg_cfg;
101extern struct iwl_cfg iwl2030_2bgn_cfg;
102extern struct iwl_cfg iwl2030_2bg_cfg;
103extern struct iwl_cfg iwl6035_2agn_cfg;
104extern struct iwl_cfg iwl6035_2abg_cfg;
105extern struct iwl_cfg iwl6035_2bg_cfg;
106extern struct iwl_cfg iwl105_bg_cfg;
107extern struct iwl_cfg iwl105_bgn_cfg;
108extern struct iwl_cfg iwl135_bg_cfg;
109extern struct iwl_cfg iwl135_bgn_cfg;
110
111extern struct iwl_mod_params iwlagn_mod_params;
112
113extern struct ieee80211_ops iwlagn_hw_ops; 68extern struct ieee80211_ops iwlagn_hw_ops;
114 69
115int iwl_reset_ict(struct iwl_priv *priv); 70int iwl_reset_ict(struct iwl_trans *trans);
116 71
117static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd) 72static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
118{ 73{
@@ -122,10 +77,6 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
122 hdr->data_valid = 1; 77 hdr->data_valid = 1;
123} 78}
124 79
125/* tx queue */
126void iwl_free_tfds_in_queue(struct iwl_priv *priv,
127 int sta_id, int tid, int freed);
128
129/* RXON */ 80/* RXON */
130int iwlagn_set_pan_params(struct iwl_priv *priv); 81int iwlagn_set_pan_params(struct iwl_priv *priv);
131int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 82int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -147,13 +98,10 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
147 enum iwlagn_ucode_type ucode_type); 98 enum iwlagn_ucode_type ucode_type);
148 99
149/* lib */ 100/* lib */
150void iwl_check_abort_status(struct iwl_priv *priv,
151 u8 frame_count, u32 status);
152int iwlagn_hw_valid_rtc_data_addr(u32 addr); 101int iwlagn_hw_valid_rtc_data_addr(u32 addr);
153int iwlagn_send_tx_power(struct iwl_priv *priv); 102int iwlagn_send_tx_power(struct iwl_priv *priv);
154void iwlagn_temperature(struct iwl_priv *priv); 103void iwlagn_temperature(struct iwl_priv *priv);
155u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv); 104u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
156int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
157int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 105int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
158void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 106void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
159int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 107int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
@@ -165,21 +113,14 @@ void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
165 113
166 114
167/* tx */ 115/* tx */
168void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
169 int index);
170void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
171 struct ieee80211_tx_info *info);
172int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); 116int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
173int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 117int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
174 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 118 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
175int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 119int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
176 struct ieee80211_sta *sta, u16 tid); 120 struct ieee80211_sta *sta, u16 tid);
177int iwlagn_txq_check_empty(struct iwl_priv *priv,
178 int sta_id, u8 tid, int txq_id);
179void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, 121void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
180 struct iwl_rx_mem_buffer *rxb); 122 struct iwl_rx_mem_buffer *rxb);
181void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 123void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
182int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
183 124
184static inline u32 iwl_tx_status_to_mac80211(u32 status) 125static inline u32 iwl_tx_status_to_mac80211(u32 status)
185{ 126{
@@ -287,7 +228,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
287} 228}
288 229
289/* eeprom */ 230/* eeprom */
290void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv); 231void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
291void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac); 232void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
292 233
293/* notification wait support */ 234/* notification wait support */
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index f3ee1c0c004c..83aed46673e1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -60,16 +60,22 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_pci_h__ 63#ifndef __iwl_bus_h__
64#define __iwl_pci_h__ 64#define __iwl_bus_h__
65 65
66/*This file includes the declaration that are exported from the bus layer */
67
68#include <linux/types.h>
69#include <linux/spinlock.h>
70
71struct iwl_shared;
66struct iwl_bus; 72struct iwl_bus;
67 73
68/** 74/**
69 * struct iwl_bus_ops - bus specific operations 75 * struct iwl_bus_ops - bus specific operations
70 * @get_pm_support: must returns true if the bus can go to sleep 76 * @get_pm_support: must returns true if the bus can go to sleep
71 * @apm_config: will be called during the config of the APM configuration 77 * @apm_config: will be called during the config of the APM configuration
72 * @set_drv_data: set the drv_data pointer to the bus layer 78 * @set_drv_data: set the shared data pointer to the bus layer
73 * @get_hw_id: prints the hw_id in the provided buffer 79 * @get_hw_id: prints the hw_id in the provided buffer
74 * @write8: write a byte to register at offset ofs 80 * @write8: write a byte to register at offset ofs
75 * @write32: write a dword to register at offset ofs 81 * @write32: write a dword to register at offset ofs
@@ -78,20 +84,29 @@ struct iwl_bus;
78struct iwl_bus_ops { 84struct iwl_bus_ops {
79 bool (*get_pm_support)(struct iwl_bus *bus); 85 bool (*get_pm_support)(struct iwl_bus *bus);
80 void (*apm_config)(struct iwl_bus *bus); 86 void (*apm_config)(struct iwl_bus *bus);
81 void (*set_drv_data)(struct iwl_bus *bus, void *drv_data); 87 void (*set_drv_data)(struct iwl_bus *bus, struct iwl_shared *shrd);
82 void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len); 88 void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
83 void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val); 89 void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
84 void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val); 90 void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
85 u32 (*read32)(struct iwl_bus *bus, u32 ofs); 91 u32 (*read32)(struct iwl_bus *bus, u32 ofs);
86}; 92};
87 93
94/**
95 * struct iwl_bus - bus common data
96 * @dev - pointer to struct device * that represent the device
97 * @ops - pointer to iwl_bus_ops
98 * @shrd - pointer to iwl_shared which holds shared data from the upper layer
99 * @irq - the irq number for the device
100 * @reg_lock - protect hw register access
101 */
88struct iwl_bus { 102struct iwl_bus {
89 /* Common data to all buses */ 103 /* Common data to all buses */
90 void *drv_data; /* driver's context */
91 struct device *dev; 104 struct device *dev;
92 struct iwl_bus_ops *ops; 105 const struct iwl_bus_ops *ops;
106 struct iwl_shared *shrd;
93 107
94 unsigned int irq; 108 unsigned int irq;
109 spinlock_t reg_lock;
95 110
96 /* pointer to bus specific struct */ 111 /* pointer to bus specific struct */
97 /*Ensure that this pointer will always be aligned to sizeof pointer */ 112 /*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -108,9 +123,10 @@ static inline void bus_apm_config(struct iwl_bus *bus)
108 bus->ops->apm_config(bus); 123 bus->ops->apm_config(bus);
109} 124}
110 125
111static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data) 126static inline void bus_set_drv_data(struct iwl_bus *bus,
127 struct iwl_shared *shrd)
112{ 128{
113 bus->ops->set_drv_data(bus, drv_data); 129 bus->ops->set_drv_data(bus, shrd);
114} 130}
115 131
116static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len) 132static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
@@ -136,4 +152,4 @@ static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
136int __must_check iwl_pci_register_driver(void); 152int __must_check iwl_pci_register_driver(void);
137void iwl_pci_unregister_driver(void); 153void iwl_pci_unregister_driver(void);
138 154
139#endif 155#endif /* __iwl_bus_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 0016c61b3000..cb06196e0e80 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -69,6 +69,9 @@
69#ifndef __iwl_commands_h__ 69#ifndef __iwl_commands_h__
70#define __iwl_commands_h__ 70#define __iwl_commands_h__
71 71
72#include <linux/etherdevice.h>
73#include <linux/ieee80211.h>
74
72struct iwl_priv; 75struct iwl_priv;
73 76
74/* uCode version contains 4 values: Major/Minor/API/Serial */ 77/* uCode version contains 4 values: Major/Minor/API/Serial */
@@ -670,7 +673,6 @@ struct iwl_rxon_assoc_cmd {
670 673
671#define IWL_CONN_MAX_LISTEN_INTERVAL 10 674#define IWL_CONN_MAX_LISTEN_INTERVAL 10
672#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */ 675#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
673#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
674 676
675/* 677/*
676 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response) 678 * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
@@ -806,6 +808,7 @@ struct iwl_qosparam_cmd {
806#define IWLAGN_STATION_COUNT 16 808#define IWLAGN_STATION_COUNT 16
807 809
808#define IWL_INVALID_STATION 255 810#define IWL_INVALID_STATION 255
811#define IWL_MAX_TID_COUNT 9
809 812
810#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2) 813#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
811#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8) 814#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -3909,6 +3912,7 @@ struct iwlagn_wowlan_kek_kck_material_cmd {
3909 * Union of all expected notifications/responses: 3912 * Union of all expected notifications/responses:
3910 * 3913 *
3911 *****************************************************************************/ 3914 *****************************************************************************/
3915#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
3912 3916
3913struct iwl_rx_packet { 3917struct iwl_rx_packet {
3914 /* 3918 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index e269987cd64c..20dd1a5506ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -42,22 +42,21 @@
42#include "iwl-sta.h" 42#include "iwl-sta.h"
43#include "iwl-agn.h" 43#include "iwl-agn.h"
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-shared.h"
45#include "iwl-agn.h" 46#include "iwl-agn.h"
46#include "iwl-trans.h" 47#include "iwl-trans.h"
47 48
48u32 iwl_debug_level;
49
50const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 49const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
51 50
52#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 51#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
53#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 52#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
54static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 53static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
55 struct ieee80211_sta_ht_cap *ht_info, 54 struct ieee80211_sta_ht_cap *ht_info,
56 enum ieee80211_band band) 55 enum ieee80211_band band)
57{ 56{
58 u16 max_bit_rate = 0; 57 u16 max_bit_rate = 0;
59 u8 rx_chains_num = priv->hw_params.rx_chains_num; 58 u8 rx_chains_num = hw_params(priv).rx_chains_num;
60 u8 tx_chains_num = priv->hw_params.tx_chains_num; 59 u8 tx_chains_num = hw_params(priv).tx_chains_num;
61 60
62 ht_info->cap = 0; 61 ht_info->cap = 0;
63 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 62 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
@@ -69,7 +68,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
69 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 68 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
70 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 69 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
71 max_bit_rate = MAX_BIT_RATE_20_MHZ; 70 max_bit_rate = MAX_BIT_RATE_20_MHZ;
72 if (priv->hw_params.ht40_channel & BIT(band)) { 71 if (hw_params(priv).ht40_channel & BIT(band)) {
73 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 72 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
74 ht_info->cap |= IEEE80211_HT_CAP_SGI_40; 73 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
75 ht_info->mcs.rx_mask[4] = 0x01; 74 ht_info->mcs.rx_mask[4] = 0x01;
@@ -107,9 +106,9 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
107} 106}
108 107
109/** 108/**
110 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom 109 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
111 */ 110 */
112int iwlcore_init_geos(struct iwl_priv *priv) 111int iwl_init_geos(struct iwl_priv *priv)
113{ 112{
114 struct iwl_channel_info *ch; 113 struct iwl_channel_info *ch;
115 struct ieee80211_supported_band *sband; 114 struct ieee80211_supported_band *sband;
@@ -122,7 +121,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
122 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates || 121 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
123 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) { 122 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
124 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n"); 123 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
125 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 124 set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
126 return 0; 125 return 0;
127 } 126 }
128 127
@@ -146,7 +145,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
146 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE; 145 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
147 146
148 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) 147 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
149 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap, 148 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
150 IEEE80211_BAND_5GHZ); 149 IEEE80211_BAND_5GHZ);
151 150
152 sband = &priv->bands[IEEE80211_BAND_2GHZ]; 151 sband = &priv->bands[IEEE80211_BAND_2GHZ];
@@ -156,7 +155,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
156 sband->n_bitrates = IWL_RATE_COUNT_LEGACY; 155 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
157 156
158 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE) 157 if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
159 iwlcore_init_ht_hw_capab(priv, &sband->ht_cap, 158 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
160 IEEE80211_BAND_2GHZ); 159 IEEE80211_BAND_2GHZ);
161 160
162 priv->ieee_channels = channels; 161 priv->ieee_channels = channels;
@@ -222,19 +221,19 @@ int iwlcore_init_geos(struct iwl_priv *priv)
222 priv->bands[IEEE80211_BAND_2GHZ].n_channels, 221 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
223 priv->bands[IEEE80211_BAND_5GHZ].n_channels); 222 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
224 223
225 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 224 set_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
226 225
227 return 0; 226 return 0;
228} 227}
229 228
230/* 229/*
231 * iwlcore_free_geos - undo allocations in iwlcore_init_geos 230 * iwl_free_geos - undo allocations in iwl_init_geos
232 */ 231 */
233void iwlcore_free_geos(struct iwl_priv *priv) 232void iwl_free_geos(struct iwl_priv *priv)
234{ 233{
235 kfree(priv->ieee_channels); 234 kfree(priv->ieee_channels);
236 kfree(priv->ieee_rates); 235 kfree(priv->ieee_rates);
237 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 236 clear_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status);
238} 237}
239 238
240static bool iwl_is_channel_extension(struct iwl_priv *priv, 239static bool iwl_is_channel_extension(struct iwl_priv *priv,
@@ -326,7 +325,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
326 325
327 conf = ieee80211_get_hw_conf(priv->hw); 326 conf = ieee80211_get_hw_conf(priv->hw);
328 327
329 lockdep_assert_held(&priv->mutex); 328 lockdep_assert_held(&priv->shrd->mutex);
330 329
331 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); 330 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
332 331
@@ -360,7 +359,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
360 beacon_int = le16_to_cpu(ctx->timing.beacon_interval); 359 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
361 } else { 360 } else {
362 beacon_int = iwl_adjust_beacon_interval(beacon_int, 361 beacon_int = iwl_adjust_beacon_interval(beacon_int,
363 priv->hw_params.max_beacon_itrvl * TIME_UNIT); 362 IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
364 ctx->timing.beacon_interval = cpu_to_le16(beacon_int); 363 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
365 } 364 }
366 365
@@ -379,7 +378,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
379 le32_to_cpu(ctx->timing.beacon_init_val), 378 le32_to_cpu(ctx->timing.beacon_init_val),
380 le16_to_cpu(ctx->timing.atim_window)); 379 le16_to_cpu(ctx->timing.atim_window));
381 380
382 return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd, 381 return iwl_trans_send_cmd_pdu(trans(priv), ctx->rxon_timing_cmd,
383 CMD_SYNC, sizeof(ctx->timing), &ctx->timing); 382 CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
384} 383}
385 384
@@ -809,10 +808,11 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
809 */ 808 */
810 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 809 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
811 810
812 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 811 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
813 return; 812 return;
814 813
815 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 814 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
815 &priv->shrd->status))
816 ieee80211_chswitch_done(ctx->vif, is_success); 816 ieee80211_chswitch_done(ctx->vif, is_success);
817} 817}
818 818
@@ -857,16 +857,16 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
857 unsigned long reload_jiffies; 857 unsigned long reload_jiffies;
858 858
859 /* Set the FW error flag -- cleared on iwl_down */ 859 /* Set the FW error flag -- cleared on iwl_down */
860 set_bit(STATUS_FW_ERROR, &priv->status); 860 set_bit(STATUS_FW_ERROR, &priv->shrd->status);
861 861
862 /* Cancel currently queued command. */ 862 /* Cancel currently queued command. */
863 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 863 clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
864 864
865 iwlagn_abort_notification_waits(priv); 865 iwlagn_abort_notification_waits(priv);
866 866
867 /* Keep the restart process from trying to send host 867 /* Keep the restart process from trying to send host
868 * commands by clearing the ready bit */ 868 * commands by clearing the ready bit */
869 clear_bit(STATUS_READY, &priv->status); 869 clear_bit(STATUS_READY, &priv->shrd->status);
870 870
871 wake_up_interruptible(&priv->wait_command_queue); 871 wake_up_interruptible(&priv->wait_command_queue);
872 872
@@ -891,63 +891,26 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
891 priv->reload_count = 0; 891 priv->reload_count = 0;
892 } 892 }
893 893
894 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { 894 if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
895 if (iwlagn_mod_params.restart_fw) { 895 if (iwlagn_mod_params.restart_fw) {
896 IWL_DEBUG(priv, IWL_DL_FW_ERRORS, 896 IWL_DEBUG_FW_ERRORS(priv,
897 "Restarting adapter due to uCode error.\n"); 897 "Restarting adapter due to uCode error.\n");
898 queue_work(priv->workqueue, &priv->restart); 898 queue_work(priv->shrd->workqueue, &priv->restart);
899 } else 899 } else
900 IWL_DEBUG(priv, IWL_DL_FW_ERRORS, 900 IWL_DEBUG_FW_ERRORS(priv,
901 "Detected FW error, but not restarting\n"); 901 "Detected FW error, but not restarting\n");
902 } 902 }
903} 903}
904 904
905/**
906 * iwl_irq_handle_error - called for HW or SW error interrupt from card
907 */
908void iwl_irq_handle_error(struct iwl_priv *priv)
909{
910 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
911 if (priv->cfg->internal_wimax_coex &&
912 (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
913 APMS_CLK_VAL_MRB_FUNC_MODE) ||
914 (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
915 APMG_PS_CTRL_VAL_RESET_REQ))) {
916 /*
917 * Keep the restart process from trying to send host
918 * commands by clearing the ready bit.
919 */
920 clear_bit(STATUS_READY, &priv->status);
921 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
922 wake_up_interruptible(&priv->wait_command_queue);
923 IWL_ERR(priv, "RF is used by WiMAX\n");
924 return;
925 }
926
927 IWL_ERR(priv, "Loaded firmware version: %s\n",
928 priv->hw->wiphy->fw_version);
929
930 iwl_dump_nic_error_log(priv);
931 iwl_dump_csr(priv);
932 iwl_dump_fh(priv, NULL, false);
933 iwl_dump_nic_event_log(priv, false, NULL, false);
934#ifdef CONFIG_IWLWIFI_DEBUG
935 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
936 iwl_print_rx_config_cmd(priv,
937 &priv->contexts[IWL_RXON_CTX_BSS]);
938#endif
939
940 iwlagn_fw_error(priv, false);
941}
942
943static int iwl_apm_stop_master(struct iwl_priv *priv) 905static int iwl_apm_stop_master(struct iwl_priv *priv)
944{ 906{
945 int ret = 0; 907 int ret = 0;
946 908
947 /* stop device's busmaster DMA activity */ 909 /* stop device's busmaster DMA activity */
948 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 910 iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
949 911
950 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED, 912 ret = iwl_poll_bit(bus(priv), CSR_RESET,
913 CSR_RESET_REG_FLAG_MASTER_DISABLED,
951 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 914 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
952 if (ret) 915 if (ret)
953 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n"); 916 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
@@ -961,13 +924,13 @@ void iwl_apm_stop(struct iwl_priv *priv)
961{ 924{
962 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n"); 925 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
963 926
964 clear_bit(STATUS_DEVICE_ENABLED, &priv->status); 927 clear_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
965 928
966 /* Stop device's DMA activity */ 929 /* Stop device's DMA activity */
967 iwl_apm_stop_master(priv); 930 iwl_apm_stop_master(priv);
968 931
969 /* Reset the entire device */ 932 /* Reset the entire device */
970 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 933 iwl_set_bit(bus(priv), CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
971 934
972 udelay(10); 935 udelay(10);
973 936
@@ -975,7 +938,7 @@ void iwl_apm_stop(struct iwl_priv *priv)
975 * Clear "initialization complete" bit to move adapter from 938 * Clear "initialization complete" bit to move adapter from
976 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 939 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
977 */ 940 */
978 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 941 iwl_clear_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
979} 942}
980 943
981 944
@@ -995,45 +958,45 @@ int iwl_apm_init(struct iwl_priv *priv)
995 */ 958 */
996 959
997 /* Disable L0S exit timer (platform NMI Work/Around) */ 960 /* Disable L0S exit timer (platform NMI Work/Around) */
998 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 961 iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
999 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 962 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1000 963
1001 /* 964 /*
1002 * Disable L0s without affecting L1; 965 * Disable L0s without affecting L1;
1003 * don't wait for ICH L0s (ICH bug W/A) 966 * don't wait for ICH L0s (ICH bug W/A)
1004 */ 967 */
1005 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 968 iwl_set_bit(bus(priv), CSR_GIO_CHICKEN_BITS,
1006 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 969 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1007 970
1008 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 971 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1009 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 972 iwl_set_bit(bus(priv), CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1010 973
1011 /* 974 /*
1012 * Enable HAP INTA (interrupt from management bus) to 975 * Enable HAP INTA (interrupt from management bus) to
1013 * wake device's PCI Express link L1a -> L0s 976 * wake device's PCI Express link L1a -> L0s
1014 */ 977 */
1015 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 978 iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
1016 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A); 979 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1017 980
1018 bus_apm_config(priv->bus); 981 bus_apm_config(priv->bus);
1019 982
1020 /* Configure analog phase-lock-loop before activating to D0A */ 983 /* Configure analog phase-lock-loop before activating to D0A */
1021 if (priv->cfg->base_params->pll_cfg_val) 984 if (priv->cfg->base_params->pll_cfg_val)
1022 iwl_set_bit(priv, CSR_ANA_PLL_CFG, 985 iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
1023 priv->cfg->base_params->pll_cfg_val); 986 priv->cfg->base_params->pll_cfg_val);
1024 987
1025 /* 988 /*
1026 * Set "initialization complete" bit to move adapter from 989 * Set "initialization complete" bit to move adapter from
1027 * D0U* --> D0A* (powered-up active) state. 990 * D0U* --> D0A* (powered-up active) state.
1028 */ 991 */
1029 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 992 iwl_set_bit(bus(priv), CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1030 993
1031 /* 994 /*
1032 * Wait for clock stabilization; once stabilized, access to 995 * Wait for clock stabilization; once stabilized, access to
1033 * device-internal resources is supported, e.g. iwl_write_prph() 996 * device-internal resources is supported, e.g. iwl_write_prph()
1034 * and accesses to uCode SRAM. 997 * and accesses to uCode SRAM.
1035 */ 998 */
1036 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 999 ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
1037 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1000 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1038 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 1001 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1039 if (ret < 0) { 1002 if (ret < 0) {
@@ -1048,14 +1011,14 @@ int iwl_apm_init(struct iwl_priv *priv)
1048 * do not disable clocks. This preserves any hardware bits already 1011 * do not disable clocks. This preserves any hardware bits already
1049 * set by default in "CLK_CTRL_REG" after reset. 1012 * set by default in "CLK_CTRL_REG" after reset.
1050 */ 1013 */
1051 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT); 1014 iwl_write_prph(bus(priv), APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
1052 udelay(20); 1015 udelay(20);
1053 1016
1054 /* Disable L1-Active */ 1017 /* Disable L1-Active */
1055 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 1018 iwl_set_bits_prph(bus(priv), APMG_PCIDEV_STT_REG,
1056 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1019 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1057 1020
1058 set_bit(STATUS_DEVICE_ENABLED, &priv->status); 1021 set_bit(STATUS_DEVICE_ENABLED, &priv->shrd->status);
1059 1022
1060out: 1023out:
1061 return ret; 1024 return ret;
@@ -1069,7 +1032,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1069 bool defer; 1032 bool defer;
1070 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1033 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1071 1034
1072 lockdep_assert_held(&priv->mutex); 1035 lockdep_assert_held(&priv->shrd->mutex);
1073 1036
1074 if (priv->tx_power_user_lmt == tx_power && !force) 1037 if (priv->tx_power_user_lmt == tx_power && !force)
1075 return 0; 1038 return 0;
@@ -1089,7 +1052,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1089 return -EINVAL; 1052 return -EINVAL;
1090 } 1053 }
1091 1054
1092 if (!iwl_is_ready_rf(priv)) 1055 if (!iwl_is_ready_rf(priv->shrd))
1093 return -EIO; 1056 return -EIO;
1094 1057
1095 /* scan complete and commit_rxon use tx_power_next value, 1058 /* scan complete and commit_rxon use tx_power_next value,
@@ -1097,7 +1060,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1097 priv->tx_power_next = tx_power; 1060 priv->tx_power_next = tx_power;
1098 1061
1099 /* do not set tx power when scanning or channel changing */ 1062 /* do not set tx power when scanning or channel changing */
1100 defer = test_bit(STATUS_SCANNING, &priv->status) || 1063 defer = test_bit(STATUS_SCANNING, &priv->shrd->status) ||
1101 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); 1064 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
1102 if (defer && !force) { 1065 if (defer && !force) {
1103 IWL_DEBUG_INFO(priv, "Deferring tx power set\n"); 1066 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
@@ -1135,7 +1098,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
1135 IWL_DEBUG_INFO(priv, "BT coex %s\n", 1098 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1136 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 1099 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1137 1100
1138 if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG, 1101 if (iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
1139 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd)) 1102 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
1140 IWL_ERR(priv, "failed to send BT Coex Config\n"); 1103 IWL_ERR(priv, "failed to send BT Coex Config\n");
1141} 1104}
@@ -1148,22 +1111,17 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1148 }; 1111 };
1149 1112
1150 if (flags & CMD_ASYNC) 1113 if (flags & CMD_ASYNC)
1151 return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD, 1114 return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
1152 CMD_ASYNC, 1115 CMD_ASYNC,
1153 sizeof(struct iwl_statistics_cmd), 1116 sizeof(struct iwl_statistics_cmd),
1154 &statistics_cmd); 1117 &statistics_cmd);
1155 else 1118 else
1156 return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD, 1119 return iwl_trans_send_cmd_pdu(trans(priv), REPLY_STATISTICS_CMD,
1157 CMD_SYNC, 1120 CMD_SYNC,
1158 sizeof(struct iwl_statistics_cmd), 1121 sizeof(struct iwl_statistics_cmd),
1159 &statistics_cmd); 1122 &statistics_cmd);
1160} 1123}
1161 1124
1162void iwl_clear_isr_stats(struct iwl_priv *priv)
1163{
1164 memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
1165}
1166
1167int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 1125int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1168 const struct ieee80211_tx_queue_params *params) 1126 const struct ieee80211_tx_queue_params *params)
1169{ 1127{
@@ -1174,7 +1132,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1174 1132
1175 IWL_DEBUG_MAC80211(priv, "enter\n"); 1133 IWL_DEBUG_MAC80211(priv, "enter\n");
1176 1134
1177 if (!iwl_is_ready_rf(priv)) { 1135 if (!iwl_is_ready_rf(priv->shrd)) {
1178 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n"); 1136 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1179 return -EIO; 1137 return -EIO;
1180 } 1138 }
@@ -1186,7 +1144,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1186 1144
1187 q = AC_NUM - 1 - queue; 1145 q = AC_NUM - 1 - queue;
1188 1146
1189 spin_lock_irqsave(&priv->lock, flags); 1147 spin_lock_irqsave(&priv->shrd->lock, flags);
1190 1148
1191 /* 1149 /*
1192 * MULTI-FIXME 1150 * MULTI-FIXME
@@ -1204,7 +1162,7 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1204 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0; 1162 ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
1205 } 1163 }
1206 1164
1207 spin_unlock_irqrestore(&priv->lock, flags); 1165 spin_unlock_irqrestore(&priv->shrd->lock, flags);
1208 1166
1209 IWL_DEBUG_MAC80211(priv, "leave\n"); 1167 IWL_DEBUG_MAC80211(priv, "leave\n");
1210 return 0; 1168 return 0;
@@ -1232,7 +1190,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
1232 struct ieee80211_vif *vif = ctx->vif; 1190 struct ieee80211_vif *vif = ctx->vif;
1233 int err; 1191 int err;
1234 1192
1235 lockdep_assert_held(&priv->mutex); 1193 lockdep_assert_held(&priv->shrd->mutex);
1236 1194
1237 /* 1195 /*
1238 * This variable will be correct only when there's just 1196 * This variable will be correct only when there's just
@@ -1276,11 +1234,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1276 1234
1277 cancel_delayed_work_sync(&priv->hw_roc_disable_work); 1235 cancel_delayed_work_sync(&priv->hw_roc_disable_work);
1278 1236
1279 mutex_lock(&priv->mutex); 1237 mutex_lock(&priv->shrd->mutex);
1280 1238
1281 iwlagn_disable_roc(priv); 1239 iwlagn_disable_roc(priv);
1282 1240
1283 if (!iwl_is_ready_rf(priv)) { 1241 if (!iwl_is_ready_rf(priv->shrd)) {
1284 IWL_WARN(priv, "Try to add interface when device not ready\n"); 1242 IWL_WARN(priv, "Try to add interface when device not ready\n");
1285 err = -EINVAL; 1243 err = -EINVAL;
1286 goto out; 1244 goto out;
@@ -1323,7 +1281,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1323 ctx->vif = NULL; 1281 ctx->vif = NULL;
1324 priv->iw_mode = NL80211_IFTYPE_STATION; 1282 priv->iw_mode = NL80211_IFTYPE_STATION;
1325 out: 1283 out:
1326 mutex_unlock(&priv->mutex); 1284 mutex_unlock(&priv->shrd->mutex);
1327 1285
1328 IWL_DEBUG_MAC80211(priv, "leave\n"); 1286 IWL_DEBUG_MAC80211(priv, "leave\n");
1329 return err; 1287 return err;
@@ -1335,7 +1293,7 @@ static void iwl_teardown_interface(struct iwl_priv *priv,
1335{ 1293{
1336 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1294 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1337 1295
1338 lockdep_assert_held(&priv->mutex); 1296 lockdep_assert_held(&priv->shrd->mutex);
1339 1297
1340 if (priv->scan_vif == vif) { 1298 if (priv->scan_vif == vif) {
1341 iwl_scan_cancel_timeout(priv, 200); 1299 iwl_scan_cancel_timeout(priv, 200);
@@ -1367,14 +1325,14 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1367 1325
1368 IWL_DEBUG_MAC80211(priv, "enter\n"); 1326 IWL_DEBUG_MAC80211(priv, "enter\n");
1369 1327
1370 mutex_lock(&priv->mutex); 1328 mutex_lock(&priv->shrd->mutex);
1371 1329
1372 WARN_ON(ctx->vif != vif); 1330 WARN_ON(ctx->vif != vif);
1373 ctx->vif = NULL; 1331 ctx->vif = NULL;
1374 1332
1375 iwl_teardown_interface(priv, vif, false); 1333 iwl_teardown_interface(priv, vif, false);
1376 1334
1377 mutex_unlock(&priv->mutex); 1335 mutex_unlock(&priv->shrd->mutex);
1378 1336
1379 IWL_DEBUG_MAC80211(priv, "leave\n"); 1337 IWL_DEBUG_MAC80211(priv, "leave\n");
1380 1338
@@ -1398,7 +1356,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1398{ 1356{
1399 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE; 1357 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1400 1358
1401 if (iwl_debug_level & IWL_DL_TX) { 1359 if (iwl_get_debug_level(priv->shrd) & IWL_DL_TX) {
1402 if (!priv->tx_traffic) { 1360 if (!priv->tx_traffic) {
1403 priv->tx_traffic = 1361 priv->tx_traffic =
1404 kzalloc(traffic_size, GFP_KERNEL); 1362 kzalloc(traffic_size, GFP_KERNEL);
@@ -1406,7 +1364,7 @@ int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1406 return -ENOMEM; 1364 return -ENOMEM;
1407 } 1365 }
1408 } 1366 }
1409 if (iwl_debug_level & IWL_DL_RX) { 1367 if (iwl_get_debug_level(priv->shrd) & IWL_DL_RX) {
1410 if (!priv->rx_traffic) { 1368 if (!priv->rx_traffic) {
1411 priv->rx_traffic = 1369 priv->rx_traffic =
1412 kzalloc(traffic_size, GFP_KERNEL); 1370 kzalloc(traffic_size, GFP_KERNEL);
@@ -1433,7 +1391,7 @@ void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1433 __le16 fc; 1391 __le16 fc;
1434 u16 len; 1392 u16 len;
1435 1393
1436 if (likely(!(iwl_debug_level & IWL_DL_TX))) 1394 if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_TX)))
1437 return; 1395 return;
1438 1396
1439 if (!priv->tx_traffic) 1397 if (!priv->tx_traffic)
@@ -1457,7 +1415,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1457 __le16 fc; 1415 __le16 fc;
1458 u16 len; 1416 u16 len;
1459 1417
1460 if (likely(!(iwl_debug_level & IWL_DL_RX))) 1418 if (likely(!(iwl_get_debug_level(priv->shrd) & IWL_DL_RX)))
1461 return; 1419 return;
1462 1420
1463 if (!priv->rx_traffic) 1421 if (!priv->rx_traffic)
@@ -1614,7 +1572,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
1614 1572
1615static void iwl_force_rf_reset(struct iwl_priv *priv) 1573static void iwl_force_rf_reset(struct iwl_priv *priv)
1616{ 1574{
1617 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1575 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
1618 return; 1576 return;
1619 1577
1620 if (!iwl_is_any_associated(priv)) { 1578 if (!iwl_is_any_associated(priv)) {
@@ -1639,7 +1597,7 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
1639{ 1597{
1640 struct iwl_force_reset *force_reset; 1598 struct iwl_force_reset *force_reset;
1641 1599
1642 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1600 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
1643 return -EINVAL; 1601 return -EINVAL;
1644 1602
1645 if (mode >= IWL_MAX_FORCE_RESET) { 1603 if (mode >= IWL_MAX_FORCE_RESET) {
@@ -1698,9 +1656,9 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1698 1656
1699 newtype = ieee80211_iftype_p2p(newtype, newp2p); 1657 newtype = ieee80211_iftype_p2p(newtype, newp2p);
1700 1658
1701 mutex_lock(&priv->mutex); 1659 mutex_lock(&priv->shrd->mutex);
1702 1660
1703 if (!ctx->vif || !iwl_is_ready_rf(priv)) { 1661 if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
1704 /* 1662 /*
1705 * Huh? But wait ... this can maybe happen when 1663 * Huh? But wait ... this can maybe happen when
1706 * we're in the middle of a firmware restart! 1664 * we're in the middle of a firmware restart!
@@ -1762,36 +1720,16 @@ int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1762 err = 0; 1720 err = 0;
1763 1721
1764 out: 1722 out:
1765 mutex_unlock(&priv->mutex); 1723 mutex_unlock(&priv->shrd->mutex);
1766 return err; 1724 return err;
1767} 1725}
1768 1726
1769/* 1727static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
1770 * On every watchdog tick we check (latest) time stamp. If it does not
1771 * change during timeout period and queue is not empty we reset firmware.
1772 */
1773static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
1774{ 1728{
1775 struct iwl_tx_queue *txq = &priv->txq[cnt]; 1729 if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
1776 struct iwl_queue *q = &txq->q; 1730 int ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1777 unsigned long timeout;
1778 int ret;
1779
1780 if (q->read_ptr == q->write_ptr) {
1781 txq->time_stamp = jiffies;
1782 return 0;
1783 }
1784
1785 timeout = txq->time_stamp +
1786 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
1787
1788 if (time_after(jiffies, timeout)) {
1789 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
1790 q->id, priv->cfg->base_params->wd_timeout);
1791 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1792 return (ret == -EAGAIN) ? 0 : 1; 1731 return (ret == -EAGAIN) ? 0 : 1;
1793 } 1732 }
1794
1795 return 0; 1733 return 0;
1796} 1734}
1797 1735
@@ -1811,7 +1749,7 @@ void iwl_bg_watchdog(unsigned long data)
1811 int cnt; 1749 int cnt;
1812 unsigned long timeout; 1750 unsigned long timeout;
1813 1751
1814 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1752 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
1815 return; 1753 return;
1816 1754
1817 timeout = priv->cfg->base_params->wd_timeout; 1755 timeout = priv->cfg->base_params->wd_timeout;
@@ -1819,14 +1757,14 @@ void iwl_bg_watchdog(unsigned long data)
1819 return; 1757 return;
1820 1758
1821 /* monitor and check for stuck cmd queue */ 1759 /* monitor and check for stuck cmd queue */
1822 if (iwl_check_stuck_queue(priv, priv->cmd_queue)) 1760 if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue))
1823 return; 1761 return;
1824 1762
1825 /* monitor and check for other stuck queues */ 1763 /* monitor and check for other stuck queues */
1826 if (iwl_is_any_associated(priv)) { 1764 if (iwl_is_any_associated(priv)) {
1827 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) { 1765 for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) {
1828 /* skip as we already checked the command queue */ 1766 /* skip as we already checked the command queue */
1829 if (cnt == priv->cmd_queue) 1767 if (cnt == priv->shrd->cmd_queue)
1830 continue; 1768 continue;
1831 if (iwl_check_stuck_queue(priv, cnt)) 1769 if (iwl_check_stuck_queue(priv, cnt))
1832 return; 1770 return;
@@ -1865,12 +1803,12 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
1865 1803
1866 quot = (usec / interval) & 1804 quot = (usec / interval) &
1867 (iwl_beacon_time_mask_high(priv, 1805 (iwl_beacon_time_mask_high(priv,
1868 priv->hw_params.beacon_time_tsf_bits) >> 1806 hw_params(priv).beacon_time_tsf_bits) >>
1869 priv->hw_params.beacon_time_tsf_bits); 1807 hw_params(priv).beacon_time_tsf_bits);
1870 rem = (usec % interval) & iwl_beacon_time_mask_low(priv, 1808 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
1871 priv->hw_params.beacon_time_tsf_bits); 1809 hw_params(priv).beacon_time_tsf_bits);
1872 1810
1873 return (quot << priv->hw_params.beacon_time_tsf_bits) + rem; 1811 return (quot << hw_params(priv).beacon_time_tsf_bits) + rem;
1874} 1812}
1875 1813
1876/* base is usually what we get from ucode with each received frame, 1814/* base is usually what we get from ucode with each received frame,
@@ -1880,64 +1818,50 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
1880 u32 addon, u32 beacon_interval) 1818 u32 addon, u32 beacon_interval)
1881{ 1819{
1882 u32 base_low = base & iwl_beacon_time_mask_low(priv, 1820 u32 base_low = base & iwl_beacon_time_mask_low(priv,
1883 priv->hw_params.beacon_time_tsf_bits); 1821 hw_params(priv).beacon_time_tsf_bits);
1884 u32 addon_low = addon & iwl_beacon_time_mask_low(priv, 1822 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
1885 priv->hw_params.beacon_time_tsf_bits); 1823 hw_params(priv).beacon_time_tsf_bits);
1886 u32 interval = beacon_interval * TIME_UNIT; 1824 u32 interval = beacon_interval * TIME_UNIT;
1887 u32 res = (base & iwl_beacon_time_mask_high(priv, 1825 u32 res = (base & iwl_beacon_time_mask_high(priv,
1888 priv->hw_params.beacon_time_tsf_bits)) + 1826 hw_params(priv).beacon_time_tsf_bits)) +
1889 (addon & iwl_beacon_time_mask_high(priv, 1827 (addon & iwl_beacon_time_mask_high(priv,
1890 priv->hw_params.beacon_time_tsf_bits)); 1828 hw_params(priv).beacon_time_tsf_bits));
1891 1829
1892 if (base_low > addon_low) 1830 if (base_low > addon_low)
1893 res += base_low - addon_low; 1831 res += base_low - addon_low;
1894 else if (base_low < addon_low) { 1832 else if (base_low < addon_low) {
1895 res += interval + base_low - addon_low; 1833 res += interval + base_low - addon_low;
1896 res += (1 << priv->hw_params.beacon_time_tsf_bits); 1834 res += (1 << hw_params(priv).beacon_time_tsf_bits);
1897 } else 1835 } else
1898 res += (1 << priv->hw_params.beacon_time_tsf_bits); 1836 res += (1 << hw_params(priv).beacon_time_tsf_bits);
1899 1837
1900 return cpu_to_le32(res); 1838 return cpu_to_le32(res);
1901} 1839}
1902 1840
1903#ifdef CONFIG_PM 1841void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
1904 1842 enum iwl_rxon_context_id ctx,
1905int iwl_suspend(struct iwl_priv *priv) 1843 u8 sta_id, u8 tid)
1906{ 1844{
1907 /* 1845 struct ieee80211_vif *vif = priv->contexts[ctx].vif;
1908 * This function is called when system goes into suspend state 1846 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1909 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
1910 * first but since iwl_mac_stop() has no knowledge of who the caller is,
1911 * it will not call apm_ops.stop() to stop the DMA operation.
1912 * Calling apm_ops.stop here to make sure we stop the DMA.
1913 *
1914 * But of course ... if we have configured WoWLAN then we did other
1915 * things already :-)
1916 */
1917 if (!priv->wowlan)
1918 iwl_apm_stop(priv);
1919 1847
1920 return 0; 1848 if (ctx == NUM_IWL_RXON_CTX)
1849 ctx = priv->stations[sta_id].ctxid;
1850 vif = priv->contexts[ctx].vif;
1851
1852 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
1921} 1853}
1922 1854
1923int iwl_resume(struct iwl_priv *priv) 1855void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
1856 enum iwl_rxon_context_id ctx,
1857 u8 sta_id, u8 tid)
1924{ 1858{
1925 bool hw_rfkill = false; 1859 struct ieee80211_vif *vif;
1926 1860 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1927 iwl_enable_interrupts(priv);
1928 1861
1929 if (!(iwl_read32(priv, CSR_GP_CNTRL) & 1862 if (ctx == NUM_IWL_RXON_CTX)
1930 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 1863 ctx = priv->stations[sta_id].ctxid;
1931 hw_rfkill = true; 1864 vif = priv->contexts[ctx].vif;
1932 1865
1933 if (hw_rfkill) 1866 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
1934 set_bit(STATUS_RF_KILL_HW, &priv->status);
1935 else
1936 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1937
1938 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
1939
1940 return 0;
1941} 1867}
1942
1943#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 42bcb469d32c..2ea8a2e0dfbc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -71,11 +71,6 @@
71struct iwl_host_cmd; 71struct iwl_host_cmd;
72struct iwl_cmd; 72struct iwl_cmd;
73 73
74
75#define IWLWIFI_VERSION "in-tree:"
76#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
77#define DRV_AUTHOR "<ilw@linux.intel.com>"
78
79#define TIME_UNIT 1024 74#define TIME_UNIT 1024
80 75
81#define IWL_CMD(x) case x: return #x 76#define IWL_CMD(x) case x: return #x
@@ -101,23 +96,6 @@ struct iwl_lib_ops {
101 void (*temperature)(struct iwl_priv *priv); 96 void (*temperature)(struct iwl_priv *priv);
102}; 97};
103 98
104struct iwl_mod_params {
105 int sw_crypto; /* def: 0 = using hardware encryption */
106 int num_of_queues; /* def: HW dependent */
107 int disable_11n; /* def: 0 = 11n capabilities enabled */
108 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
109 int antenna; /* def: 0 = both antennas (use diversity) */
110 int restart_fw; /* def: 1 = restart firmware */
111 bool plcp_check; /* def: true = enable plcp health check */
112 bool ack_check; /* def: false = disable ack health check */
113 bool wd_disable; /* def: false = enable stuck queue check */
114 bool bt_coex_active; /* def: true = enable bt coex */
115 int led_mode; /* def: 0 = system default */
116 bool no_sleep_autoadjust; /* def: true = disable autoadjust */
117 bool power_save; /* def: false = disable power save */
118 int power_level; /* def: 1 = power level */
119};
120
121/* 99/*
122 * @max_ll_items: max number of OTP blocks 100 * @max_ll_items: max number of OTP blocks
123 * @shadow_ram_support: shadow support for OTP memory 101 * @shadow_ram_support: shadow support for OTP memory
@@ -222,16 +200,7 @@ struct iwl_ht_params {
222 * We enable the driver to be backward compatible wrt API version. The 200 * We enable the driver to be backward compatible wrt API version. The
223 * driver specifies which APIs it supports (with @ucode_api_max being the 201 * driver specifies which APIs it supports (with @ucode_api_max being the
224 * highest and @ucode_api_min the lowest). Firmware will only be loaded if 202 * highest and @ucode_api_min the lowest). Firmware will only be loaded if
225 * it has a supported API version. The firmware's API version will be 203 * it has a supported API version.
226 * stored in @iwl_priv, enabling the driver to make runtime changes based
227 * on firmware version used.
228 *
229 * For example,
230 * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
231 * Driver interacts with Firmware API version >= 2.
232 * } else {
233 * Driver interacts with Firmware API version 1.
234 * }
235 * 204 *
236 * The ideal usage of this infrastructure is to treat a new ucode API 205 * The ideal usage of this infrastructure is to treat a new ucode API
237 * release as a new hardware revision. 206 * release as a new hardware revision.
@@ -292,7 +261,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
292void iwl_connection_init_rx_config(struct iwl_priv *priv, 261void iwl_connection_init_rx_config(struct iwl_priv *priv,
293 struct iwl_rxon_context *ctx); 262 struct iwl_rxon_context *ctx);
294void iwl_set_rate(struct iwl_priv *priv); 263void iwl_set_rate(struct iwl_priv *priv);
295void iwl_irq_handle_error(struct iwl_priv *priv);
296int iwl_mac_add_interface(struct ieee80211_hw *hw, 264int iwl_mac_add_interface(struct ieee80211_hw *hw,
297 struct ieee80211_vif *vif); 265 struct ieee80211_vif *vif);
298void iwl_mac_remove_interface(struct ieee80211_hw *hw, 266void iwl_mac_remove_interface(struct ieee80211_hw *hw,
@@ -398,22 +366,10 @@ u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
398__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, 366__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
399 u32 addon, u32 beacon_interval); 367 u32 addon, u32 beacon_interval);
400 368
401#ifdef CONFIG_PM
402int iwl_suspend(struct iwl_priv *priv);
403int iwl_resume(struct iwl_priv *priv);
404#endif /* !CONFIG_PM */
405
406int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
407void __devexit iwl_remove(struct iwl_priv * priv);
408 369
409/***************************************************** 370/*****************************************************
410* Error Handling Debugging 371* Error Handling Debugging
411******************************************************/ 372******************************************************/
412void iwl_dump_nic_error_log(struct iwl_priv *priv);
413int iwl_dump_nic_event_log(struct iwl_priv *priv,
414 bool full_log, char **buf, bool display);
415void iwl_dump_csr(struct iwl_priv *priv);
416int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
417#ifdef CONFIG_IWLWIFI_DEBUG 373#ifdef CONFIG_IWLWIFI_DEBUG
418void iwl_print_rx_config_cmd(struct iwl_priv *priv, 374void iwl_print_rx_config_cmd(struct iwl_priv *priv,
419 struct iwl_rxon_context *ctx); 375 struct iwl_rxon_context *ctx);
@@ -424,79 +380,11 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
424} 380}
425#endif 381#endif
426 382
427void iwl_clear_isr_stats(struct iwl_priv *priv);
428
429/***************************************************** 383/*****************************************************
430* GEOS 384* GEOS
431******************************************************/ 385******************************************************/
432int iwlcore_init_geos(struct iwl_priv *priv); 386int iwl_init_geos(struct iwl_priv *priv);
433void iwlcore_free_geos(struct iwl_priv *priv); 387void iwl_free_geos(struct iwl_priv *priv);
434
435/*************** DRIVER STATUS FUNCTIONS *****/
436
437#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
438/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
439#define STATUS_INT_ENABLED 2
440#define STATUS_RF_KILL_HW 3
441#define STATUS_CT_KILL 4
442#define STATUS_INIT 5
443#define STATUS_ALIVE 6
444#define STATUS_READY 7
445#define STATUS_TEMPERATURE 8
446#define STATUS_GEO_CONFIGURED 9
447#define STATUS_EXIT_PENDING 10
448#define STATUS_STATISTICS 12
449#define STATUS_SCANNING 13
450#define STATUS_SCAN_ABORTING 14
451#define STATUS_SCAN_HW 15
452#define STATUS_POWER_PMI 16
453#define STATUS_FW_ERROR 17
454#define STATUS_DEVICE_ENABLED 18
455#define STATUS_CHANNEL_SWITCH_PENDING 19
456
457
458static inline int iwl_is_ready(struct iwl_priv *priv)
459{
460 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
461 * set but EXIT_PENDING is not */
462 return test_bit(STATUS_READY, &priv->status) &&
463 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
464 !test_bit(STATUS_EXIT_PENDING, &priv->status);
465}
466
467static inline int iwl_is_alive(struct iwl_priv *priv)
468{
469 return test_bit(STATUS_ALIVE, &priv->status);
470}
471
472static inline int iwl_is_init(struct iwl_priv *priv)
473{
474 return test_bit(STATUS_INIT, &priv->status);
475}
476
477static inline int iwl_is_rfkill_hw(struct iwl_priv *priv)
478{
479 return test_bit(STATUS_RF_KILL_HW, &priv->status);
480}
481
482static inline int iwl_is_rfkill(struct iwl_priv *priv)
483{
484 return iwl_is_rfkill_hw(priv);
485}
486
487static inline int iwl_is_ctkill(struct iwl_priv *priv)
488{
489 return test_bit(STATUS_CT_KILL, &priv->status);
490}
491
492static inline int iwl_is_ready_rf(struct iwl_priv *priv)
493{
494
495 if (iwl_is_rfkill(priv))
496 return 0;
497
498 return iwl_is_ready(priv);
499}
500 388
501extern void iwl_send_bt_config(struct iwl_priv *priv); 389extern void iwl_send_bt_config(struct iwl_priv *priv);
502extern int iwl_send_statistics_request(struct iwl_priv *priv, 390extern int iwl_send_statistics_request(struct iwl_priv *priv,
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index f9a407e40aff..7014f4124484 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -29,50 +29,51 @@
29#ifndef __iwl_debug_h__ 29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#include "iwl-bus.h"
33#include "iwl-shared.h"
34
32struct iwl_priv; 35struct iwl_priv;
33extern u32 iwl_debug_level;
34 36
35#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a) 37/*No matter what is m (priv, bus, trans), this will work */
36#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a) 38#define IWL_ERR(m, f, a...) dev_err(bus(m)->dev, f, ## a)
37#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a) 39#define IWL_WARN(m, f, a...) dev_warn(bus(m)->dev, f, ## a)
38#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a) 40#define IWL_INFO(m, f, a...) dev_info(bus(m)->dev, f, ## a)
41#define IWL_CRIT(m, f, a...) dev_crit(bus(m)->dev, f, ## a)
39 42
40#define iwl_print_hex_error(priv, p, len) \ 43#define iwl_print_hex_error(m, p, len) \
41do { \ 44do { \
42 print_hex_dump(KERN_ERR, "iwl data: ", \ 45 print_hex_dump(KERN_ERR, "iwl data: ", \
43 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 46 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
44} while (0) 47} while (0)
45 48
46#ifdef CONFIG_IWLWIFI_DEBUG 49#ifdef CONFIG_IWLWIFI_DEBUG
47#define IWL_DEBUG(__priv, level, fmt, args...) \ 50#define IWL_DEBUG(m, level, fmt, args...) \
48do { \ 51do { \
49 if (iwl_get_debug_level(__priv) & (level)) \ 52 if (iwl_get_debug_level((m)->shrd) & (level)) \
50 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ 53 dev_printk(KERN_ERR, bus(m)->dev, \
51 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 54 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
52 __func__ , ## args); \ 55 __func__ , ## args); \
53} while (0) 56} while (0)
54 57
55#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \ 58#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
56do { \ 59do { \
57 if ((iwl_get_debug_level(__priv) & (level)) && net_ratelimit()) \ 60 if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
58 dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \ 61 dev_printk(KERN_ERR, bus(m)->dev, \
59 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \ 62 "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
60 __func__ , ## args); \ 63 __func__ , ## args); \
61} while (0) 64} while (0)
62 65
63#define iwl_print_hex_dump(priv, level, p, len) \ 66#define iwl_print_hex_dump(m, level, p, len) \
64do { \ 67do { \
65 if (iwl_get_debug_level(priv) & level) \ 68 if (iwl_get_debug_level((m)->shrd) & level) \
66 print_hex_dump(KERN_DEBUG, "iwl data: ", \ 69 print_hex_dump(KERN_DEBUG, "iwl data: ", \
67 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \ 70 DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
68} while (0) 71} while (0)
69 72
70#else 73#else
71#define IWL_DEBUG(__priv, level, fmt, args...) 74#define IWL_DEBUG(m, level, fmt, args...)
72#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 75#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
73static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, 76#define iwl_print_hex_dump(m, level, p, len)
74 const void *p, u32 len)
75{}
76#endif /* CONFIG_IWLWIFI_DEBUG */ 77#endif /* CONFIG_IWLWIFI_DEBUG */
77 78
78#ifdef CONFIG_IWLWIFI_DEBUGFS 79#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -166,6 +167,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
166#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) 167#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
167#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) 168#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
168#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) 169#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
170#define IWL_DEBUG_FW_ERRORS(p, f, a...) IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
169#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a) 171#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
170#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \ 172#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
171 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a) 173 IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index ec1485b2d3fe..e320cc10167e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -254,7 +254,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
254 sram = priv->dbgfs_sram_offset & ~0x3; 254 sram = priv->dbgfs_sram_offset & ~0x3;
255 255
256 /* read the first u32 from sram */ 256 /* read the first u32 from sram */
257 val = iwl_read_targ_mem(priv, sram); 257 val = iwl_read_targ_mem(bus(priv), sram);
258 258
259 for (; len; len--) { 259 for (; len; len--) {
260 /* put the address at the start of every line */ 260 /* put the address at the start of every line */
@@ -273,7 +273,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
273 if (++offset == 4) { 273 if (++offset == 4) {
274 sram += 4; 274 sram += 4;
275 offset = 0; 275 offset = 0;
276 val = iwl_read_targ_mem(priv, sram); 276 val = iwl_read_targ_mem(bus(priv), sram);
277 } 277 }
278 278
279 /* put in extra spaces and split lines for human readability */ 279 /* put in extra spaces and split lines for human readability */
@@ -340,7 +340,8 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
340{ 340{
341 struct iwl_priv *priv = file->private_data; 341 struct iwl_priv *priv = file->private_data;
342 struct iwl_station_entry *station; 342 struct iwl_station_entry *station;
343 int max_sta = priv->hw_params.max_stations; 343 struct iwl_tid_data *tid_data;
344 int max_sta = hw_params(priv).max_stations;
344 char *buf; 345 char *buf;
345 int i, j, pos = 0; 346 int i, j, pos = 0;
346 ssize_t ret; 347 ssize_t ret;
@@ -363,22 +364,18 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
363 i, station->sta.sta.addr, 364 i, station->sta.sta.addr,
364 station->sta.station_flags_msk); 365 station->sta.station_flags_msk);
365 pos += scnprintf(buf + pos, bufsz - pos, 366 pos += scnprintf(buf + pos, bufsz - pos,
366 "TID\tseq_num\ttxq_id\tframes\ttfds\t"); 367 "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
367 pos += scnprintf(buf + pos, bufsz - pos,
368 "start_idx\tbitmap\t\t\trate_n_flags\n");
369 368
370 for (j = 0; j < MAX_TID_COUNT; j++) { 369 for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
370 tid_data = &priv->shrd->tid_data[i][j];
371 pos += scnprintf(buf + pos, bufsz - pos, 371 pos += scnprintf(buf + pos, bufsz - pos,
372 "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x", 372 "%d:\t%#x\t%#x\t%u\t%#x",
373 j, station->tid[j].seq_number, 373 j, tid_data->seq_number,
374 station->tid[j].agg.txq_id, 374 tid_data->agg.txq_id,
375 station->tid[j].agg.frame_count, 375 tid_data->tfds_in_queue,
376 station->tid[j].tfds_in_queue, 376 tid_data->agg.rate_n_flags);
377 station->tid[j].agg.start_idx, 377
378 station->tid[j].agg.bitmap, 378 if (tid_data->agg.wait_for_ba)
379 station->tid[j].agg.rate_n_flags);
380
381 if (station->tid[j].agg.wait_for_ba)
382 pos += scnprintf(buf + pos, bufsz - pos, 379 pos += scnprintf(buf + pos, bufsz - pos,
383 " - waitforba"); 380 " - waitforba");
384 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 381 pos += scnprintf(buf + pos, bufsz - pos, "\n");
@@ -442,46 +439,6 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
442 return ret; 439 return ret;
443} 440}
444 441
445static ssize_t iwl_dbgfs_log_event_read(struct file *file,
446 char __user *user_buf,
447 size_t count, loff_t *ppos)
448{
449 struct iwl_priv *priv = file->private_data;
450 char *buf;
451 int pos = 0;
452 ssize_t ret = -ENOMEM;
453
454 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
455 if (buf) {
456 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
457 kfree(buf);
458 }
459 return ret;
460}
461
462static ssize_t iwl_dbgfs_log_event_write(struct file *file,
463 const char __user *user_buf,
464 size_t count, loff_t *ppos)
465{
466 struct iwl_priv *priv = file->private_data;
467 u32 event_log_flag;
468 char buf[8];
469 int buf_size;
470
471 memset(buf, 0, sizeof(buf));
472 buf_size = min(count, sizeof(buf) - 1);
473 if (copy_from_user(buf, user_buf, buf_size))
474 return -EFAULT;
475 if (sscanf(buf, "%d", &event_log_flag) != 1)
476 return -EFAULT;
477 if (event_log_flag == 1)
478 iwl_dump_nic_event_log(priv, true, NULL, false);
479
480 return count;
481}
482
483
484
485static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf, 442static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
486 size_t count, loff_t *ppos) 443 size_t count, loff_t *ppos)
487{ 444{
@@ -492,7 +449,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
492 char *buf; 449 char *buf;
493 ssize_t ret; 450 ssize_t ret;
494 451
495 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status)) 452 if (!test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status))
496 return -EAGAIN; 453 return -EAGAIN;
497 454
498 buf = kzalloc(bufsz, GFP_KERNEL); 455 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -562,45 +519,46 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
562 const size_t bufsz = sizeof(buf); 519 const size_t bufsz = sizeof(buf);
563 520
564 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n", 521 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
565 test_bit(STATUS_HCMD_ACTIVE, &priv->status)); 522 test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
566 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n", 523 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
567 test_bit(STATUS_INT_ENABLED, &priv->status)); 524 test_bit(STATUS_INT_ENABLED, &priv->shrd->status));
568 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 525 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
569 test_bit(STATUS_RF_KILL_HW, &priv->status)); 526 test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
570 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", 527 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
571 test_bit(STATUS_CT_KILL, &priv->status)); 528 test_bit(STATUS_CT_KILL, &priv->shrd->status));
572 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", 529 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
573 test_bit(STATUS_INIT, &priv->status)); 530 test_bit(STATUS_INIT, &priv->shrd->status));
574 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", 531 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
575 test_bit(STATUS_ALIVE, &priv->status)); 532 test_bit(STATUS_ALIVE, &priv->shrd->status));
576 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n", 533 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
577 test_bit(STATUS_READY, &priv->status)); 534 test_bit(STATUS_READY, &priv->shrd->status));
578 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n", 535 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
579 test_bit(STATUS_TEMPERATURE, &priv->status)); 536 test_bit(STATUS_TEMPERATURE, &priv->shrd->status));
580 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n", 537 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
581 test_bit(STATUS_GEO_CONFIGURED, &priv->status)); 538 test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status));
582 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n", 539 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
583 test_bit(STATUS_EXIT_PENDING, &priv->status)); 540 test_bit(STATUS_EXIT_PENDING, &priv->shrd->status));
584 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n", 541 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
585 test_bit(STATUS_STATISTICS, &priv->status)); 542 test_bit(STATUS_STATISTICS, &priv->shrd->status));
586 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n", 543 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
587 test_bit(STATUS_SCANNING, &priv->status)); 544 test_bit(STATUS_SCANNING, &priv->shrd->status));
588 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n", 545 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
589 test_bit(STATUS_SCAN_ABORTING, &priv->status)); 546 test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status));
590 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", 547 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
591 test_bit(STATUS_SCAN_HW, &priv->status)); 548 test_bit(STATUS_SCAN_HW, &priv->shrd->status));
592 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", 549 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
593 test_bit(STATUS_POWER_PMI, &priv->status)); 550 test_bit(STATUS_POWER_PMI, &priv->shrd->status));
594 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", 551 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
595 test_bit(STATUS_FW_ERROR, &priv->status)); 552 test_bit(STATUS_FW_ERROR, &priv->shrd->status));
596 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 553 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
597} 554}
598 555
599static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 556static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
600 char __user *user_buf, 557 char __user *user_buf,
601 size_t count, loff_t *ppos) { 558 size_t count, loff_t *ppos) {
602 559
603 struct iwl_priv *priv = file->private_data; 560 struct iwl_priv *priv = file->private_data;
561
604 int pos = 0; 562 int pos = 0;
605 int cnt = 0; 563 int cnt = 0;
606 char *buf; 564 char *buf;
@@ -613,61 +571,25 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
613 return -ENOMEM; 571 return -ENOMEM;
614 } 572 }
615 573
616 pos += scnprintf(buf + pos, bufsz - pos,
617 "Interrupt Statistics Report:\n");
618
619 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
620 priv->isr_stats.hw);
621 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
622 priv->isr_stats.sw);
623 if (priv->isr_stats.sw || priv->isr_stats.hw) {
624 pos += scnprintf(buf + pos, bufsz - pos,
625 "\tLast Restarting Code: 0x%X\n",
626 priv->isr_stats.err_code);
627 }
628#ifdef CONFIG_IWLWIFI_DEBUG
629 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
630 priv->isr_stats.sch);
631 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
632 priv->isr_stats.alive);
633#endif
634 pos += scnprintf(buf + pos, bufsz - pos,
635 "HW RF KILL switch toggled:\t %u\n",
636 priv->isr_stats.rfkill);
637
638 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
639 priv->isr_stats.ctkill);
640
641 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
642 priv->isr_stats.wakeup);
643
644 pos += scnprintf(buf + pos, bufsz - pos,
645 "Rx command responses:\t\t %u\n",
646 priv->isr_stats.rx);
647 for (cnt = 0; cnt < REPLY_MAX; cnt++) { 574 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
648 if (priv->isr_stats.rx_handlers[cnt] > 0) 575 if (priv->rx_handlers_stats[cnt] > 0)
649 pos += scnprintf(buf + pos, bufsz - pos, 576 pos += scnprintf(buf + pos, bufsz - pos,
650 "\tRx handler[%36s]:\t\t %u\n", 577 "\tRx handler[%36s]:\t\t %u\n",
651 get_cmd_string(cnt), 578 get_cmd_string(cnt),
652 priv->isr_stats.rx_handlers[cnt]); 579 priv->rx_handlers_stats[cnt]);
653 } 580 }
654 581
655 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
656 priv->isr_stats.tx);
657
658 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
659 priv->isr_stats.unhandled);
660
661 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 582 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
662 kfree(buf); 583 kfree(buf);
663 return ret; 584 return ret;
664} 585}
665 586
666static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 587static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
667 const char __user *user_buf, 588 const char __user *user_buf,
668 size_t count, loff_t *ppos) 589 size_t count, loff_t *ppos)
669{ 590{
670 struct iwl_priv *priv = file->private_data; 591 struct iwl_priv *priv = file->private_data;
592
671 char buf[8]; 593 char buf[8];
672 int buf_size; 594 int buf_size;
673 u32 reset_flag; 595 u32 reset_flag;
@@ -679,7 +601,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
679 if (sscanf(buf, "%x", &reset_flag) != 1) 601 if (sscanf(buf, "%x", &reset_flag) != 1)
680 return -EFAULT; 602 return -EFAULT;
681 if (reset_flag == 0) 603 if (reset_flag == 0)
682 iwl_clear_isr_stats(priv); 604 memset(&priv->rx_handlers_stats[0], 0,
605 sizeof(priv->rx_handlers_stats));
683 606
684 return count; 607 return count;
685} 608}
@@ -814,14 +737,14 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
814 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) 737 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
815 return -EINVAL; 738 return -EINVAL;
816 739
817 if (!iwl_is_ready_rf(priv)) 740 if (!iwl_is_ready_rf(priv->shrd))
818 return -EAGAIN; 741 return -EAGAIN;
819 742
820 priv->power_data.debug_sleep_level_override = value; 743 priv->power_data.debug_sleep_level_override = value;
821 744
822 mutex_lock(&priv->mutex); 745 mutex_lock(&priv->shrd->mutex);
823 iwl_power_update_mode(priv, true); 746 iwl_power_update_mode(priv, true);
824 mutex_unlock(&priv->mutex); 747 mutex_unlock(&priv->shrd->mutex);
825 748
826 return count; 749 return count;
827} 750}
@@ -870,188 +793,17 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
870 793
871DEBUGFS_READ_WRITE_FILE_OPS(sram); 794DEBUGFS_READ_WRITE_FILE_OPS(sram);
872DEBUGFS_READ_FILE_OPS(wowlan_sram); 795DEBUGFS_READ_FILE_OPS(wowlan_sram);
873DEBUGFS_READ_WRITE_FILE_OPS(log_event);
874DEBUGFS_READ_FILE_OPS(nvm); 796DEBUGFS_READ_FILE_OPS(nvm);
875DEBUGFS_READ_FILE_OPS(stations); 797DEBUGFS_READ_FILE_OPS(stations);
876DEBUGFS_READ_FILE_OPS(channels); 798DEBUGFS_READ_FILE_OPS(channels);
877DEBUGFS_READ_FILE_OPS(status); 799DEBUGFS_READ_FILE_OPS(status);
878DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 800DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
879DEBUGFS_READ_FILE_OPS(qos); 801DEBUGFS_READ_FILE_OPS(qos);
880DEBUGFS_READ_FILE_OPS(thermal_throttling); 802DEBUGFS_READ_FILE_OPS(thermal_throttling);
881DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); 803DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
882DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 804DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
883DEBUGFS_READ_FILE_OPS(current_sleep_command); 805DEBUGFS_READ_FILE_OPS(current_sleep_command);
884 806
885static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
886 char __user *user_buf,
887 size_t count, loff_t *ppos)
888{
889 struct iwl_priv *priv = file->private_data;
890 int pos = 0, ofs = 0;
891 int cnt = 0, entry;
892 struct iwl_tx_queue *txq;
893 struct iwl_queue *q;
894 struct iwl_rx_queue *rxq = &priv->rxq;
895 char *buf;
896 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
897 (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
898 const u8 *ptr;
899 ssize_t ret;
900
901 if (!priv->txq) {
902 IWL_ERR(priv, "txq not ready\n");
903 return -EAGAIN;
904 }
905 buf = kzalloc(bufsz, GFP_KERNEL);
906 if (!buf) {
907 IWL_ERR(priv, "Can not allocate buffer\n");
908 return -ENOMEM;
909 }
910 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
911 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
912 txq = &priv->txq[cnt];
913 q = &txq->q;
914 pos += scnprintf(buf + pos, bufsz - pos,
915 "q[%d]: read_ptr: %u, write_ptr: %u\n",
916 cnt, q->read_ptr, q->write_ptr);
917 }
918 if (priv->tx_traffic && (iwl_debug_level & IWL_DL_TX)) {
919 ptr = priv->tx_traffic;
920 pos += scnprintf(buf + pos, bufsz - pos,
921 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
922 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
923 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
924 entry++, ofs += 16) {
925 pos += scnprintf(buf + pos, bufsz - pos,
926 "0x%.4x ", ofs);
927 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
928 buf + pos, bufsz - pos, 0);
929 pos += strlen(buf + pos);
930 if (bufsz - pos > 0)
931 buf[pos++] = '\n';
932 }
933 }
934 }
935
936 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
937 pos += scnprintf(buf + pos, bufsz - pos,
938 "read: %u, write: %u\n",
939 rxq->read, rxq->write);
940
941 if (priv->rx_traffic && (iwl_debug_level & IWL_DL_RX)) {
942 ptr = priv->rx_traffic;
943 pos += scnprintf(buf + pos, bufsz - pos,
944 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
945 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
946 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
947 entry++, ofs += 16) {
948 pos += scnprintf(buf + pos, bufsz - pos,
949 "0x%.4x ", ofs);
950 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
951 buf + pos, bufsz - pos, 0);
952 pos += strlen(buf + pos);
953 if (bufsz - pos > 0)
954 buf[pos++] = '\n';
955 }
956 }
957 }
958
959 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
960 kfree(buf);
961 return ret;
962}
963
964static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
965 const char __user *user_buf,
966 size_t count, loff_t *ppos)
967{
968 struct iwl_priv *priv = file->private_data;
969 char buf[8];
970 int buf_size;
971 int traffic_log;
972
973 memset(buf, 0, sizeof(buf));
974 buf_size = min(count, sizeof(buf) - 1);
975 if (copy_from_user(buf, user_buf, buf_size))
976 return -EFAULT;
977 if (sscanf(buf, "%d", &traffic_log) != 1)
978 return -EFAULT;
979 if (traffic_log == 0)
980 iwl_reset_traffic_log(priv);
981
982 return count;
983}
984
985static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
986 char __user *user_buf,
987 size_t count, loff_t *ppos) {
988
989 struct iwl_priv *priv = file->private_data;
990 struct iwl_tx_queue *txq;
991 struct iwl_queue *q;
992 char *buf;
993 int pos = 0;
994 int cnt;
995 int ret;
996 const size_t bufsz = sizeof(char) * 64 *
997 priv->cfg->base_params->num_of_queues;
998
999 if (!priv->txq) {
1000 IWL_ERR(priv, "txq not ready\n");
1001 return -EAGAIN;
1002 }
1003 buf = kzalloc(bufsz, GFP_KERNEL);
1004 if (!buf)
1005 return -ENOMEM;
1006
1007 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
1008 txq = &priv->txq[cnt];
1009 q = &txq->q;
1010 pos += scnprintf(buf + pos, bufsz - pos,
1011 "hwq %.2d: read=%u write=%u stop=%d"
1012 " swq_id=%#.2x (ac %d/hwq %d)\n",
1013 cnt, q->read_ptr, q->write_ptr,
1014 !!test_bit(cnt, priv->queue_stopped),
1015 txq->swq_id, txq->swq_id & 3,
1016 (txq->swq_id >> 2) & 0x1f);
1017 if (cnt >= 4)
1018 continue;
1019 /* for the ACs, display the stop count too */
1020 pos += scnprintf(buf + pos, bufsz - pos,
1021 " stop-count: %d\n",
1022 atomic_read(&priv->queue_stop_count[cnt]));
1023 }
1024 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1025 kfree(buf);
1026 return ret;
1027}
1028
1029static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1030 char __user *user_buf,
1031 size_t count, loff_t *ppos) {
1032
1033 struct iwl_priv *priv = file->private_data;
1034 struct iwl_rx_queue *rxq = &priv->rxq;
1035 char buf[256];
1036 int pos = 0;
1037 const size_t bufsz = sizeof(buf);
1038
1039 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1040 rxq->read);
1041 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1042 rxq->write);
1043 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1044 rxq->free_count);
1045 if (rxq->rb_stts) {
1046 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1047 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1048 } else {
1049 pos += scnprintf(buf + pos, bufsz - pos,
1050 "closed_rb_num: Not Allocated\n");
1051 }
1052 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1053}
1054
1055static const char *fmt_value = " %-30s %10u\n"; 807static const char *fmt_value = " %-30s %10u\n";
1056static const char *fmt_hex = " %-30s 0x%02X\n"; 808static const char *fmt_hex = " %-30s 0x%02X\n";
1057static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; 809static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
@@ -1096,7 +848,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1096 struct statistics_rx_non_phy *delta_general, *max_general; 848 struct statistics_rx_non_phy *delta_general, *max_general;
1097 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht; 849 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
1098 850
1099 if (!iwl_is_alive(priv)) 851 if (!iwl_is_alive(priv->shrd))
1100 return -EAGAIN; 852 return -EAGAIN;
1101 853
1102 buf = kzalloc(bufsz, GFP_KERNEL); 854 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1522,7 +1274,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1522 ssize_t ret; 1274 ssize_t ret;
1523 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx; 1275 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
1524 1276
1525 if (!iwl_is_alive(priv)) 1277 if (!iwl_is_alive(priv->shrd))
1526 return -EAGAIN; 1278 return -EAGAIN;
1527 1279
1528 buf = kzalloc(bufsz, GFP_KERNEL); 1280 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1716,7 +1468,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1716 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg; 1468 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
1717 struct statistics_div *div, *accum_div, *delta_div, *max_div; 1469 struct statistics_div *div, *accum_div, *delta_div, *max_div;
1718 1470
1719 if (!iwl_is_alive(priv)) 1471 if (!iwl_is_alive(priv->shrd))
1720 return -EAGAIN; 1472 return -EAGAIN;
1721 1473
1722 buf = kzalloc(bufsz, GFP_KERNEL); 1474 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -1829,16 +1581,16 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1829 ssize_t ret; 1581 ssize_t ret;
1830 struct statistics_bt_activity *bt, *accum_bt; 1582 struct statistics_bt_activity *bt, *accum_bt;
1831 1583
1832 if (!iwl_is_alive(priv)) 1584 if (!iwl_is_alive(priv->shrd))
1833 return -EAGAIN; 1585 return -EAGAIN;
1834 1586
1835 if (!priv->bt_enable_flag) 1587 if (!priv->bt_enable_flag)
1836 return -EINVAL; 1588 return -EINVAL;
1837 1589
1838 /* make request to uCode to retrieve statistics information */ 1590 /* make request to uCode to retrieve statistics information */
1839 mutex_lock(&priv->mutex); 1591 mutex_lock(&priv->shrd->mutex);
1840 ret = iwl_send_statistics_request(priv, CMD_SYNC, false); 1592 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1841 mutex_unlock(&priv->mutex); 1593 mutex_unlock(&priv->shrd->mutex);
1842 1594
1843 if (ret) { 1595 if (ret) {
1844 IWL_ERR(priv, 1596 IWL_ERR(priv,
@@ -1917,7 +1669,7 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1917 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200; 1669 (sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
1918 ssize_t ret; 1670 ssize_t ret;
1919 1671
1920 if (!iwl_is_alive(priv)) 1672 if (!iwl_is_alive(priv->shrd))
1921 return -EAGAIN; 1673 return -EAGAIN;
1922 1674
1923 buf = kzalloc(bufsz, GFP_KERNEL); 1675 buf = kzalloc(bufsz, GFP_KERNEL);
@@ -2199,7 +1951,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
2199 const size_t bufsz = sizeof(buf); 1951 const size_t bufsz = sizeof(buf);
2200 u32 pwrsave_status; 1952 u32 pwrsave_status;
2201 1953
2202 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) & 1954 pwrsave_status = iwl_read32(bus(priv), CSR_GP_CNTRL) &
2203 CSR_GP_REG_POWER_SAVE_STATUS_MSK; 1955 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
2204 1956
2205 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); 1957 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2229,30 +1981,9 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
2229 return -EFAULT; 1981 return -EFAULT;
2230 1982
2231 /* make request to uCode to retrieve statistics information */ 1983 /* make request to uCode to retrieve statistics information */
2232 mutex_lock(&priv->mutex); 1984 mutex_lock(&priv->shrd->mutex);
2233 iwl_send_statistics_request(priv, CMD_SYNC, true); 1985 iwl_send_statistics_request(priv, CMD_SYNC, true);
2234 mutex_unlock(&priv->mutex); 1986 mutex_unlock(&priv->shrd->mutex);
2235
2236 return count;
2237}
2238
2239static ssize_t iwl_dbgfs_csr_write(struct file *file,
2240 const char __user *user_buf,
2241 size_t count, loff_t *ppos)
2242{
2243 struct iwl_priv *priv = file->private_data;
2244 char buf[8];
2245 int buf_size;
2246 int csr;
2247
2248 memset(buf, 0, sizeof(buf));
2249 buf_size = min(count, sizeof(buf) - 1);
2250 if (copy_from_user(buf, user_buf, buf_size))
2251 return -EFAULT;
2252 if (sscanf(buf, "%d", &csr) != 1)
2253 return -EFAULT;
2254
2255 iwl_dump_csr(priv);
2256 1987
2257 return count; 1988 return count;
2258} 1989}
@@ -2333,25 +2064,6 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
2333 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 2064 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
2334} 2065}
2335 2066
2336static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2337 char __user *user_buf,
2338 size_t count, loff_t *ppos)
2339{
2340 struct iwl_priv *priv = file->private_data;
2341 char *buf;
2342 int pos = 0;
2343 ssize_t ret = -EFAULT;
2344
2345 ret = pos = iwl_dump_fh(priv, &buf, true);
2346 if (buf) {
2347 ret = simple_read_from_buffer(user_buf,
2348 count, ppos, buf, pos);
2349 kfree(buf);
2350 }
2351
2352 return ret;
2353}
2354
2355static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file, 2067static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
2356 char __user *user_buf, 2068 char __user *user_buf,
2357 size_t count, loff_t *ppos) { 2069 size_t count, loff_t *ppos) {
@@ -2504,7 +2216,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
2504 if (sscanf(buf, "%d", &flush) != 1) 2216 if (sscanf(buf, "%d", &flush) != 1)
2505 return -EINVAL; 2217 return -EINVAL;
2506 2218
2507 if (iwl_is_rfkill(priv)) 2219 if (iwl_is_rfkill(priv->shrd))
2508 return -EFAULT; 2220 return -EFAULT;
2509 2221
2510 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); 2222 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
@@ -2628,9 +2340,6 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2628 2340
2629DEBUGFS_READ_FILE_OPS(rx_statistics); 2341DEBUGFS_READ_FILE_OPS(rx_statistics);
2630DEBUGFS_READ_FILE_OPS(tx_statistics); 2342DEBUGFS_READ_FILE_OPS(tx_statistics);
2631DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
2632DEBUGFS_READ_FILE_OPS(rx_queue);
2633DEBUGFS_READ_FILE_OPS(tx_queue);
2634DEBUGFS_READ_FILE_OPS(ucode_rx_stats); 2343DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
2635DEBUGFS_READ_FILE_OPS(ucode_tx_stats); 2344DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2636DEBUGFS_READ_FILE_OPS(ucode_general_stats); 2345DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2639,9 +2348,7 @@ DEBUGFS_READ_FILE_OPS(chain_noise);
2639DEBUGFS_READ_FILE_OPS(power_save_status); 2348DEBUGFS_READ_FILE_OPS(power_save_status);
2640DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 2349DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
2641DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); 2350DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
2642DEBUGFS_WRITE_FILE_OPS(csr);
2643DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); 2351DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2644DEBUGFS_READ_FILE_OPS(fh_reg);
2645DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); 2352DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2646DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); 2353DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2647DEBUGFS_READ_WRITE_FILE_OPS(force_reset); 2354DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
@@ -2682,11 +2389,10 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2682 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR); 2389 DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
2683 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR); 2390 DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
2684 DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR); 2391 DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
2685 DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
2686 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR); 2392 DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
2687 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR); 2393 DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
2688 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR); 2394 DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
2689 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); 2395 DEBUGFS_ADD_FILE(rx_handlers, dir_data, S_IWUSR | S_IRUSR);
2690 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); 2396 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
2691 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); 2397 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
2692 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); 2398 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
@@ -2694,14 +2400,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2694 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); 2400 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
2695 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); 2401 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
2696 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR); 2402 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
2697 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
2698 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
2699 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
2700 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); 2403 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
2701 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); 2404 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
2702 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); 2405 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
2703 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
2704 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
2705 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); 2406 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2706 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); 2407 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2707 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); 2408 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
@@ -2725,6 +2426,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2725 &priv->disable_sens_cal); 2426 &priv->disable_sens_cal);
2726 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, 2427 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
2727 &priv->disable_chain_noise_cal); 2428 &priv->disable_chain_noise_cal);
2429
2430 if (iwl_trans_dbgfs_register(trans(priv), dir_debug))
2431 goto err;
2728 return 0; 2432 return 0;
2729 2433
2730err: 2434err:
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index dd34c7c502fa..1e54293532b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -36,12 +36,12 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/wait.h> 37#include <linux/wait.h>
38#include <linux/leds.h> 38#include <linux/leds.h>
39#include <linux/slab.h>
39#include <net/ieee80211_radiotap.h> 40#include <net/ieee80211_radiotap.h>
40 41
41#include "iwl-eeprom.h" 42#include "iwl-eeprom.h"
42#include "iwl-csr.h" 43#include "iwl-csr.h"
43#include "iwl-prph.h" 44#include "iwl-prph.h"
44#include "iwl-fh.h"
45#include "iwl-debug.h" 45#include "iwl-debug.h"
46#include "iwl-agn-hw.h" 46#include "iwl-agn-hw.h"
47#include "iwl-led.h" 47#include "iwl-led.h"
@@ -50,8 +50,7 @@
50#include "iwl-agn-tt.h" 50#include "iwl-agn-tt.h"
51#include "iwl-bus.h" 51#include "iwl-bus.h"
52#include "iwl-trans.h" 52#include "iwl-trans.h"
53 53#include "iwl-shared.h"
54#define DRV_NAME "iwlagn"
55 54
56struct iwl_tx_queue; 55struct iwl_tx_queue;
57 56
@@ -90,14 +89,6 @@ struct iwl_tx_queue;
90#define DEFAULT_SHORT_RETRY_LIMIT 7U 89#define DEFAULT_SHORT_RETRY_LIMIT 7U
91#define DEFAULT_LONG_RETRY_LIMIT 4U 90#define DEFAULT_LONG_RETRY_LIMIT 4U
92 91
93struct iwl_rx_mem_buffer {
94 dma_addr_t page_dma;
95 struct page *page;
96 struct list_head list;
97};
98
99#define rxb_addr(r) page_address(r->page)
100
101/* defined below */ 92/* defined below */
102struct iwl_device_cmd; 93struct iwl_device_cmd;
103 94
@@ -156,12 +147,6 @@ struct iwl_queue {
156 * space less than this */ 147 * space less than this */
157}; 148};
158 149
159/* One for each TFD */
160struct iwl_tx_info {
161 struct sk_buff *skb;
162 struct iwl_rxon_context *ctx;
163};
164
165/** 150/**
166 * struct iwl_tx_queue - Tx Queue for DMA 151 * struct iwl_tx_queue - Tx Queue for DMA
167 * @q: generic Rx/Tx queue descriptor 152 * @q: generic Rx/Tx queue descriptor
@@ -173,6 +158,8 @@ struct iwl_tx_info {
173 * @time_stamp: time (in jiffies) of last read_ptr change 158 * @time_stamp: time (in jiffies) of last read_ptr change
174 * @need_update: indicates need to update read/write index 159 * @need_update: indicates need to update read/write index
175 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled 160 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
161 * @sta_id: valid if sched_retry is set
162 * @tid: valid if sched_retry is set
176 * 163 *
177 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 164 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
178 * descriptors) and required locking structures. 165 * descriptors) and required locking structures.
@@ -185,12 +172,15 @@ struct iwl_tx_queue {
185 struct iwl_tfd *tfds; 172 struct iwl_tfd *tfds;
186 struct iwl_device_cmd **cmd; 173 struct iwl_device_cmd **cmd;
187 struct iwl_cmd_meta *meta; 174 struct iwl_cmd_meta *meta;
188 struct iwl_tx_info *txb; 175 struct sk_buff **skbs;
189 unsigned long time_stamp; 176 unsigned long time_stamp;
190 u8 need_update; 177 u8 need_update;
191 u8 sched_retry; 178 u8 sched_retry;
192 u8 active; 179 u8 active;
193 u8 swq_id; 180 u8 swq_id;
181
182 u16 sta_id;
183 u16 tid;
194}; 184};
195 185
196#define IWL_NUM_SCAN_RATES (2) 186#define IWL_NUM_SCAN_RATES (2)
@@ -254,13 +244,6 @@ struct iwl_channel_info {
254#define IWL_DEFAULT_CMD_QUEUE_NUM 4 244#define IWL_DEFAULT_CMD_QUEUE_NUM 4
255#define IWL_IPAN_CMD_QUEUE_NUM 9 245#define IWL_IPAN_CMD_QUEUE_NUM 9
256 246
257/*
258 * This queue number is required for proper operation
259 * because the ucode will stop/start the scheduler as
260 * required.
261 */
262#define IWL_IPAN_MCAST_QUEUE 8
263
264#define IEEE80211_DATA_LEN 2304 247#define IEEE80211_DATA_LEN 2304
265#define IEEE80211_4ADDR_LEN 30 248#define IEEE80211_4ADDR_LEN 30
266#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN) 249#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
@@ -334,81 +317,11 @@ struct iwl_host_cmd {
334#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 317#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
335#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 318#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
336 319
337/**
338 * struct iwl_rx_queue - Rx queue
339 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
340 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
341 * @read: Shared index to newest available Rx buffer
342 * @write: Shared index to oldest written Rx packet
343 * @free_count: Number of pre-allocated buffers in rx_free
344 * @rx_free: list of free SKBs for use
345 * @rx_used: List of Rx buffers with no SKB
346 * @need_update: flag to indicate we need to update read/write index
347 * @rb_stts: driver's pointer to receive buffer status
348 * @rb_stts_dma: bus address of receive buffer status
349 *
350 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
351 */
352struct iwl_rx_queue {
353 __le32 *bd;
354 dma_addr_t bd_dma;
355 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
356 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
357 u32 read;
358 u32 write;
359 u32 free_count;
360 u32 write_actual;
361 struct list_head rx_free;
362 struct list_head rx_used;
363 int need_update;
364 struct iwl_rb_status *rb_stts;
365 dma_addr_t rb_stts_dma;
366 spinlock_t lock;
367};
368
369#define IWL_SUPPORTED_RATES_IE_LEN 8 320#define IWL_SUPPORTED_RATES_IE_LEN 8
370 321
371#define MAX_TID_COUNT 9
372
373#define IWL_INVALID_RATE 0xFF 322#define IWL_INVALID_RATE 0xFF
374#define IWL_INVALID_VALUE -1 323#define IWL_INVALID_VALUE -1
375 324
376/**
377 * struct iwl_ht_agg -- aggregation status while waiting for block-ack
378 * @txq_id: Tx queue used for Tx attempt
379 * @frame_count: # frames attempted by Tx command
380 * @wait_for_ba: Expect block-ack before next Tx reply
381 * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
382 * @bitmap0: Low order bitmap, one bit for each frame pending ACK in Tx window
383 * @bitmap1: High order, one bit for each frame pending ACK in Tx window
384 * @rate_n_flags: Rate at which Tx was attempted
385 *
386 * If REPLY_TX indicates that aggregation was attempted, driver must wait
387 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
388 * until block ack arrives.
389 */
390struct iwl_ht_agg {
391 u16 txq_id;
392 u16 frame_count;
393 u16 wait_for_ba;
394 u16 start_idx;
395 u64 bitmap;
396 u32 rate_n_flags;
397#define IWL_AGG_OFF 0
398#define IWL_AGG_ON 1
399#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
400#define IWL_EMPTYING_HW_QUEUE_DELBA 3
401 u8 state;
402 u8 tx_fifo;
403};
404
405
406struct iwl_tid_data {
407 u16 seq_number; /* agn only */
408 u16 tfds_in_queue;
409 struct iwl_ht_agg agg;
410};
411
412union iwl_ht_rate_supp { 325union iwl_ht_rate_supp {
413 u16 rates; 326 u16 rates;
414 struct { 327 struct {
@@ -459,7 +372,6 @@ struct iwl_qos_info {
459 */ 372 */
460struct iwl_station_entry { 373struct iwl_station_entry {
461 struct iwl_addsta_cmd sta; 374 struct iwl_addsta_cmd sta;
462 struct iwl_tid_data tid[MAX_TID_COUNT];
463 u8 used, ctxid; 375 u8 used, ctxid;
464 struct iwl_link_quality_cmd *lq; 376 struct iwl_link_quality_cmd *lq;
465}; 377};
@@ -647,54 +559,6 @@ struct iwl_sensitivity_ranges {
647#define CELSIUS_TO_KELVIN(x) ((x)+273) 559#define CELSIUS_TO_KELVIN(x) ((x)+273)
648 560
649 561
650/**
651 * struct iwl_hw_params
652 * @max_txq_num: Max # Tx queues supported
653 * @scd_bc_tbls_size: size of scheduler byte count tables
654 * @tfd_size: TFD size
655 * @tx/rx_chains_num: Number of TX/RX chains
656 * @valid_tx/rx_ant: usable antennas
657 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
658 * @max_rxq_log: Log-base-2 of max_rxq_size
659 * @rx_page_order: Rx buffer page order
660 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
661 * @max_stations:
662 * @ht40_channel: is 40MHz width possible in band 2.4
663 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
664 * @sw_crypto: 0 for hw, 1 for sw
665 * @max_xxx_size: for ucode uses
666 * @ct_kill_threshold: temperature threshold
667 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
668 * @calib_init_cfg: setup initial calibrations for the hw
669 * @calib_rt_cfg: setup runtime calibrations for the hw
670 * @struct iwl_sensitivity_ranges: range of sensitivity values
671 */
672struct iwl_hw_params {
673 u8 max_txq_num;
674 u16 scd_bc_tbls_size;
675 u32 tfd_size;
676 u8 tx_chains_num;
677 u8 rx_chains_num;
678 u8 valid_tx_ant;
679 u8 valid_rx_ant;
680 u16 max_rxq_size;
681 u16 max_rxq_log;
682 u32 rx_page_order;
683 u8 max_stations;
684 u8 ht40_channel;
685 u8 max_beacon_itrvl; /* in 1024 ms */
686 u32 max_inst_size;
687 u32 max_data_size;
688 u32 ct_kill_threshold; /* value in hw-dependent units */
689 u32 ct_kill_exit_threshold; /* value in hw-dependent units */
690 /* for 1000, 6000 series and up */
691 u16 beacon_time_tsf_bits;
692 u32 calib_init_cfg;
693 u32 calib_rt_cfg;
694 const struct iwl_sensitivity_ranges *sens;
695};
696
697
698/****************************************************************************** 562/******************************************************************************
699 * 563 *
700 * Functions implemented in core module which are forward declared here 564 * Functions implemented in core module which are forward declared here
@@ -710,26 +574,6 @@ struct iwl_hw_params {
710 ****************************************************************************/ 574 ****************************************************************************/
711extern void iwl_update_chain_flags(struct iwl_priv *priv); 575extern void iwl_update_chain_flags(struct iwl_priv *priv);
712extern const u8 iwl_bcast_addr[ETH_ALEN]; 576extern const u8 iwl_bcast_addr[ETH_ALEN];
713extern int iwl_queue_space(const struct iwl_queue *q);
714static inline int iwl_queue_used(const struct iwl_queue *q, int i)
715{
716 return q->write_ptr >= q->read_ptr ?
717 (i >= q->read_ptr && i < q->write_ptr) :
718 !(i < q->read_ptr && i >= q->write_ptr);
719}
720
721
722static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
723{
724 return index & (q->n_window - 1);
725}
726
727
728struct iwl_dma_ptr {
729 dma_addr_t dma;
730 void *addr;
731 size_t size;
732};
733 577
734#define IWL_OPERATION_MODE_AUTO 0 578#define IWL_OPERATION_MODE_AUTO 0
735#define IWL_OPERATION_MODE_HT_ONLY 1 579#define IWL_OPERATION_MODE_HT_ONLY 1
@@ -897,22 +741,6 @@ enum iwl_pa_type {
897 IWL_PA_INTERNAL = 1, 741 IWL_PA_INTERNAL = 1,
898}; 742};
899 743
900/* interrupt statistics */
901struct isr_statistics {
902 u32 hw;
903 u32 sw;
904 u32 err_code;
905 u32 sch;
906 u32 alive;
907 u32 rfkill;
908 u32 ctkill;
909 u32 wakeup;
910 u32 rx;
911 u32 rx_handlers[REPLY_MAX];
912 u32 tx;
913 u32 unhandled;
914};
915
916/* reply_tx_statistics (for _agn devices) */ 744/* reply_tx_statistics (for _agn devices) */
917struct reply_tx_error_statistics { 745struct reply_tx_error_statistics {
918 u32 pp_delay; 746 u32 pp_delay;
@@ -1114,20 +942,9 @@ struct iwl_notification_wait {
1114 bool triggered, aborted; 942 bool triggered, aborted;
1115}; 943};
1116 944
1117enum iwl_rxon_context_id {
1118 IWL_RXON_CTX_BSS,
1119 IWL_RXON_CTX_PAN,
1120
1121 NUM_IWL_RXON_CTX
1122};
1123
1124struct iwl_rxon_context { 945struct iwl_rxon_context {
1125 struct ieee80211_vif *vif; 946 struct ieee80211_vif *vif;
1126 947
1127 const u8 *ac_to_fifo;
1128 const u8 *ac_to_queue;
1129 u8 mcast_queue;
1130
1131 /* 948 /*
1132 * We could use the vif to indicate active, but we 949 * We could use the vif to indicate active, but we
1133 * also need it to be active during disabling when 950 * also need it to be active during disabling when
@@ -1175,6 +992,9 @@ struct iwl_rxon_context {
1175 u8 extension_chan_offset; 992 u8 extension_chan_offset;
1176 } ht; 993 } ht;
1177 994
995 u8 bssid[ETH_ALEN];
996 bool preauth_bssid;
997
1178 bool last_tx_rejected; 998 bool last_tx_rejected;
1179}; 999};
1180 1000
@@ -1203,16 +1023,17 @@ struct iwl_testmode_trace {
1203}; 1023};
1204#endif 1024#endif
1205 1025
1206/* uCode ownership */
1207#define IWL_OWNERSHIP_DRIVER 0
1208#define IWL_OWNERSHIP_TM 1
1209
1210struct iwl_priv { 1026struct iwl_priv {
1211 1027
1028 /*data shared among all the driver's layers */
1029 struct iwl_shared _shrd;
1030 struct iwl_shared *shrd;
1031
1212 /* ieee device used by generic ieee processing code */ 1032 /* ieee device used by generic ieee processing code */
1213 struct ieee80211_hw *hw; 1033 struct ieee80211_hw *hw;
1214 struct ieee80211_channel *ieee_channels; 1034 struct ieee80211_channel *ieee_channels;
1215 struct ieee80211_rate *ieee_rates; 1035 struct ieee80211_rate *ieee_rates;
1036 struct kmem_cache *tx_cmd_pool;
1216 struct iwl_cfg *cfg; 1037 struct iwl_cfg *cfg;
1217 1038
1218 enum ieee80211_band band; 1039 enum ieee80211_band band;
@@ -1238,6 +1059,9 @@ struct iwl_priv {
1238 /* jiffies when last recovery from statistics was performed */ 1059 /* jiffies when last recovery from statistics was performed */
1239 unsigned long rx_statistics_jiffies; 1060 unsigned long rx_statistics_jiffies;
1240 1061
1062 /*counters */
1063 u32 rx_handlers_stats[REPLY_MAX];
1064
1241 /* force reset */ 1065 /* force reset */
1242 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; 1066 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
1243 1067
@@ -1268,21 +1092,12 @@ struct iwl_priv {
1268 u8 scan_tx_ant[IEEE80211_NUM_BANDS]; 1092 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
1269 u8 mgmt_tx_ant; 1093 u8 mgmt_tx_ant;
1270 1094
1271 /* spinlock */ 1095 /*TODO: remove these pointers - use bus(priv) instead */
1272 spinlock_t lock; /* protect general shared data */
1273 spinlock_t hcmd_lock; /* protect hcmd */
1274 spinlock_t reg_lock; /* protect hw register access */
1275 struct mutex mutex;
1276
1277 struct iwl_bus *bus; /* bus specific data */ 1096 struct iwl_bus *bus; /* bus specific data */
1278 struct iwl_trans trans;
1279 1097
1280 /* microcode/device supports multiple contexts */ 1098 /* microcode/device supports multiple contexts */
1281 u8 valid_contexts; 1099 u8 valid_contexts;
1282 1100
1283 /* command queue number */
1284 u8 cmd_queue;
1285
1286 /* max number of station keys */ 1101 /* max number of station keys */
1287 u8 sta_key_max_num; 1102 u8 sta_key_max_num;
1288 1103
@@ -1296,9 +1111,6 @@ struct iwl_priv {
1296 u32 ucode_ver; /* version of ucode, copy of 1111 u32 ucode_ver; /* version of ucode, copy of
1297 iwl_ucode.ver */ 1112 iwl_ucode.ver */
1298 1113
1299 /* uCode owner: default: IWL_OWNERSHIP_DRIVER */
1300 u8 ucode_owner;
1301
1302 struct fw_img ucode_rt; 1114 struct fw_img ucode_rt;
1303 struct fw_img ucode_init; 1115 struct fw_img ucode_init;
1304 struct fw_img ucode_wowlan; 1116 struct fw_img ucode_wowlan;
@@ -1334,48 +1146,21 @@ struct iwl_priv {
1334 1146
1335 int activity_timer_active; 1147 int activity_timer_active;
1336 1148
1337 /* Rx and Tx DMA processing queues */
1338 struct iwl_rx_queue rxq;
1339 struct iwl_tx_queue *txq;
1340 unsigned long txq_ctx_active_msk;
1341 struct iwl_dma_ptr kw; /* keep warm address */
1342 struct iwl_dma_ptr scd_bc_tbls;
1343
1344 u32 scd_base_addr; /* scheduler sram base address */
1345
1346 unsigned long status;
1347
1348 /* counts mgmt, ctl, and data packets */ 1149 /* counts mgmt, ctl, and data packets */
1349 struct traffic_stats tx_stats; 1150 struct traffic_stats tx_stats;
1350 struct traffic_stats rx_stats; 1151 struct traffic_stats rx_stats;
1351 1152
1352 /* counts interrupts */
1353 struct isr_statistics isr_stats;
1354
1355 struct iwl_power_mgr power_data; 1153 struct iwl_power_mgr power_data;
1356 struct iwl_tt_mgmt thermal_throttle; 1154 struct iwl_tt_mgmt thermal_throttle;
1357 1155
1358 /* station table variables */ 1156 /* station table variables */
1359
1360 /* Note: if lock and sta_lock are needed, lock must be acquired first */
1361 spinlock_t sta_lock;
1362 int num_stations; 1157 int num_stations;
1363 struct iwl_station_entry stations[IWLAGN_STATION_COUNT]; 1158 struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
1364 unsigned long ucode_key_table; 1159 unsigned long ucode_key_table;
1365 1160
1366 /* queue refcounts */
1367#define IWL_MAX_HW_QUEUES 32
1368 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
1369 /* for each AC */
1370 atomic_t queue_stop_count[4];
1371
1372 /* Indication if ieee80211_ops->open has been called */ 1161 /* Indication if ieee80211_ops->open has been called */
1373 u8 is_open; 1162 u8 is_open;
1374 1163
1375 u8 mac80211_registered;
1376
1377 bool wowlan;
1378
1379 /* eeprom -- this is in the card's little endian byte order */ 1164 /* eeprom -- this is in the card's little endian byte order */
1380 u8 *eeprom; 1165 u8 *eeprom;
1381 int nvm_device_type; 1166 int nvm_device_type;
@@ -1411,14 +1196,6 @@ struct iwl_priv {
1411 } accum_stats, delta_stats, max_delta_stats; 1196 } accum_stats, delta_stats, max_delta_stats;
1412#endif 1197#endif
1413 1198
1414 /* INT ICT Table */
1415 __le32 *ict_tbl;
1416 void *ict_tbl_vir;
1417 dma_addr_t ict_tbl_dma;
1418 dma_addr_t aligned_ict_tbl_dma;
1419 int ict_index;
1420 u32 inta;
1421 bool use_ict;
1422 /* 1199 /*
1423 * reporting the number of tids has AGG on. 0 means 1200 * reporting the number of tids has AGG on. 0 means
1424 * no AGGREGATION 1201 * no AGGREGATION
@@ -1475,15 +1252,8 @@ struct iwl_priv {
1475 struct iwl_rxon_context *cur_rssi_ctx; 1252 struct iwl_rxon_context *cur_rssi_ctx;
1476 bool bt_is_sco; 1253 bool bt_is_sco;
1477 1254
1478 struct iwl_hw_params hw_params;
1479
1480 u32 inta_mask;
1481
1482 struct workqueue_struct *workqueue;
1483
1484 struct work_struct restart; 1255 struct work_struct restart;
1485 struct work_struct scan_completed; 1256 struct work_struct scan_completed;
1486 struct work_struct rx_replenish;
1487 struct work_struct abort_scan; 1257 struct work_struct abort_scan;
1488 1258
1489 struct work_struct beacon_update; 1259 struct work_struct beacon_update;
@@ -1499,8 +1269,6 @@ struct iwl_priv {
1499 struct work_struct bt_full_concurrency; 1269 struct work_struct bt_full_concurrency;
1500 struct work_struct bt_runtime_config; 1270 struct work_struct bt_runtime_config;
1501 1271
1502 struct tasklet_struct irq_tasklet;
1503
1504 struct delayed_work scan_check; 1272 struct delayed_work scan_check;
1505 1273
1506 /* TX Power */ 1274 /* TX Power */
@@ -1509,12 +1277,6 @@ struct iwl_priv {
1509 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */ 1277 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
1510 s8 tx_power_next; 1278 s8 tx_power_next;
1511 1279
1512
1513#ifdef CONFIG_IWLWIFI_DEBUG
1514 /* debugging info */
1515 u32 debug_level; /* per device debugging will override global
1516 iwl_debug_level if set */
1517#endif /* CONFIG_IWLWIFI_DEBUG */
1518#ifdef CONFIG_IWLWIFI_DEBUGFS 1280#ifdef CONFIG_IWLWIFI_DEBUGFS
1519 /* debugfs */ 1281 /* debugfs */
1520 u16 tx_traffic_idx; 1282 u16 tx_traffic_idx;
@@ -1552,47 +1314,7 @@ struct iwl_priv {
1552 bool have_rekey_data; 1314 bool have_rekey_data;
1553}; /*iwl_priv */ 1315}; /*iwl_priv */
1554 1316
1555static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) 1317extern struct iwl_mod_params iwlagn_mod_params;
1556{
1557 set_bit(txq_id, &priv->txq_ctx_active_msk);
1558}
1559
1560static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1561{
1562 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1563}
1564
1565#ifdef CONFIG_IWLWIFI_DEBUG
1566/*
1567 * iwl_get_debug_level: Return active debug level for device
1568 *
1569 * Using sysfs it is possible to set per device debug level. This debug
1570 * level will be used if set, otherwise the global debug level which can be
1571 * set via module parameter is used.
1572 */
1573static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1574{
1575 if (priv->debug_level)
1576 return priv->debug_level;
1577 else
1578 return iwl_debug_level;
1579}
1580#else
1581static inline u32 iwl_get_debug_level(struct iwl_priv *priv)
1582{
1583 return iwl_debug_level;
1584}
1585#endif
1586
1587
1588static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
1589 int txq_id, int idx)
1590{
1591 if (priv->txq[txq_id].txb[idx].skb)
1592 return (struct ieee80211_hdr *)priv->txq[txq_id].
1593 txb[idx].skb->data;
1594 return NULL;
1595}
1596 1318
1597static inline struct iwl_rxon_context * 1319static inline struct iwl_rxon_context *
1598iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) 1320iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
@@ -1659,13 +1381,4 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1659 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1381 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1660} 1382}
1661 1383
1662static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
1663{
1664 __free_pages(page, priv->hw_params.rx_page_order);
1665}
1666
1667static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
1668{
1669 free_pages(page, priv->hw_params.rx_page_order);
1670}
1671#endif /* __iwl_dev_h__ */ 1384#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 2c84ba95afca..8a51c5ccda1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,6 +29,8 @@
29 29
30#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
31 31
32struct iwl_priv;
33
32#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__) 34#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
33#undef TRACE_EVENT 35#undef TRACE_EVENT
34#define TRACE_EVENT(name, proto, ...) \ 36#define TRACE_EVENT(name, proto, ...) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 19d31a5e32e5..80ee65be9cd1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -155,11 +155,11 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
155 155
156 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { 156 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
157 /* Request semaphore */ 157 /* Request semaphore */
158 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 158 iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
159 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); 159 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
160 160
161 /* See if we got it */ 161 /* See if we got it */
162 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, 162 ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
163 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 163 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
164 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 164 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
165 EEPROM_SEM_TIMEOUT); 165 EEPROM_SEM_TIMEOUT);
@@ -176,14 +176,14 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
176 176
177static void iwl_eeprom_release_semaphore(struct iwl_priv *priv) 177static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
178{ 178{
179 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG, 179 iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
180 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); 180 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
181 181
182} 182}
183 183
184static int iwl_eeprom_verify_signature(struct iwl_priv *priv) 184static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
185{ 185{
186 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; 186 u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
187 int ret = 0; 187 int ret = 0;
188 188
189 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp); 189 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
@@ -216,17 +216,17 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
216 216
217static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) 217static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
218{ 218{
219 iwl_read32(priv, CSR_OTP_GP_REG); 219 iwl_read32(bus(priv), CSR_OTP_GP_REG);
220 220
221 if (mode == IWL_OTP_ACCESS_ABSOLUTE) 221 if (mode == IWL_OTP_ACCESS_ABSOLUTE)
222 iwl_clear_bit(priv, CSR_OTP_GP_REG, 222 iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
223 CSR_OTP_GP_REG_OTP_ACCESS_MODE); 223 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
224 else 224 else
225 iwl_set_bit(priv, CSR_OTP_GP_REG, 225 iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
226 CSR_OTP_GP_REG_OTP_ACCESS_MODE); 226 CSR_OTP_GP_REG_OTP_ACCESS_MODE);
227} 227}
228 228
229static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) 229static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
230{ 230{
231 u32 otpgp; 231 u32 otpgp;
232 int nvm_type; 232 int nvm_type;
@@ -243,7 +243,7 @@ static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
243 nvm_type = NVM_DEVICE_TYPE_EEPROM; 243 nvm_type = NVM_DEVICE_TYPE_EEPROM;
244 break; 244 break;
245 default: 245 default:
246 otpgp = iwl_read32(priv, CSR_OTP_GP_REG); 246 otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
247 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) 247 if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
248 nvm_type = NVM_DEVICE_TYPE_OTP; 248 nvm_type = NVM_DEVICE_TYPE_OTP;
249 else 249 else
@@ -258,22 +258,22 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
258 int ret; 258 int ret;
259 259
260 /* Enable 40MHz radio clock */ 260 /* Enable 40MHz radio clock */
261 iwl_write32(priv, CSR_GP_CNTRL, 261 iwl_write32(bus(priv), CSR_GP_CNTRL,
262 iwl_read32(priv, CSR_GP_CNTRL) | 262 iwl_read32(bus(priv), CSR_GP_CNTRL) |
263 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 263 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
264 264
265 /* wait for clock to be ready */ 265 /* wait for clock to be ready */
266 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 266 ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
267 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 267 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
268 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 268 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
269 25000); 269 25000);
270 if (ret < 0) 270 if (ret < 0)
271 IWL_ERR(priv, "Time out access OTP\n"); 271 IWL_ERR(priv, "Time out access OTP\n");
272 else { 272 else {
273 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, 273 iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
274 APMG_PS_CTRL_VAL_RESET_REQ); 274 APMG_PS_CTRL_VAL_RESET_REQ);
275 udelay(5); 275 udelay(5);
276 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, 276 iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
277 APMG_PS_CTRL_VAL_RESET_REQ); 277 APMG_PS_CTRL_VAL_RESET_REQ);
278 278
279 /* 279 /*
@@ -281,7 +281,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
281 * this is only applicable for HW with OTP shadow RAM 281 * this is only applicable for HW with OTP shadow RAM
282 */ 282 */
283 if (priv->cfg->base_params->shadow_ram_support) 283 if (priv->cfg->base_params->shadow_ram_support)
284 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG, 284 iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
285 CSR_RESET_LINK_PWR_MGMT_DISABLED); 285 CSR_RESET_LINK_PWR_MGMT_DISABLED);
286 } 286 }
287 return ret; 287 return ret;
@@ -293,9 +293,9 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
293 u32 r; 293 u32 r;
294 u32 otpgp; 294 u32 otpgp;
295 295
296 iwl_write32(priv, CSR_EEPROM_REG, 296 iwl_write32(bus(priv), CSR_EEPROM_REG,
297 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 297 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
298 ret = iwl_poll_bit(priv, CSR_EEPROM_REG, 298 ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
299 CSR_EEPROM_REG_READ_VALID_MSK, 299 CSR_EEPROM_REG_READ_VALID_MSK,
300 CSR_EEPROM_REG_READ_VALID_MSK, 300 CSR_EEPROM_REG_READ_VALID_MSK,
301 IWL_EEPROM_ACCESS_TIMEOUT); 301 IWL_EEPROM_ACCESS_TIMEOUT);
@@ -303,13 +303,13 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
303 IWL_ERR(priv, "Time out reading OTP[%d]\n", addr); 303 IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
304 return ret; 304 return ret;
305 } 305 }
306 r = iwl_read32(priv, CSR_EEPROM_REG); 306 r = iwl_read32(bus(priv), CSR_EEPROM_REG);
307 /* check for ECC errors: */ 307 /* check for ECC errors: */
308 otpgp = iwl_read32(priv, CSR_OTP_GP_REG); 308 otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
309 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { 309 if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
310 /* stop in this case */ 310 /* stop in this case */
311 /* set the uncorrectable OTP ECC bit for acknowledgement */ 311 /* set the uncorrectable OTP ECC bit for acknowledgement */
312 iwl_set_bit(priv, CSR_OTP_GP_REG, 312 iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
313 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); 313 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
314 IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n"); 314 IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
315 return -EINVAL; 315 return -EINVAL;
@@ -317,7 +317,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
317 if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { 317 if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
318 /* continue in this case */ 318 /* continue in this case */
319 /* set the correctable OTP ECC bit for acknowledgement */ 319 /* set the correctable OTP ECC bit for acknowledgement */
320 iwl_set_bit(priv, CSR_OTP_GP_REG, 320 iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
321 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); 321 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
322 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n"); 322 IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
323 } 323 }
@@ -424,14 +424,14 @@ u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
424int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev) 424int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
425{ 425{
426 __le16 *e; 426 __le16 *e;
427 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 427 u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
428 int sz; 428 int sz;
429 int ret; 429 int ret;
430 u16 addr; 430 u16 addr;
431 u16 validblockaddr = 0; 431 u16 validblockaddr = 0;
432 u16 cache_addr = 0; 432 u16 cache_addr = 0;
433 433
434 priv->nvm_device_type = iwlcore_get_nvm_type(priv, hw_rev); 434 priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
435 if (priv->nvm_device_type == -ENOENT) 435 if (priv->nvm_device_type == -ENOENT)
436 return -ENOENT; 436 return -ENOENT;
437 /* allocate eeprom */ 437 /* allocate eeprom */
@@ -469,11 +469,11 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
469 ret = -ENOENT; 469 ret = -ENOENT;
470 goto done; 470 goto done;
471 } 471 }
472 iwl_write32(priv, CSR_EEPROM_GP, 472 iwl_write32(bus(priv), CSR_EEPROM_GP,
473 iwl_read32(priv, CSR_EEPROM_GP) & 473 iwl_read32(bus(priv), CSR_EEPROM_GP) &
474 ~CSR_EEPROM_GP_IF_OWNER_MSK); 474 ~CSR_EEPROM_GP_IF_OWNER_MSK);
475 475
476 iwl_set_bit(priv, CSR_OTP_GP_REG, 476 iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
477 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | 477 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
478 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); 478 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
479 /* traversing the linked list if no shadow ram supported */ 479 /* traversing the linked list if no shadow ram supported */
@@ -498,10 +498,10 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
498 for (addr = 0; addr < sz; addr += sizeof(u16)) { 498 for (addr = 0; addr < sz; addr += sizeof(u16)) {
499 u32 r; 499 u32 r;
500 500
501 iwl_write32(priv, CSR_EEPROM_REG, 501 iwl_write32(bus(priv), CSR_EEPROM_REG,
502 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 502 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
503 503
504 ret = iwl_poll_bit(priv, CSR_EEPROM_REG, 504 ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
505 CSR_EEPROM_REG_READ_VALID_MSK, 505 CSR_EEPROM_REG_READ_VALID_MSK,
506 CSR_EEPROM_REG_READ_VALID_MSK, 506 CSR_EEPROM_REG_READ_VALID_MSK,
507 IWL_EEPROM_ACCESS_TIMEOUT); 507 IWL_EEPROM_ACCESS_TIMEOUT);
@@ -509,7 +509,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
509 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr); 509 IWL_ERR(priv, "Time out reading EEPROM[%d]\n", addr);
510 goto done; 510 goto done;
511 } 511 }
512 r = iwl_read32(priv, CSR_EEPROM_REG); 512 r = iwl_read32(bus(priv), CSR_EEPROM_REG);
513 e[addr / 2] = cpu_to_le16(r >> 16); 513 e[addr / 2] = cpu_to_le16(r >> 16);
514 } 514 }
515 } 515 }
@@ -838,7 +838,7 @@ void iwl_rf_config(struct iwl_priv *priv)
838 838
839 /* write radio config values to register */ 839 /* write radio config values to register */
840 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { 840 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
841 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 841 iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
842 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 842 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
843 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 843 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
844 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 844 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -850,7 +850,7 @@ void iwl_rf_config(struct iwl_priv *priv)
850 WARN_ON(1); 850 WARN_ON(1);
851 851
852 /* set CSR_HW_CONFIG_REG for uCode use */ 852 /* set CSR_HW_CONFIG_REG for uCode use */
853 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 853 iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
854 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 854 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
855 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 855 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
856} 856}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index e4bf8ac5e64e..e2b5e0ea5d9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -301,7 +301,6 @@ void iwl_eeprom_free(struct iwl_priv *priv);
301int iwl_eeprom_check_version(struct iwl_priv *priv); 301int iwl_eeprom_check_version(struct iwl_priv *priv);
302int iwl_eeprom_check_sku(struct iwl_priv *priv); 302int iwl_eeprom_check_sku(struct iwl_priv *priv);
303const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 303const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
304int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
305u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); 304u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
306int iwl_init_channel_map(struct iwl_priv *priv); 305int iwl_init_channel_map(struct iwl_priv *priv);
307void iwl_free_channel_map(struct iwl_priv *priv); 306void iwl_free_channel_map(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 0ad60b3c04db..5bede9d7f955 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -63,6 +63,8 @@
63#ifndef __iwl_fh_h__ 63#ifndef __iwl_fh_h__
64#define __iwl_fh_h__ 64#define __iwl_fh_h__
65 65
66#include <linux/types.h>
67
66/****************************/ 68/****************************/
67/* Flow Handler Definitions */ 69/* Flow Handler Definitions */
68/****************************/ 70/****************************/
@@ -266,8 +268,6 @@
266#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) 268#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
267#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) 269#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
268 270
269#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
270
271/** 271/**
272 * Rx Shared Status Registers (RSSR) 272 * Rx Shared Status Registers (RSSR)
273 * 273 *
@@ -422,10 +422,6 @@
422#define RX_FREE_BUFFERS 64 422#define RX_FREE_BUFFERS 64
423#define RX_LOW_WATERMARK 8 423#define RX_LOW_WATERMARK 8
424 424
425/* Size of one Rx buffer in host DRAM */
426#define IWL_RX_BUF_SIZE_4K (4 * 1024)
427#define IWL_RX_BUF_SIZE_8K (8 * 1024)
428
429/** 425/**
430 * struct iwl_rb_status - reseve buffer status 426 * struct iwl_rb_status - reseve buffer status
431 * host memory mapped FH registers 427 * host memory mapped FH registers
@@ -508,4 +504,16 @@ struct iwl_tfd {
508/* Keep Warm Size */ 504/* Keep Warm Size */
509#define IWL_KW_SIZE 0x1000 /* 4k */ 505#define IWL_KW_SIZE 0x1000 /* 4k */
510 506
507/* Fixed (non-configurable) rx data from phy */
508
509/**
510 * struct iwlagn_schedq_bc_tbl scheduler byte count table
511 * base physical address provided by SCD_DRAM_BASE_ADDR
512 * @tfd_offset 0-12 - tx command byte count
513 * 12-16 - station index
514 */
515struct iwlagn_scd_bc_tbl {
516 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
517} __packed;
518
511#endif /* !__iwl_fh_h__ */ 519#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 9d91552d13c1..d3feac9e45b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -64,99 +64,10 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
64 return --index & (n_bd - 1); 64 return --index & (n_bd - 1);
65} 65}
66 66
67/*
68 * we have 8 bits used like this:
69 *
70 * 7 6 5 4 3 2 1 0
71 * | | | | | | | |
72 * | | | | | | +-+-------- AC queue (0-3)
73 * | | | | | |
74 * | +-+-+-+-+------------ HW queue ID
75 * |
76 * +---------------------- unused
77 */
78static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
79{
80 BUG_ON(ac > 3); /* only have 2 bits */
81 BUG_ON(hwq > 31); /* only use 5 bits */
82
83 txq->swq_id = (hwq << 2) | ac;
84}
85
86static inline void iwl_wake_queue(struct iwl_priv *priv,
87 struct iwl_tx_queue *txq)
88{
89 u8 queue = txq->swq_id;
90 u8 ac = queue & 3;
91 u8 hwq = (queue >> 2) & 0x1f;
92
93 if (test_and_clear_bit(hwq, priv->queue_stopped))
94 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
95 ieee80211_wake_queue(priv->hw, ac);
96}
97
98static inline void iwl_stop_queue(struct iwl_priv *priv,
99 struct iwl_tx_queue *txq)
100{
101 u8 queue = txq->swq_id;
102 u8 ac = queue & 3;
103 u8 hwq = (queue >> 2) & 0x1f;
104
105 if (!test_and_set_bit(hwq, priv->queue_stopped))
106 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
107 ieee80211_stop_queue(priv->hw, ac);
108}
109
110static inline void iwl_wake_any_queue(struct iwl_priv *priv,
111 struct iwl_rxon_context *ctx)
112{
113 u8 ac;
114
115 for (ac = 0; ac < AC_NUM; ac++) {
116 IWL_DEBUG_INFO(priv, "Queue Status: Q[%d] %s\n",
117 ac, (atomic_read(&priv->queue_stop_count[ac]) > 0)
118 ? "stopped" : "awake");
119 iwl_wake_queue(priv, &priv->txq[ctx->ac_to_queue[ac]]);
120 }
121}
122
123#ifdef ieee80211_stop_queue
124#undef ieee80211_stop_queue
125#endif
126
127#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
128
129#ifdef ieee80211_wake_queue
130#undef ieee80211_wake_queue
131#endif
132
133#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
134
135static inline void iwl_disable_interrupts(struct iwl_priv *priv)
136{
137 clear_bit(STATUS_INT_ENABLED, &priv->status);
138
139 /* disable interrupts from uCode/NIC to host */
140 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
141
142 /* acknowledge/clear/reset any interrupts still pending
143 * from uCode or flow handler (Rx/Tx DMA) */
144 iwl_write32(priv, CSR_INT, 0xffffffff);
145 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
146 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
147}
148
149static inline void iwl_enable_rfkill_int(struct iwl_priv *priv) 67static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
150{ 68{
151 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n"); 69 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
152 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); 70 iwl_write32(bus(priv), CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
153}
154
155static inline void iwl_enable_interrupts(struct iwl_priv *priv)
156{
157 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
158 set_bit(STATUS_INT_ENABLED, &priv->status);
159 iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
160} 71}
161 72
162/** 73/**
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index aa4a90674452..3ffa8e62b856 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -25,46 +25,50 @@
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 * 26 *
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/delay.h>
29#include <linux/device.h>
28 30
29#include "iwl-io.h" 31#include "iwl-io.h"
32#include"iwl-csr.h"
33#include "iwl-debug.h"
30 34
31#define IWL_POLL_INTERVAL 10 /* microseconds */ 35#define IWL_POLL_INTERVAL 10 /* microseconds */
32 36
33static inline void __iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) 37static inline void __iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
34{ 38{
35 iwl_write32(priv, reg, iwl_read32(priv, reg) | mask); 39 iwl_write32(bus, reg, iwl_read32(bus, reg) | mask);
36} 40}
37 41
38static inline void __iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) 42static inline void __iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
39{ 43{
40 iwl_write32(priv, reg, iwl_read32(priv, reg) & ~mask); 44 iwl_write32(bus, reg, iwl_read32(bus, reg) & ~mask);
41} 45}
42 46
43void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask) 47void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask)
44{ 48{
45 unsigned long flags; 49 unsigned long flags;
46 50
47 spin_lock_irqsave(&priv->reg_lock, flags); 51 spin_lock_irqsave(&bus->reg_lock, flags);
48 __iwl_set_bit(priv, reg, mask); 52 __iwl_set_bit(bus, reg, mask);
49 spin_unlock_irqrestore(&priv->reg_lock, flags); 53 spin_unlock_irqrestore(&bus->reg_lock, flags);
50} 54}
51 55
52void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask) 56void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask)
53{ 57{
54 unsigned long flags; 58 unsigned long flags;
55 59
56 spin_lock_irqsave(&priv->reg_lock, flags); 60 spin_lock_irqsave(&bus->reg_lock, flags);
57 __iwl_clear_bit(priv, reg, mask); 61 __iwl_clear_bit(bus, reg, mask);
58 spin_unlock_irqrestore(&priv->reg_lock, flags); 62 spin_unlock_irqrestore(&bus->reg_lock, flags);
59} 63}
60 64
61int iwl_poll_bit(struct iwl_priv *priv, u32 addr, 65int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
62 u32 bits, u32 mask, int timeout) 66 u32 bits, u32 mask, int timeout)
63{ 67{
64 int t = 0; 68 int t = 0;
65 69
66 do { 70 do {
67 if ((iwl_read32(priv, addr) & mask) == (bits & mask)) 71 if ((iwl_read32(bus, addr) & mask) == (bits & mask))
68 return t; 72 return t;
69 udelay(IWL_POLL_INTERVAL); 73 udelay(IWL_POLL_INTERVAL);
70 t += IWL_POLL_INTERVAL; 74 t += IWL_POLL_INTERVAL;
@@ -73,14 +77,14 @@ int iwl_poll_bit(struct iwl_priv *priv, u32 addr,
73 return -ETIMEDOUT; 77 return -ETIMEDOUT;
74} 78}
75 79
76int iwl_grab_nic_access_silent(struct iwl_priv *priv) 80int iwl_grab_nic_access_silent(struct iwl_bus *bus)
77{ 81{
78 int ret; 82 int ret;
79 83
80 lockdep_assert_held(&priv->reg_lock); 84 lockdep_assert_held(&bus->reg_lock);
81 85
82 /* this bit wakes up the NIC */ 86 /* this bit wakes up the NIC */
83 __iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 87 __iwl_set_bit(bus, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
84 88
85 /* 89 /*
86 * These bits say the device is running, and should keep running for 90 * These bits say the device is running, and should keep running for
@@ -101,70 +105,70 @@ int iwl_grab_nic_access_silent(struct iwl_priv *priv)
101 * 5000 series and later (including 1000 series) have non-volatile SRAM, 105 * 5000 series and later (including 1000 series) have non-volatile SRAM,
102 * and do not save/restore SRAM when power cycling. 106 * and do not save/restore SRAM when power cycling.
103 */ 107 */
104 ret = iwl_poll_bit(priv, CSR_GP_CNTRL, 108 ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
105 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 109 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
106 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 110 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
107 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); 111 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
108 if (ret < 0) { 112 if (ret < 0) {
109 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI); 113 iwl_write32(bus, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
110 return -EIO; 114 return -EIO;
111 } 115 }
112 116
113 return 0; 117 return 0;
114} 118}
115 119
116int iwl_grab_nic_access(struct iwl_priv *priv) 120int iwl_grab_nic_access(struct iwl_bus *bus)
117{ 121{
118 int ret = iwl_grab_nic_access_silent(priv); 122 int ret = iwl_grab_nic_access_silent(bus);
119 if (ret) { 123 if (ret) {
120 u32 val = iwl_read32(priv, CSR_GP_CNTRL); 124 u32 val = iwl_read32(bus, CSR_GP_CNTRL);
121 IWL_ERR(priv, 125 IWL_ERR(bus,
122 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val); 126 "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
123 } 127 }
124 128
125 return ret; 129 return ret;
126} 130}
127 131
128void iwl_release_nic_access(struct iwl_priv *priv) 132void iwl_release_nic_access(struct iwl_bus *bus)
129{ 133{
130 lockdep_assert_held(&priv->reg_lock); 134 lockdep_assert_held(&bus->reg_lock);
131 __iwl_clear_bit(priv, CSR_GP_CNTRL, 135 __iwl_clear_bit(bus, CSR_GP_CNTRL,
132 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 136 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
133} 137}
134 138
135u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg) 139u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
136{ 140{
137 u32 value; 141 u32 value;
138 unsigned long flags; 142 unsigned long flags;
139 143
140 spin_lock_irqsave(&priv->reg_lock, flags); 144 spin_lock_irqsave(&bus->reg_lock, flags);
141 iwl_grab_nic_access(priv); 145 iwl_grab_nic_access(bus);
142 value = iwl_read32(priv, reg); 146 value = iwl_read32(bus(bus), reg);
143 iwl_release_nic_access(priv); 147 iwl_release_nic_access(bus);
144 spin_unlock_irqrestore(&priv->reg_lock, flags); 148 spin_unlock_irqrestore(&bus->reg_lock, flags);
145 149
146 return value; 150 return value;
147} 151}
148 152
149void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value) 153void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value)
150{ 154{
151 unsigned long flags; 155 unsigned long flags;
152 156
153 spin_lock_irqsave(&priv->reg_lock, flags); 157 spin_lock_irqsave(&bus->reg_lock, flags);
154 if (!iwl_grab_nic_access(priv)) { 158 if (!iwl_grab_nic_access(bus)) {
155 iwl_write32(priv, reg, value); 159 iwl_write32(bus, reg, value);
156 iwl_release_nic_access(priv); 160 iwl_release_nic_access(bus);
157 } 161 }
158 spin_unlock_irqrestore(&priv->reg_lock, flags); 162 spin_unlock_irqrestore(&bus->reg_lock, flags);
159} 163}
160 164
161int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask, 165int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
162 int timeout) 166 int timeout)
163{ 167{
164 int t = 0; 168 int t = 0;
165 169
166 do { 170 do {
167 if ((iwl_read_direct32(priv, addr) & mask) == mask) 171 if ((iwl_read_direct32(bus, addr) & mask) == mask)
168 return t; 172 return t;
169 udelay(IWL_POLL_INTERVAL); 173 udelay(IWL_POLL_INTERVAL);
170 t += IWL_POLL_INTERVAL; 174 t += IWL_POLL_INTERVAL;
@@ -173,122 +177,122 @@ int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask,
173 return -ETIMEDOUT; 177 return -ETIMEDOUT;
174} 178}
175 179
176static inline u32 __iwl_read_prph(struct iwl_priv *priv, u32 reg) 180static inline u32 __iwl_read_prph(struct iwl_bus *bus, u32 reg)
177{ 181{
178 iwl_write32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); 182 iwl_write32(bus, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
179 rmb(); 183 rmb();
180 return iwl_read32(priv, HBUS_TARG_PRPH_RDAT); 184 return iwl_read32(bus, HBUS_TARG_PRPH_RDAT);
181} 185}
182 186
183static inline void __iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val) 187static inline void __iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
184{ 188{
185 iwl_write32(priv, HBUS_TARG_PRPH_WADDR, 189 iwl_write32(bus, HBUS_TARG_PRPH_WADDR,
186 ((addr & 0x0000FFFF) | (3 << 24))); 190 ((addr & 0x0000FFFF) | (3 << 24)));
187 wmb(); 191 wmb();
188 iwl_write32(priv, HBUS_TARG_PRPH_WDAT, val); 192 iwl_write32(bus, HBUS_TARG_PRPH_WDAT, val);
189} 193}
190 194
191u32 iwl_read_prph(struct iwl_priv *priv, u32 reg) 195u32 iwl_read_prph(struct iwl_bus *bus, u32 reg)
192{ 196{
193 unsigned long flags; 197 unsigned long flags;
194 u32 val; 198 u32 val;
195 199
196 spin_lock_irqsave(&priv->reg_lock, flags); 200 spin_lock_irqsave(&bus->reg_lock, flags);
197 iwl_grab_nic_access(priv); 201 iwl_grab_nic_access(bus);
198 val = __iwl_read_prph(priv, reg); 202 val = __iwl_read_prph(bus, reg);
199 iwl_release_nic_access(priv); 203 iwl_release_nic_access(bus);
200 spin_unlock_irqrestore(&priv->reg_lock, flags); 204 spin_unlock_irqrestore(&bus->reg_lock, flags);
201 return val; 205 return val;
202} 206}
203 207
204void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val) 208void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val)
205{ 209{
206 unsigned long flags; 210 unsigned long flags;
207 211
208 spin_lock_irqsave(&priv->reg_lock, flags); 212 spin_lock_irqsave(&bus->reg_lock, flags);
209 if (!iwl_grab_nic_access(priv)) { 213 if (!iwl_grab_nic_access(bus)) {
210 __iwl_write_prph(priv, addr, val); 214 __iwl_write_prph(bus, addr, val);
211 iwl_release_nic_access(priv); 215 iwl_release_nic_access(bus);
212 } 216 }
213 spin_unlock_irqrestore(&priv->reg_lock, flags); 217 spin_unlock_irqrestore(&bus->reg_lock, flags);
214} 218}
215 219
216void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) 220void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
217{ 221{
218 unsigned long flags; 222 unsigned long flags;
219 223
220 spin_lock_irqsave(&priv->reg_lock, flags); 224 spin_lock_irqsave(&bus->reg_lock, flags);
221 iwl_grab_nic_access(priv); 225 iwl_grab_nic_access(bus);
222 __iwl_write_prph(priv, reg, __iwl_read_prph(priv, reg) | mask); 226 __iwl_write_prph(bus, reg, __iwl_read_prph(bus, reg) | mask);
223 iwl_release_nic_access(priv); 227 iwl_release_nic_access(bus);
224 spin_unlock_irqrestore(&priv->reg_lock, flags); 228 spin_unlock_irqrestore(&bus->reg_lock, flags);
225} 229}
226 230
227void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, 231void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
228 u32 bits, u32 mask) 232 u32 bits, u32 mask)
229{ 233{
230 unsigned long flags; 234 unsigned long flags;
231 235
232 spin_lock_irqsave(&priv->reg_lock, flags); 236 spin_lock_irqsave(&bus->reg_lock, flags);
233 iwl_grab_nic_access(priv); 237 iwl_grab_nic_access(bus);
234 __iwl_write_prph(priv, reg, 238 __iwl_write_prph(bus, reg,
235 (__iwl_read_prph(priv, reg) & mask) | bits); 239 (__iwl_read_prph(bus, reg) & mask) | bits);
236 iwl_release_nic_access(priv); 240 iwl_release_nic_access(bus);
237 spin_unlock_irqrestore(&priv->reg_lock, flags); 241 spin_unlock_irqrestore(&bus->reg_lock, flags);
238} 242}
239 243
240void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask) 244void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask)
241{ 245{
242 unsigned long flags; 246 unsigned long flags;
243 u32 val; 247 u32 val;
244 248
245 spin_lock_irqsave(&priv->reg_lock, flags); 249 spin_lock_irqsave(&bus->reg_lock, flags);
246 iwl_grab_nic_access(priv); 250 iwl_grab_nic_access(bus);
247 val = __iwl_read_prph(priv, reg); 251 val = __iwl_read_prph(bus, reg);
248 __iwl_write_prph(priv, reg, (val & ~mask)); 252 __iwl_write_prph(bus, reg, (val & ~mask));
249 iwl_release_nic_access(priv); 253 iwl_release_nic_access(bus);
250 spin_unlock_irqrestore(&priv->reg_lock, flags); 254 spin_unlock_irqrestore(&bus->reg_lock, flags);
251} 255}
252 256
253void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr, 257void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
254 void *buf, int words) 258 void *buf, int words)
255{ 259{
256 unsigned long flags; 260 unsigned long flags;
257 int offs; 261 int offs;
258 u32 *vals = buf; 262 u32 *vals = buf;
259 263
260 spin_lock_irqsave(&priv->reg_lock, flags); 264 spin_lock_irqsave(&bus->reg_lock, flags);
261 iwl_grab_nic_access(priv); 265 iwl_grab_nic_access(bus);
262 266
263 iwl_write32(priv, HBUS_TARG_MEM_RADDR, addr); 267 iwl_write32(bus, HBUS_TARG_MEM_RADDR, addr);
264 rmb(); 268 rmb();
265 269
266 for (offs = 0; offs < words; offs++) 270 for (offs = 0; offs < words; offs++)
267 vals[offs] = iwl_read32(priv, HBUS_TARG_MEM_RDAT); 271 vals[offs] = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
268 272
269 iwl_release_nic_access(priv); 273 iwl_release_nic_access(bus);
270 spin_unlock_irqrestore(&priv->reg_lock, flags); 274 spin_unlock_irqrestore(&bus->reg_lock, flags);
271} 275}
272 276
273u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr) 277u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
274{ 278{
275 u32 value; 279 u32 value;
276 280
277 _iwl_read_targ_mem_words(priv, addr, &value, 1); 281 _iwl_read_targ_mem_words(bus, addr, &value, 1);
278 282
279 return value; 283 return value;
280} 284}
281 285
282void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val) 286void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
283{ 287{
284 unsigned long flags; 288 unsigned long flags;
285 289
286 spin_lock_irqsave(&priv->reg_lock, flags); 290 spin_lock_irqsave(&bus->reg_lock, flags);
287 if (!iwl_grab_nic_access(priv)) { 291 if (!iwl_grab_nic_access(bus)) {
288 iwl_write32(priv, HBUS_TARG_MEM_WADDR, addr); 292 iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
289 wmb(); 293 wmb();
290 iwl_write32(priv, HBUS_TARG_MEM_WDAT, val); 294 iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
291 iwl_release_nic_access(priv); 295 iwl_release_nic_access(bus);
292 } 296 }
293 spin_unlock_irqrestore(&priv->reg_lock, flags); 297 spin_unlock_irqrestore(&bus->reg_lock, flags);
294} 298}
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 19a093101122..ced2cbeb6eae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -29,65 +29,62 @@
29#ifndef __iwl_io_h__ 29#ifndef __iwl_io_h__
30#define __iwl_io_h__ 30#define __iwl_io_h__
31 31
32#include <linux/io.h>
33
34#include "iwl-dev.h"
35#include "iwl-debug.h"
36#include "iwl-devtrace.h" 32#include "iwl-devtrace.h"
33#include "iwl-shared.h"
37#include "iwl-bus.h" 34#include "iwl-bus.h"
38 35
39static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val) 36static inline void iwl_write8(struct iwl_bus *bus, u32 ofs, u8 val)
40{ 37{
41 trace_iwlwifi_dev_iowrite8(priv, ofs, val); 38 trace_iwlwifi_dev_iowrite8(priv(bus), ofs, val);
42 bus_write8(priv->bus, ofs, val); 39 bus_write8(bus, ofs, val);
43} 40}
44 41
45static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val) 42static inline void iwl_write32(struct iwl_bus *bus, u32 ofs, u32 val)
46{ 43{
47 trace_iwlwifi_dev_iowrite32(priv, ofs, val); 44 trace_iwlwifi_dev_iowrite32(priv(bus), ofs, val);
48 bus_write32(priv->bus, ofs, val); 45 bus_write32(bus, ofs, val);
49} 46}
50 47
51static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs) 48static inline u32 iwl_read32(struct iwl_bus *bus, u32 ofs)
52{ 49{
53 u32 val = bus_read32(priv->bus, ofs); 50 u32 val = bus_read32(bus, ofs);
54 trace_iwlwifi_dev_ioread32(priv, ofs, val); 51 trace_iwlwifi_dev_ioread32(priv(bus), ofs, val);
55 return val; 52 return val;
56} 53}
57 54
58void iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask); 55void iwl_set_bit(struct iwl_bus *bus, u32 reg, u32 mask);
59void iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask); 56void iwl_clear_bit(struct iwl_bus *bus, u32 reg, u32 mask);
60 57
61int iwl_poll_bit(struct iwl_priv *priv, u32 addr, 58int iwl_poll_bit(struct iwl_bus *bus, u32 addr,
62 u32 bits, u32 mask, int timeout); 59 u32 bits, u32 mask, int timeout);
63int iwl_poll_direct_bit(struct iwl_priv *priv, u32 addr, u32 mask, 60int iwl_poll_direct_bit(struct iwl_bus *bus, u32 addr, u32 mask,
64 int timeout); 61 int timeout);
65 62
66int iwl_grab_nic_access_silent(struct iwl_priv *priv); 63int iwl_grab_nic_access_silent(struct iwl_bus *bus);
67int iwl_grab_nic_access(struct iwl_priv *priv); 64int iwl_grab_nic_access(struct iwl_bus *bus);
68void iwl_release_nic_access(struct iwl_priv *priv); 65void iwl_release_nic_access(struct iwl_bus *bus);
69 66
70u32 iwl_read_direct32(struct iwl_priv *priv, u32 reg); 67u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg);
71void iwl_write_direct32(struct iwl_priv *priv, u32 reg, u32 value); 68void iwl_write_direct32(struct iwl_bus *bus, u32 reg, u32 value);
72 69
73 70
74u32 iwl_read_prph(struct iwl_priv *priv, u32 reg); 71u32 iwl_read_prph(struct iwl_bus *bus, u32 reg);
75void iwl_write_prph(struct iwl_priv *priv, u32 addr, u32 val); 72void iwl_write_prph(struct iwl_bus *bus, u32 addr, u32 val);
76void iwl_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask); 73void iwl_set_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
77void iwl_set_bits_mask_prph(struct iwl_priv *priv, u32 reg, 74void iwl_set_bits_mask_prph(struct iwl_bus *bus, u32 reg,
78 u32 bits, u32 mask); 75 u32 bits, u32 mask);
79void iwl_clear_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask); 76void iwl_clear_bits_prph(struct iwl_bus *bus, u32 reg, u32 mask);
80 77
81void _iwl_read_targ_mem_words(struct iwl_priv *priv, u32 addr, 78void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
82 void *buf, int words); 79 void *buf, int words);
83 80
84#define iwl_read_targ_mem_words(priv, addr, buf, bufsize) \ 81#define iwl_read_targ_mem_words(bus, addr, buf, bufsize) \
85 do { \ 82 do { \
86 BUILD_BUG_ON((bufsize) % sizeof(u32)); \ 83 BUILD_BUG_ON((bufsize) % sizeof(u32)); \
87 _iwl_read_targ_mem_words(priv, addr, buf, \ 84 _iwl_read_targ_mem_words(bus, addr, buf, \
88 (bufsize) / sizeof(u32));\ 85 (bufsize) / sizeof(u32));\
89 } while (0) 86 } while (0)
90 87
91u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr); 88u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
92void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val); 89void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
93#endif 90#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 1a5252d8ca73..7dffed186f0a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -40,6 +40,7 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-io.h" 41#include "iwl-io.h"
42#include "iwl-trans.h" 42#include "iwl-trans.h"
43#include "iwl-shared.h"
43 44
44/* Throughput OFF time(ms) ON time (ms) 45/* Throughput OFF time(ms) ON time (ms)
45 * >300 25 25 46 * >300 25 25
@@ -70,7 +71,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
70/* Set led register off */ 71/* Set led register off */
71void iwlagn_led_enable(struct iwl_priv *priv) 72void iwlagn_led_enable(struct iwl_priv *priv)
72{ 73{
73 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON); 74 iwl_write32(bus(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON);
74} 75}
75 76
76/* 77/*
@@ -107,11 +108,11 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
107 }; 108 };
108 u32 reg; 109 u32 reg;
109 110
110 reg = iwl_read32(priv, CSR_LED_REG); 111 reg = iwl_read32(bus(priv), CSR_LED_REG);
111 if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) 112 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
112 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK); 113 iwl_write32(bus(priv), CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
113 114
114 return trans_send_cmd(&priv->trans, &cmd); 115 return iwl_trans_send_cmd(trans(priv), &cmd);
115} 116}
116 117
117/* Set led pattern command */ 118/* Set led pattern command */
@@ -125,7 +126,7 @@ static int iwl_led_cmd(struct iwl_priv *priv,
125 }; 126 };
126 int ret; 127 int ret;
127 128
128 if (!test_bit(STATUS_READY, &priv->status)) 129 if (!test_bit(STATUS_READY, &priv->shrd->status))
129 return -EBUSY; 130 return -EBUSY;
130 131
131 if (priv->blink_on == on && priv->blink_off == off) 132 if (priv->blink_on == on && priv->blink_off == off)
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 69d4ec467dca..e41f53e5c307 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -64,9 +64,11 @@
64#include <linux/pci-aspm.h> 64#include <linux/pci-aspm.h>
65 65
66#include "iwl-bus.h" 66#include "iwl-bus.h"
67#include "iwl-agn.h"
68#include "iwl-core.h"
69#include "iwl-io.h" 67#include "iwl-io.h"
68#include "iwl-shared.h"
69#include "iwl-trans.h"
70#include "iwl-csr.h"
71#include "iwl-pci.h"
70 72
71/* PCI registers */ 73/* PCI registers */
72#define PCI_CFG_RETRY_TIMEOUT 0x041 74#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -91,6 +93,7 @@ static u16 iwl_pciexp_link_ctrl(struct iwl_bus *bus)
91{ 93{
92 int pos; 94 int pos;
93 u16 pci_lnk_ctl; 95 u16 pci_lnk_ctl;
96
94 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus); 97 struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
95 98
96 pos = pci_pcie_cap(pci_dev); 99 pos = pci_pcie_cap(pci_dev);
@@ -120,21 +123,21 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
120 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == 123 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
121 PCI_CFG_LINK_CTRL_VAL_L1_EN) { 124 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
122 /* L1-ASPM enabled; disable(!) L0S */ 125 /* L1-ASPM enabled; disable(!) L0S */
123 iwl_set_bit(bus->drv_data, CSR_GIO_REG, 126 iwl_set_bit(bus, CSR_GIO_REG,
124 CSR_GIO_REG_VAL_L0S_ENABLED); 127 CSR_GIO_REG_VAL_L0S_ENABLED);
125 dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n"); 128 dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
126 } else { 129 } else {
127 /* L1-ASPM disabled; enable(!) L0S */ 130 /* L1-ASPM disabled; enable(!) L0S */
128 iwl_clear_bit(bus->drv_data, CSR_GIO_REG, 131 iwl_clear_bit(bus, CSR_GIO_REG,
129 CSR_GIO_REG_VAL_L0S_ENABLED); 132 CSR_GIO_REG_VAL_L0S_ENABLED);
130 dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n"); 133 dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
131 } 134 }
132} 135}
133 136
134static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data) 137static void iwl_pci_set_drv_data(struct iwl_bus *bus, struct iwl_shared *shrd)
135{ 138{
136 bus->drv_data = drv_data; 139 bus->shrd = shrd;
137 pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_data); 140 pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), shrd);
138} 141}
139 142
140static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[], 143static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
@@ -162,7 +165,7 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
162 return val; 165 return val;
163} 166}
164 167
165static struct iwl_bus_ops pci_ops = { 168static const struct iwl_bus_ops bus_ops_pci = {
166 .get_pm_support = iwl_pci_is_pm_supported, 169 .get_pm_support = iwl_pci_is_pm_supported,
167 .apm_config = iwl_pci_apm_config, 170 .apm_config = iwl_pci_apm_config,
168 .set_drv_data = iwl_pci_set_drv_data, 171 .set_drv_data = iwl_pci_set_drv_data,
@@ -256,6 +259,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
256 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)}, 259 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
257 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)}, 260 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
258 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)}, 261 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
262 {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
259 263
260/* 6x30 Series */ 264/* 6x30 Series */
261 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)}, 265 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -328,6 +332,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
328 {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)}, 332 {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
329 {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)}, 333 {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
330 {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)}, 334 {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
335 {IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
331 336
332/* 2x30 Series */ 337/* 2x30 Series */
333 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)}, 338 {IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
@@ -457,9 +462,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
457 462
458 bus->dev = &pdev->dev; 463 bus->dev = &pdev->dev;
459 bus->irq = pdev->irq; 464 bus->irq = pdev->irq;
460 bus->ops = &pci_ops; 465 bus->ops = &bus_ops_pci;
461 466
462 err = iwl_probe(bus, cfg); 467 err = iwl_probe(bus, &trans_ops_pcie, cfg);
463 if (err) 468 if (err)
464 goto out_disable_msi; 469 goto out_disable_msi;
465 return 0; 470 return 0;
@@ -493,33 +498,33 @@ static void iwl_pci_down(struct iwl_bus *bus)
493 498
494static void __devexit iwl_pci_remove(struct pci_dev *pdev) 499static void __devexit iwl_pci_remove(struct pci_dev *pdev)
495{ 500{
496 struct iwl_priv *priv = pci_get_drvdata(pdev); 501 struct iwl_shared *shrd = pci_get_drvdata(pdev);
497 void *bus_specific = priv->bus->bus_specific; 502 struct iwl_bus *bus = shrd->bus;
498 503
499 iwl_remove(priv); 504 iwl_remove(shrd->priv);
500 505
501 iwl_pci_down(bus_specific); 506 iwl_pci_down(bus);
502} 507}
503 508
504#ifdef CONFIG_PM 509#ifdef CONFIG_PM_SLEEP
505 510
506static int iwl_pci_suspend(struct device *device) 511static int iwl_pci_suspend(struct device *device)
507{ 512{
508 struct pci_dev *pdev = to_pci_dev(device); 513 struct pci_dev *pdev = to_pci_dev(device);
509 struct iwl_priv *priv = pci_get_drvdata(pdev); 514 struct iwl_shared *shrd = pci_get_drvdata(pdev);
510 515
511 /* Before you put code here, think about WoWLAN. You cannot check here 516 /* Before you put code here, think about WoWLAN. You cannot check here
512 * whether WoWLAN is enabled or not, and your code will run even if 517 * whether WoWLAN is enabled or not, and your code will run even if
513 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx. 518 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
514 */ 519 */
515 520
516 return iwl_suspend(priv); 521 return iwl_trans_suspend(shrd->trans);
517} 522}
518 523
519static int iwl_pci_resume(struct device *device) 524static int iwl_pci_resume(struct device *device)
520{ 525{
521 struct pci_dev *pdev = to_pci_dev(device); 526 struct pci_dev *pdev = to_pci_dev(device);
522 struct iwl_priv *priv = pci_get_drvdata(pdev); 527 struct iwl_shared *shrd = pci_get_drvdata(pdev);
523 528
524 /* Before you put code here, think about WoWLAN. You cannot check here 529 /* Before you put code here, think about WoWLAN. You cannot check here
525 * whether WoWLAN is enabled or not, and your code will run even if 530 * whether WoWLAN is enabled or not, and your code will run even if
@@ -532,7 +537,7 @@ static int iwl_pci_resume(struct device *device)
532 */ 537 */
533 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 538 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
534 539
535 return iwl_resume(priv); 540 return iwl_trans_resume(shrd->trans);
536} 541}
537 542
538static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); 543static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.h b/drivers/net/wireless/iwlwifi/iwl-pci.h
new file mode 100644
index 000000000000..c0aea9e092cb
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.h
@@ -0,0 +1,116 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_pci_h__
64#define __iwl_pci_h__
65
66
67/* This file includes the declaration that are internal to the PCI
68 * implementation of the bus layer
69 */
70
71/* configuration for the _agn devices */
72extern struct iwl_cfg iwl5300_agn_cfg;
73extern struct iwl_cfg iwl5100_agn_cfg;
74extern struct iwl_cfg iwl5350_agn_cfg;
75extern struct iwl_cfg iwl5100_bgn_cfg;
76extern struct iwl_cfg iwl5100_abg_cfg;
77extern struct iwl_cfg iwl5150_agn_cfg;
78extern struct iwl_cfg iwl5150_abg_cfg;
79extern struct iwl_cfg iwl6005_2agn_cfg;
80extern struct iwl_cfg iwl6005_2abg_cfg;
81extern struct iwl_cfg iwl6005_2bg_cfg;
82extern struct iwl_cfg iwl6005_2agn_sff_cfg;
83extern struct iwl_cfg iwl1030_bgn_cfg;
84extern struct iwl_cfg iwl1030_bg_cfg;
85extern struct iwl_cfg iwl6030_2agn_cfg;
86extern struct iwl_cfg iwl6030_2abg_cfg;
87extern struct iwl_cfg iwl6030_2bgn_cfg;
88extern struct iwl_cfg iwl6030_2bg_cfg;
89extern struct iwl_cfg iwl6000i_2agn_cfg;
90extern struct iwl_cfg iwl6000i_2abg_cfg;
91extern struct iwl_cfg iwl6000i_2bg_cfg;
92extern struct iwl_cfg iwl6000_3agn_cfg;
93extern struct iwl_cfg iwl6050_2agn_cfg;
94extern struct iwl_cfg iwl6050_2abg_cfg;
95extern struct iwl_cfg iwl6150_bgn_cfg;
96extern struct iwl_cfg iwl6150_bg_cfg;
97extern struct iwl_cfg iwl1000_bgn_cfg;
98extern struct iwl_cfg iwl1000_bg_cfg;
99extern struct iwl_cfg iwl100_bgn_cfg;
100extern struct iwl_cfg iwl100_bg_cfg;
101extern struct iwl_cfg iwl130_bgn_cfg;
102extern struct iwl_cfg iwl130_bg_cfg;
103extern struct iwl_cfg iwl2000_2bgn_cfg;
104extern struct iwl_cfg iwl2000_2bg_cfg;
105extern struct iwl_cfg iwl2000_2bgn_d_cfg;
106extern struct iwl_cfg iwl2030_2bgn_cfg;
107extern struct iwl_cfg iwl2030_2bg_cfg;
108extern struct iwl_cfg iwl6035_2agn_cfg;
109extern struct iwl_cfg iwl6035_2abg_cfg;
110extern struct iwl_cfg iwl6035_2bg_cfg;
111extern struct iwl_cfg iwl105_bg_cfg;
112extern struct iwl_cfg iwl105_bgn_cfg;
113extern struct iwl_cfg iwl135_bg_cfg;
114extern struct iwl_cfg iwl135_bgn_cfg;
115
116#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index cd64df05f9ed..62cd781192b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -43,6 +43,7 @@
43#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-power.h" 44#include "iwl-power.h"
45#include "iwl-trans.h" 45#include "iwl-trans.h"
46#include "iwl-shared.h"
46 47
47/* 48/*
48 * Setting power level allows the card to go to sleep when not busy. 49 * Setting power level allows the card to go to sleep when not busy.
@@ -214,7 +215,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
214 else 215 else
215 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 216 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
216 217
217 if (priv->cfg->base_params->shadow_reg_enable) 218 if (hw_params(priv).shadow_reg_enable)
218 cmd->flags |= IWL_POWER_SHADOW_REG_ENA; 219 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
219 else 220 else
220 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 221 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -300,7 +301,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
300 if (priv->power_data.bus_pm) 301 if (priv->power_data.bus_pm)
301 cmd->flags |= IWL_POWER_PCI_PM_MSK; 302 cmd->flags |= IWL_POWER_PCI_PM_MSK;
302 303
303 if (priv->cfg->base_params->shadow_reg_enable) 304 if (hw_params(priv).shadow_reg_enable)
304 cmd->flags |= IWL_POWER_SHADOW_REG_ENA; 305 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
305 else 306 else
306 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 307 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
@@ -335,7 +336,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
335 le32_to_cpu(cmd->sleep_interval[3]), 336 le32_to_cpu(cmd->sleep_interval[3]),
336 le32_to_cpu(cmd->sleep_interval[4])); 337 le32_to_cpu(cmd->sleep_interval[4]));
337 338
338 return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC, 339 return iwl_trans_send_cmd_pdu(trans(priv), POWER_TABLE_CMD, CMD_SYNC,
339 sizeof(struct iwl_powertable_cmd), cmd); 340 sizeof(struct iwl_powertable_cmd), cmd);
340} 341}
341 342
@@ -347,7 +348,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
347 348
348 dtimper = priv->hw->conf.ps_dtim_period ?: 1; 349 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
349 350
350 if (priv->wowlan) 351 if (priv->shrd->wowlan)
351 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); 352 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
352 else if (!priv->cfg->base_params->no_idle_support && 353 else if (!priv->cfg->base_params->no_idle_support &&
353 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 354 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
@@ -382,7 +383,7 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
382 int ret; 383 int ret;
383 bool update_chains; 384 bool update_chains;
384 385
385 lockdep_assert_held(&priv->mutex); 386 lockdep_assert_held(&priv->shrd->mutex);
386 387
387 /* Don't update the RX chain when chain noise calibration is running */ 388 /* Don't update the RX chain when chain noise calibration is running */
388 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || 389 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
@@ -391,23 +392,23 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
391 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force) 392 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
392 return 0; 393 return 0;
393 394
394 if (!iwl_is_ready_rf(priv)) 395 if (!iwl_is_ready_rf(priv->shrd))
395 return -EIO; 396 return -EIO;
396 397
397 /* scan complete use sleep_power_next, need to be updated */ 398 /* scan complete use sleep_power_next, need to be updated */
398 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd)); 399 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
399 if (test_bit(STATUS_SCANNING, &priv->status) && !force) { 400 if (test_bit(STATUS_SCANNING, &priv->shrd->status) && !force) {
400 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n"); 401 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
401 return 0; 402 return 0;
402 } 403 }
403 404
404 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) 405 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
405 set_bit(STATUS_POWER_PMI, &priv->status); 406 set_bit(STATUS_POWER_PMI, &priv->shrd->status);
406 407
407 ret = iwl_set_power(priv, cmd); 408 ret = iwl_set_power(priv, cmd);
408 if (!ret) { 409 if (!ret) {
409 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) 410 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
410 clear_bit(STATUS_POWER_PMI, &priv->status); 411 clear_bit(STATUS_POWER_PMI, &priv->shrd->status);
411 412
412 if (update_chains) 413 if (update_chains)
413 iwl_update_chain_flags(priv); 414 iwl_update_chain_flags(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 2f267b8aabbb..bebdd828f324 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -217,8 +217,8 @@
217 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc) 217 ((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
218 218
219#define SCD_QUEUECHAIN_SEL_ALL(priv) \ 219#define SCD_QUEUECHAIN_SEL_ALL(priv) \
220 (((1<<(priv)->hw_params.max_txq_num) - 1) &\ 220 (((1<<hw_params(priv).max_txq_num) - 1) &\
221 (~(1<<(priv)->cmd_queue))) 221 (~(1<<(priv)->shrd->cmd_queue)))
222 222
223#define SCD_BASE (PRPH_BASE + 0xa02c00) 223#define SCD_BASE (PRPH_BASE + 0xa02c00)
224 224
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 8e314003b63a..8572548dd4a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -40,6 +40,7 @@
40#include "iwl-helpers.h" 40#include "iwl-helpers.h"
41#include "iwl-agn-calib.h" 41#include "iwl-agn-calib.h"
42#include "iwl-agn.h" 42#include "iwl-agn.h"
43#include "iwl-shared.h"
43 44
44 45
45/****************************************************************************** 46/******************************************************************************
@@ -73,7 +74,7 @@ static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
73 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 74 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
74 struct iwl_rxon_cmd *rxon = (void *)&ctx->active; 75 struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
75 76
76 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status)) 77 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
77 return; 78 return;
78 79
79 if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) { 80 if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
@@ -121,7 +122,8 @@ static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
121 struct iwl_rx_mem_buffer *rxb) 122 struct iwl_rx_mem_buffer *rxb)
122{ 123{
123 struct iwl_rx_packet *pkt = rxb_addr(rxb); 124 struct iwl_rx_packet *pkt = rxb_addr(rxb);
124 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 125 u32 __maybe_unused len =
126 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
125 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 127 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
126 "notification for %s:\n", len, 128 "notification for %s:\n", len,
127 get_cmd_string(pkt->hdr.cmd)); 129 get_cmd_string(pkt->hdr.cmd));
@@ -148,8 +150,8 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
148 150
149 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 151 priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
150 152
151 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 153 if (!test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
152 queue_work(priv->workqueue, &priv->beacon_update); 154 queue_work(priv->shrd->workqueue, &priv->beacon_update);
153} 155}
154 156
155/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ 157/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
@@ -258,7 +260,7 @@ static void iwl_recover_from_statistics(struct iwl_priv *priv,
258{ 260{
259 unsigned int msecs; 261 unsigned int msecs;
260 262
261 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 263 if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
262 return; 264 return;
263 265
264 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); 266 msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
@@ -474,7 +476,7 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
474 476
475 priv->rx_statistics_jiffies = stamp; 477 priv->rx_statistics_jiffies = stamp;
476 478
477 set_bit(STATUS_STATISTICS, &priv->status); 479 set_bit(STATUS_STATISTICS, &priv->shrd->status);
478 480
479 /* Reschedule the statistics timer to occur in 481 /* Reschedule the statistics timer to occur in
480 * reg_recalib_period seconds to ensure we get a 482 * reg_recalib_period seconds to ensure we get a
@@ -483,10 +485,10 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
483 mod_timer(&priv->statistics_periodic, jiffies + 485 mod_timer(&priv->statistics_periodic, jiffies +
484 msecs_to_jiffies(reg_recalib_period * 1000)); 486 msecs_to_jiffies(reg_recalib_period * 1000));
485 487
486 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 488 if (unlikely(!test_bit(STATUS_SCANNING, &priv->shrd->status)) &&
487 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) { 489 (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
488 iwl_rx_calc_noise(priv); 490 iwl_rx_calc_noise(priv);
489 queue_work(priv->workqueue, &priv->run_time_calib_work); 491 queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
490 } 492 }
491 if (priv->cfg->lib->temperature && change) 493 if (priv->cfg->lib->temperature && change)
492 priv->cfg->lib->temperature(priv); 494 priv->cfg->lib->temperature(priv);
@@ -518,7 +520,7 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
518{ 520{
519 struct iwl_rx_packet *pkt = rxb_addr(rxb); 521 struct iwl_rx_packet *pkt = rxb_addr(rxb);
520 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 522 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
521 unsigned long status = priv->status; 523 unsigned long status = priv->shrd->status;
522 524
523 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", 525 IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
524 (flags & HW_CARD_DISABLED) ? "Kill" : "On", 526 (flags & HW_CARD_DISABLED) ? "Kill" : "On",
@@ -529,16 +531,16 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
529 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 531 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
530 CT_CARD_DISABLED)) { 532 CT_CARD_DISABLED)) {
531 533
532 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 534 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
533 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 535 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
534 536
535 iwl_write_direct32(priv, HBUS_TARG_MBX_C, 537 iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
536 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 538 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
537 539
538 if (!(flags & RXON_CARD_DISABLED)) { 540 if (!(flags & RXON_CARD_DISABLED)) {
539 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 541 iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
540 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 542 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
541 iwl_write_direct32(priv, HBUS_TARG_MBX_C, 543 iwl_write_direct32(bus(priv), HBUS_TARG_MBX_C,
542 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 544 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
543 } 545 }
544 if (flags & CT_CARD_DISABLED) 546 if (flags & CT_CARD_DISABLED)
@@ -548,18 +550,18 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
548 iwl_tt_exit_ct_kill(priv); 550 iwl_tt_exit_ct_kill(priv);
549 551
550 if (flags & HW_CARD_DISABLED) 552 if (flags & HW_CARD_DISABLED)
551 set_bit(STATUS_RF_KILL_HW, &priv->status); 553 set_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
552 else 554 else
553 clear_bit(STATUS_RF_KILL_HW, &priv->status); 555 clear_bit(STATUS_RF_KILL_HW, &priv->shrd->status);
554 556
555 557
556 if (!(flags & RXON_CARD_DISABLED)) 558 if (!(flags & RXON_CARD_DISABLED))
557 iwl_scan_cancel(priv); 559 iwl_scan_cancel(priv);
558 560
559 if ((test_bit(STATUS_RF_KILL_HW, &status) != 561 if ((test_bit(STATUS_RF_KILL_HW, &status) !=
560 test_bit(STATUS_RF_KILL_HW, &priv->status))) 562 test_bit(STATUS_RF_KILL_HW, &priv->shrd->status)))
561 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 563 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
562 test_bit(STATUS_RF_KILL_HW, &priv->status)); 564 test_bit(STATUS_RF_KILL_HW, &priv->shrd->status));
563 else 565 else
564 wake_up_interruptible(&priv->wait_command_queue); 566 wake_up_interruptible(&priv->wait_command_queue);
565} 567}
@@ -580,7 +582,7 @@ static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
580 le32_to_cpu(missed_beacon->total_missed_becons), 582 le32_to_cpu(missed_beacon->total_missed_becons),
581 le32_to_cpu(missed_beacon->num_recvd_beacons), 583 le32_to_cpu(missed_beacon->num_recvd_beacons),
582 le32_to_cpu(missed_beacon->num_expected_beacons)); 584 le32_to_cpu(missed_beacon->num_expected_beacons));
583 if (!test_bit(STATUS_SCANNING, &priv->status)) 585 if (!test_bit(STATUS_SCANNING, &priv->shrd->status))
584 iwl_init_sensitivity(priv); 586 iwl_init_sensitivity(priv);
585 } 587 }
586} 588}
@@ -697,7 +699,7 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
697 ctx->active.bssid_addr)) 699 ctx->active.bssid_addr))
698 continue; 700 continue;
699 ctx->last_tx_rejected = false; 701 ctx->last_tx_rejected = false;
700 iwl_wake_any_queue(priv, ctx); 702 iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
701 } 703 }
702 } 704 }
703 705
@@ -1018,7 +1020,7 @@ void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1018 * handle those that need handling via function in 1020 * handle those that need handling via function in
1019 * rx_handlers table. See iwl_setup_rx_handlers() */ 1021 * rx_handlers table. See iwl_setup_rx_handlers() */
1020 if (priv->rx_handlers[pkt->hdr.cmd]) { 1022 if (priv->rx_handlers[pkt->hdr.cmd]) {
1021 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1023 priv->rx_handlers_stats[pkt->hdr.cmd]++;
1022 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); 1024 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1023 } else { 1025 } else {
1024 /* No handling needed */ 1026 /* No handling needed */
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 28e59319f581..fc5af3475392 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -68,14 +68,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
68 /* Exit instantly with error when device is not ready 68 /* Exit instantly with error when device is not ready
69 * to receive scan abort command or it does not perform 69 * to receive scan abort command or it does not perform
70 * hardware scan currently */ 70 * hardware scan currently */
71 if (!test_bit(STATUS_READY, &priv->status) || 71 if (!test_bit(STATUS_READY, &priv->shrd->status) ||
72 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || 72 !test_bit(STATUS_GEO_CONFIGURED, &priv->shrd->status) ||
73 !test_bit(STATUS_SCAN_HW, &priv->status) || 73 !test_bit(STATUS_SCAN_HW, &priv->shrd->status) ||
74 test_bit(STATUS_FW_ERROR, &priv->status) || 74 test_bit(STATUS_FW_ERROR, &priv->shrd->status) ||
75 test_bit(STATUS_EXIT_PENDING, &priv->status)) 75 test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
76 return -EIO; 76 return -EIO;
77 77
78 ret = trans_send_cmd(&priv->trans, &cmd); 78 ret = iwl_trans_send_cmd(trans(priv), &cmd);
79 if (ret) 79 if (ret)
80 return ret; 80 return ret;
81 81
@@ -91,7 +91,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
91 ret = -EIO; 91 ret = -EIO;
92 } 92 }
93 93
94 iwl_free_pages(priv, cmd.reply_page); 94 iwl_free_pages(priv->shrd, cmd.reply_page);
95 return ret; 95 return ret;
96} 96}
97 97
@@ -116,17 +116,17 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
116 116
117void iwl_force_scan_end(struct iwl_priv *priv) 117void iwl_force_scan_end(struct iwl_priv *priv)
118{ 118{
119 lockdep_assert_held(&priv->mutex); 119 lockdep_assert_held(&priv->shrd->mutex);
120 120
121 if (!test_bit(STATUS_SCANNING, &priv->status)) { 121 if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
122 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n"); 122 IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
123 return; 123 return;
124 } 124 }
125 125
126 IWL_DEBUG_SCAN(priv, "Forcing scan end\n"); 126 IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
127 clear_bit(STATUS_SCANNING, &priv->status); 127 clear_bit(STATUS_SCANNING, &priv->shrd->status);
128 clear_bit(STATUS_SCAN_HW, &priv->status); 128 clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
129 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 129 clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
130 iwl_complete_scan(priv, true); 130 iwl_complete_scan(priv, true);
131} 131}
132 132
@@ -134,14 +134,14 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
134{ 134{
135 int ret; 135 int ret;
136 136
137 lockdep_assert_held(&priv->mutex); 137 lockdep_assert_held(&priv->shrd->mutex);
138 138
139 if (!test_bit(STATUS_SCANNING, &priv->status)) { 139 if (!test_bit(STATUS_SCANNING, &priv->shrd->status)) {
140 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n"); 140 IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
141 return; 141 return;
142 } 142 }
143 143
144 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) { 144 if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
145 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n"); 145 IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
146 return; 146 return;
147 } 147 }
@@ -160,7 +160,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
160int iwl_scan_cancel(struct iwl_priv *priv) 160int iwl_scan_cancel(struct iwl_priv *priv)
161{ 161{
162 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n"); 162 IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
163 queue_work(priv->workqueue, &priv->abort_scan); 163 queue_work(priv->shrd->workqueue, &priv->abort_scan);
164 return 0; 164 return 0;
165} 165}
166 166
@@ -173,19 +173,19 @@ int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
173{ 173{
174 unsigned long timeout = jiffies + msecs_to_jiffies(ms); 174 unsigned long timeout = jiffies + msecs_to_jiffies(ms);
175 175
176 lockdep_assert_held(&priv->mutex); 176 lockdep_assert_held(&priv->shrd->mutex);
177 177
178 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n"); 178 IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
179 179
180 iwl_do_scan_abort(priv); 180 iwl_do_scan_abort(priv);
181 181
182 while (time_before_eq(jiffies, timeout)) { 182 while (time_before_eq(jiffies, timeout)) {
183 if (!test_bit(STATUS_SCAN_HW, &priv->status)) 183 if (!test_bit(STATUS_SCAN_HW, &priv->shrd->status))
184 break; 184 break;
185 msleep(20); 185 msleep(20);
186 } 186 }
187 187
188 return test_bit(STATUS_SCAN_HW, &priv->status); 188 return test_bit(STATUS_SCAN_HW, &priv->shrd->status);
189} 189}
190 190
191/* Service response to REPLY_SCAN_CMD (0x80) */ 191/* Service response to REPLY_SCAN_CMD (0x80) */
@@ -257,13 +257,13 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
257 scan_notif->tsf_high, scan_notif->status); 257 scan_notif->tsf_high, scan_notif->status);
258 258
259 /* The HW is no longer scanning */ 259 /* The HW is no longer scanning */
260 clear_bit(STATUS_SCAN_HW, &priv->status); 260 clear_bit(STATUS_SCAN_HW, &priv->shrd->status);
261 261
262 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", 262 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
263 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 263 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
264 jiffies_to_msecs(jiffies - priv->scan_start)); 264 jiffies_to_msecs(jiffies - priv->scan_start));
265 265
266 queue_work(priv->workqueue, &priv->scan_completed); 266 queue_work(priv->shrd->workqueue, &priv->scan_completed);
267 267
268 if (priv->iw_mode != NL80211_IFTYPE_ADHOC && 268 if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
269 iwl_advanced_bt_coexist(priv) && 269 iwl_advanced_bt_coexist(priv) &&
@@ -283,7 +283,8 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
283 IWL_BT_COEX_TRAFFIC_LOAD_NONE; 283 IWL_BT_COEX_TRAFFIC_LOAD_NONE;
284 } 284 }
285 priv->bt_status = scan_notif->bt_status; 285 priv->bt_status = scan_notif->bt_status;
286 queue_work(priv->workqueue, &priv->bt_traffic_change_work); 286 queue_work(priv->shrd->workqueue,
287 &priv->bt_traffic_change_work);
287 } 288 }
288} 289}
289 290
@@ -343,7 +344,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
343 344
344void iwl_init_scan_params(struct iwl_priv *priv) 345void iwl_init_scan_params(struct iwl_priv *priv)
345{ 346{
346 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; 347 u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1;
347 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 348 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
348 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 349 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
349 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 350 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -357,22 +358,22 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
357{ 358{
358 int ret; 359 int ret;
359 360
360 lockdep_assert_held(&priv->mutex); 361 lockdep_assert_held(&priv->shrd->mutex);
361 362
362 cancel_delayed_work(&priv->scan_check); 363 cancel_delayed_work(&priv->scan_check);
363 364
364 if (!iwl_is_ready_rf(priv)) { 365 if (!iwl_is_ready_rf(priv->shrd)) {
365 IWL_WARN(priv, "Request scan called when driver not ready.\n"); 366 IWL_WARN(priv, "Request scan called when driver not ready.\n");
366 return -EIO; 367 return -EIO;
367 } 368 }
368 369
369 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 370 if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
370 IWL_DEBUG_SCAN(priv, 371 IWL_DEBUG_SCAN(priv,
371 "Multiple concurrent scan requests in parallel.\n"); 372 "Multiple concurrent scan requests in parallel.\n");
372 return -EBUSY; 373 return -EBUSY;
373 } 374 }
374 375
375 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 376 if (test_bit(STATUS_SCAN_ABORTING, &priv->shrd->status)) {
376 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n"); 377 IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
377 return -EBUSY; 378 return -EBUSY;
378 } 379 }
@@ -382,19 +383,19 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
382 scan_type == IWL_SCAN_ROC ? "remain-on-channel " : 383 scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
383 "internal short "); 384 "internal short ");
384 385
385 set_bit(STATUS_SCANNING, &priv->status); 386 set_bit(STATUS_SCANNING, &priv->shrd->status);
386 priv->scan_type = scan_type; 387 priv->scan_type = scan_type;
387 priv->scan_start = jiffies; 388 priv->scan_start = jiffies;
388 priv->scan_band = band; 389 priv->scan_band = band;
389 390
390 ret = iwlagn_request_scan(priv, vif); 391 ret = iwlagn_request_scan(priv, vif);
391 if (ret) { 392 if (ret) {
392 clear_bit(STATUS_SCANNING, &priv->status); 393 clear_bit(STATUS_SCANNING, &priv->shrd->status);
393 priv->scan_type = IWL_SCAN_NORMAL; 394 priv->scan_type = IWL_SCAN_NORMAL;
394 return ret; 395 return ret;
395 } 396 }
396 397
397 queue_delayed_work(priv->workqueue, &priv->scan_check, 398 queue_delayed_work(priv->shrd->workqueue, &priv->scan_check,
398 IWL_SCAN_CHECK_WATCHDOG); 399 IWL_SCAN_CHECK_WATCHDOG);
399 400
400 return 0; 401 return 0;
@@ -412,9 +413,9 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
412 if (req->n_channels == 0) 413 if (req->n_channels == 0)
413 return -EINVAL; 414 return -EINVAL;
414 415
415 mutex_lock(&priv->mutex); 416 mutex_lock(&priv->shrd->mutex);
416 417
417 if (test_bit(STATUS_SCANNING, &priv->status) && 418 if (test_bit(STATUS_SCANNING, &priv->shrd->status) &&
418 priv->scan_type != IWL_SCAN_NORMAL) { 419 priv->scan_type != IWL_SCAN_NORMAL) {
419 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 420 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
420 ret = -EAGAIN; 421 ret = -EAGAIN;
@@ -439,7 +440,7 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
439 IWL_DEBUG_MAC80211(priv, "leave\n"); 440 IWL_DEBUG_MAC80211(priv, "leave\n");
440 441
441out_unlock: 442out_unlock:
442 mutex_unlock(&priv->mutex); 443 mutex_unlock(&priv->shrd->mutex);
443 444
444 return ret; 445 return ret;
445} 446}
@@ -450,7 +451,7 @@ out_unlock:
450 */ 451 */
451void iwl_internal_short_hw_scan(struct iwl_priv *priv) 452void iwl_internal_short_hw_scan(struct iwl_priv *priv)
452{ 453{
453 queue_work(priv->workqueue, &priv->start_internal_scan); 454 queue_work(priv->shrd->workqueue, &priv->start_internal_scan);
454} 455}
455 456
456static void iwl_bg_start_internal_scan(struct work_struct *work) 457static void iwl_bg_start_internal_scan(struct work_struct *work)
@@ -460,14 +461,14 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
460 461
461 IWL_DEBUG_SCAN(priv, "Start internal scan\n"); 462 IWL_DEBUG_SCAN(priv, "Start internal scan\n");
462 463
463 mutex_lock(&priv->mutex); 464 mutex_lock(&priv->shrd->mutex);
464 465
465 if (priv->scan_type == IWL_SCAN_RADIO_RESET) { 466 if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
466 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); 467 IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
467 goto unlock; 468 goto unlock;
468 } 469 }
469 470
470 if (test_bit(STATUS_SCANNING, &priv->status)) { 471 if (test_bit(STATUS_SCANNING, &priv->shrd->status)) {
471 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 472 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
472 goto unlock; 473 goto unlock;
473 } 474 }
@@ -475,7 +476,7 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
475 if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band)) 476 if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
476 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n"); 477 IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
477 unlock: 478 unlock:
478 mutex_unlock(&priv->mutex); 479 mutex_unlock(&priv->shrd->mutex);
479} 480}
480 481
481static void iwl_bg_scan_check(struct work_struct *data) 482static void iwl_bg_scan_check(struct work_struct *data)
@@ -488,9 +489,9 @@ static void iwl_bg_scan_check(struct work_struct *data)
488 /* Since we are here firmware does not finish scan and 489 /* Since we are here firmware does not finish scan and
489 * most likely is in bad shape, so we don't bother to 490 * most likely is in bad shape, so we don't bother to
490 * send abort command, just force scan complete to mac80211 */ 491 * send abort command, just force scan complete to mac80211 */
491 mutex_lock(&priv->mutex); 492 mutex_lock(&priv->shrd->mutex);
492 iwl_force_scan_end(priv); 493 iwl_force_scan_end(priv);
493 mutex_unlock(&priv->mutex); 494 mutex_unlock(&priv->shrd->mutex);
494} 495}
495 496
496/** 497/**
@@ -548,9 +549,9 @@ static void iwl_bg_abort_scan(struct work_struct *work)
548 549
549 /* We keep scan_check work queued in case when firmware will not 550 /* We keep scan_check work queued in case when firmware will not
550 * report back scan completed notification */ 551 * report back scan completed notification */
551 mutex_lock(&priv->mutex); 552 mutex_lock(&priv->shrd->mutex);
552 iwl_scan_cancel_timeout(priv, 200); 553 iwl_scan_cancel_timeout(priv, 200);
553 mutex_unlock(&priv->mutex); 554 mutex_unlock(&priv->shrd->mutex);
554} 555}
555 556
556static void iwl_bg_scan_completed(struct work_struct *work) 557static void iwl_bg_scan_completed(struct work_struct *work)
@@ -563,13 +564,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
563 564
564 cancel_delayed_work(&priv->scan_check); 565 cancel_delayed_work(&priv->scan_check);
565 566
566 mutex_lock(&priv->mutex); 567 mutex_lock(&priv->shrd->mutex);
567 568
568 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status); 569 aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->shrd->status);
569 if (aborted) 570 if (aborted)
570 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n"); 571 IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
571 572
572 if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) { 573 if (!test_and_clear_bit(STATUS_SCANNING, &priv->shrd->status)) {
573 IWL_DEBUG_SCAN(priv, "Scan already completed.\n"); 574 IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
574 goto out_settings; 575 goto out_settings;
575 } 576 }
@@ -605,13 +606,13 @@ out_complete:
605 606
606out_settings: 607out_settings:
607 /* Can we still talk to firmware ? */ 608 /* Can we still talk to firmware ? */
608 if (!iwl_is_ready_rf(priv)) 609 if (!iwl_is_ready_rf(priv->shrd))
609 goto out; 610 goto out;
610 611
611 iwlagn_post_scan(priv); 612 iwlagn_post_scan(priv);
612 613
613out: 614out:
614 mutex_unlock(&priv->mutex); 615 mutex_unlock(&priv->shrd->mutex);
615} 616}
616 617
617void iwl_setup_scan_deferred_work(struct iwl_priv *priv) 618void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
@@ -629,8 +630,8 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
629 cancel_work_sync(&priv->scan_completed); 630 cancel_work_sync(&priv->scan_completed);
630 631
631 if (cancel_delayed_work_sync(&priv->scan_check)) { 632 if (cancel_delayed_work_sync(&priv->scan_check)) {
632 mutex_lock(&priv->mutex); 633 mutex_lock(&priv->shrd->mutex);
633 iwl_force_scan_end(priv); 634 iwl_force_scan_end(priv);
634 mutex_unlock(&priv->mutex); 635 mutex_unlock(&priv->shrd->mutex);
635 } 636 }
636} 637}
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
new file mode 100644
index 000000000000..8b8cd54a32e0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -0,0 +1,430 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_shared_h__
64#define __iwl_shared_h__
65
66#include <linux/types.h>
67#include <linux/spinlock.h>
68#include <linux/mutex.h>
69#include <linux/gfp.h>
70#include <net/mac80211.h>
71
72#include "iwl-commands.h"
73
74/*This files includes all the types / functions that are exported by the
75 * upper layer to the bus and transport layer */
76
77struct iwl_cfg;
78struct iwl_bus;
79struct iwl_priv;
80struct iwl_sensitivity_ranges;
81struct iwl_trans_ops;
82
83#define DRV_NAME "iwlagn"
84#define IWLWIFI_VERSION "in-tree:"
85#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
86#define DRV_AUTHOR "<ilw@linux.intel.com>"
87
88extern struct iwl_mod_params iwlagn_mod_params;
89
90/**
91 * struct iwl_mod_params
92 * @sw_crypto: using hardware encryption, default = 0
93 * @num_of_queues: number of tx queue, HW dependent
94 * @disable_11n: 11n capabilities enabled, default = 0
95 * @amsdu_size_8K: enable 8K amsdu size, default = 1
96 * @antenna: both antennas (use diversity), default = 0
97 * @restart_fw: restart firmware, default = 1
98 * @plcp_check: enable plcp health check, default = true
99 * @ack_check: disable ack health check, default = false
100 * @wd_disable: enable stuck queue check, default = false
101 * @bt_coex_active: enable bt coex, default = true
102 * @led_mode: system default, default = 0
103 * @no_sleep_autoadjust: disable autoadjust, default = true
104 * @power_save: disable power save, default = false
105 * @power_level: power level, default = 1
106 * @debug_level: levels are IWL_DL_*
107 * @ant_coupling: antenna coupling in dB, default = 0
108 * @bt_ch_announce: BT channel inhibition, default = enable
109 * @wanted_ucode_alternative: ucode alternative to use, default = 1
110 * @auto_agg: enable agg. without check, default = true
111 */
112struct iwl_mod_params {
113 int sw_crypto;
114 int num_of_queues;
115 int disable_11n;
116 int amsdu_size_8K;
117 int antenna;
118 int restart_fw;
119 bool plcp_check;
120 bool ack_check;
121 bool wd_disable;
122 bool bt_coex_active;
123 int led_mode;
124 bool no_sleep_autoadjust;
125 bool power_save;
126 int power_level;
127 u32 debug_level;
128 int ant_coupling;
129 bool bt_ch_announce;
130 int wanted_ucode_alternative;
131 bool auto_agg;
132};
133
134/**
135 * struct iwl_hw_params
136 * @max_txq_num: Max # Tx queues supported
137 * @num_ampdu_queues: num of ampdu queues
138 * @tx/rx_chains_num: Number of TX/RX chains
139 * @valid_tx/rx_ant: usable antennas
140 * @max_stations:
141 * @ht40_channel: is 40MHz width possible in band 2.4
142 * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
143 * @sku:
144 * @rx_page_order: Rx buffer page order
145 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
146 * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
147 * @sw_crypto: 0 for hw, 1 for sw
148 * @max_xxx_size: for ucode uses
149 * @ct_kill_threshold: temperature threshold
150 * @wd_timeout: TX queues watchdog timeout
151 * @calib_init_cfg: setup initial calibrations for the hw
152 * @calib_rt_cfg: setup runtime calibrations for the hw
153 * @struct iwl_sensitivity_ranges: range of sensitivity values
154 */
155struct iwl_hw_params {
156 u8 max_txq_num;
157 u8 num_ampdu_queues;
158 u8 tx_chains_num;
159 u8 rx_chains_num;
160 u8 valid_tx_ant;
161 u8 valid_rx_ant;
162 u8 max_stations;
163 u8 ht40_channel;
164 bool shadow_reg_enable;
165 u16 beacon_time_tsf_bits;
166 u16 sku;
167 u32 rx_page_order;
168 u32 max_inst_size;
169 u32 max_data_size;
170 u32 ct_kill_threshold; /* value in hw-dependent units */
171 u32 ct_kill_exit_threshold; /* value in hw-dependent units */
172 /* for 1000, 6000 series and up */
173 unsigned int wd_timeout;
174
175 u32 calib_init_cfg;
176 u32 calib_rt_cfg;
177 const struct iwl_sensitivity_ranges *sens;
178};
179
180/**
181 * struct iwl_ht_agg - aggregation status while waiting for block-ack
182 * @txq_id: Tx queue used for Tx attempt
183 * @wait_for_ba: Expect block-ack before next Tx reply
184 * @rate_n_flags: Rate at which Tx was attempted
185 *
186 * If REPLY_TX indicates that aggregation was attempted, driver must wait
187 * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
188 * until block ack arrives.
189 */
190struct iwl_ht_agg {
191 u16 txq_id;
192 u16 wait_for_ba;
193 u32 rate_n_flags;
194#define IWL_AGG_OFF 0
195#define IWL_AGG_ON 1
196#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
197#define IWL_EMPTYING_HW_QUEUE_DELBA 3
198 u8 state;
199};
200
201struct iwl_tid_data {
202 u16 seq_number; /* agn only */
203 u16 tfds_in_queue;
204 struct iwl_ht_agg agg;
205};
206
207/**
208 * struct iwl_shared - shared fields for all the layers of the driver
209 *
210 * @dbg_level_dev: dbg level set per device. Prevails on
211 * iwlagn_mod_params.debug_level if set (!= 0)
212 * @ucode_owner: IWL_OWNERSHIP_*
213 * @cmd_queue: command queue number
214 * @status: STATUS_*
215 * @bus: pointer to the bus layer data
216 * @priv: pointer to the upper layer data
217 * @hw_params: see struct iwl_hw_params
218 * @workqueue: the workqueue used by all the layers of the driver
219 * @lock: protect general shared data
220 * @sta_lock: protects the station table.
221 * If lock and sta_lock are needed, lock must be acquired first.
222 * @mutex:
223 */
224struct iwl_shared {
225#ifdef CONFIG_IWLWIFI_DEBUG
226	u32 dbg_level_dev;	/* per-device debug level; 0 = use module param */
227#endif /* CONFIG_IWLWIFI_DEBUG */
228
229#define IWL_OWNERSHIP_DRIVER 0
230#define IWL_OWNERSHIP_TM 1
231	u8 ucode_owner;		/* IWL_OWNERSHIP_DRIVER or IWL_OWNERSHIP_TM */
232	u8 cmd_queue;
233	unsigned long status;	/* STATUS_* bit field, see defines below */
234	bool wowlan;	/* NOTE(review): presumably "WoWLAN active" — not in kernel-doc above, confirm */
235
236	struct iwl_bus *bus;
237	struct iwl_priv *priv;
238	struct iwl_trans *trans;	/* transport layer data (missing from kernel-doc above) */
239	struct iwl_hw_params hw_params;
240
241	struct workqueue_struct *workqueue;
242	spinlock_t lock;
243	spinlock_t sta_lock;
244	struct mutex mutex;
245
246	/* These two shouldn't really be here, but they are needed for
247	 * iwl_queue_stop, which is called from the upper layer too.
248	 */
249	u8 mac80211_registered;
250	struct ieee80211_hw *hw;
251
252	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];	/* per-station, per-TID Tx state */
253};
254
255/* Whatever _m is (iwl_trans, iwl_priv, iwl_bus), these macros will work */
256#define priv(_m) ((_m)->shrd->priv)
257#define bus(_m) ((_m)->shrd->bus)
258#define trans(_m) ((_m)->shrd->trans)
259#define hw_params(_m) ((_m)->shrd->hw_params)
260
261#ifdef CONFIG_IWLWIFI_DEBUG
262/*
263 * iwl_get_debug_level: Return active debug level for device
264 *
265 * Using sysfs it is possible to set per device debug level. This debug
266 * level will be used if set, otherwise the global debug level which can be
267 * set via module parameter is used.
268 */
269static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
270{
271	if (shrd->dbg_level_dev)
272		return shrd->dbg_level_dev;
273	else
274		return iwlagn_mod_params.debug_level;
275}
276#else /* !CONFIG_IWLWIFI_DEBUG */
/* Without per-device debug support, only the module parameter applies. */
277static inline u32 iwl_get_debug_level(struct iwl_shared *shrd)
278{
279	return iwlagn_mod_params.debug_level;
280}
281#endif /* CONFIG_IWLWIFI_DEBUG */
282
/* Free a (possibly multi-page) Rx buffer at the order given by hw_params. */
283static inline void iwl_free_pages(struct iwl_shared *shrd, unsigned long page)
284{
285	free_pages(page, shrd->hw_params.rx_page_order);
286}
287
/* One receive buffer: a DMA-mapped page plus its free/used list linkage. */
288struct iwl_rx_mem_buffer {
289	dma_addr_t page_dma;	/* bus address of the page */
290	struct page *page;	/* the receive buffer page itself */
291	struct list_head list;	/* linkage on the rx_free / rx_used lists */
292};
293
/* Kernel virtual address of an Rx buffer's page. The argument is
 * parenthesized so expressions like rxb_addr(rxb + 1) expand safely. */
294#define rxb_addr(r) page_address((r)->page)
295
296/*
297 * mac80211 queues, ACs, hardware queues, FIFOs.
298 *
299 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
300 *
301 * Mac80211 uses the following numbers, which we get as from it
302 * by way of skb_get_queue_mapping(skb):
303 *
304 * VO 0
305 * VI 1
306 * BE 2
307 * BK 3
308 *
309 *
310 * Regular (not A-MPDU) frames are put into hardware queues corresponding
311 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
312 * own queue per aggregation session (RA/TID combination), such queues are
313 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
314 * order to map frames to the right queue, we also need an AC->hw queue
315 * mapping. This is implemented here.
316 *
317 * Due to the way hw queues are set up (by the hw specific modules like
318 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
319 * mapping.
320 */
321
/* Maps TID (the array index, 0-7) to its mac80211 access category. */
322static const u8 tid_to_ac[] = {
323	IEEE80211_AC_BE,	/* TID 0 */
324	IEEE80211_AC_BK,	/* TID 1 */
325	IEEE80211_AC_BK,	/* TID 2 */
326	IEEE80211_AC_BE,	/* TID 3 */
327	IEEE80211_AC_VI,	/* TID 4 */
328	IEEE80211_AC_VI,	/* TID 5 */
329	IEEE80211_AC_VO,	/* TID 6 */
330	IEEE80211_AC_VO	/* TID 7 */
331};
332
333static inline int get_ac_from_tid(u16 tid)
334{
335 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
336 return tid_to_ac[tid];
337
338 /* no support for TIDs 8-15 yet */
339 return -EINVAL;
340}
341
/* RXON context identifiers. */
342enum iwl_rxon_context_id {
343	IWL_RXON_CTX_BSS,
344	IWL_RXON_CTX_PAN,
345
346	NUM_IWL_RXON_CTX	/* number of contexts -- keep last */
347};
348
349#ifdef CONFIG_PM
350int iwl_suspend(struct iwl_priv *priv);
351int iwl_resume(struct iwl_priv *priv);
352#endif /* CONFIG_PM */
353
354int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
355 struct iwl_cfg *cfg);
356void __devexit iwl_remove(struct iwl_priv * priv);
357
358void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
359 enum iwl_rxon_context_id ctx,
360 u8 sta_id, u8 tid);
361void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
362 enum iwl_rxon_context_id ctx,
363 u8 sta_id, u8 tid);
364
365/*****************************************************
366* DRIVER STATUS FUNCTIONS
367******************************************************/
/* Bit numbers for iwl_shared::status (used with test_bit()/set_bit()). */
368#define STATUS_HCMD_ACTIVE	0	/* host command in progress */
369/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
370#define STATUS_INT_ENABLED	2
371#define STATUS_RF_KILL_HW	3
372#define STATUS_CT_KILL		4
373#define STATUS_INIT		5
374#define STATUS_ALIVE		6
375#define STATUS_READY		7	/* checked by iwl_is_ready() */
376#define STATUS_TEMPERATURE	8
377#define STATUS_GEO_CONFIGURED	9	/* checked by iwl_is_ready() */
378#define STATUS_EXIT_PENDING	10	/* shutdown in progress; blocks iwl_is_ready() */
/* bit 11 is currently unused */
379#define STATUS_STATISTICS	12
380#define STATUS_SCANNING		13
381#define STATUS_SCAN_ABORTING	14
382#define STATUS_SCAN_HW		15
383#define STATUS_POWER_PMI	16
384#define STATUS_FW_ERROR		17
385#define STATUS_DEVICE_ENABLED	18
386#define STATUS_CHANNEL_SWITCH_PENDING	19
387
388static inline int iwl_is_ready(struct iwl_shared *shrd)
389{
390 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
391 * set but EXIT_PENDING is not */
392 return test_bit(STATUS_READY, &shrd->status) &&
393 test_bit(STATUS_GEO_CONFIGURED, &shrd->status) &&
394 !test_bit(STATUS_EXIT_PENDING, &shrd->status);
395}
396
/* Non-zero once STATUS_ALIVE is set in the shared status word. */
397static inline int iwl_is_alive(struct iwl_shared *shrd)
398{
399	return test_bit(STATUS_ALIVE, &shrd->status);
400}
401
/* Non-zero while STATUS_INIT is set in the shared status word. */
402static inline int iwl_is_init(struct iwl_shared *shrd)
403{
404	return test_bit(STATUS_INIT, &shrd->status);
405}
406
/* Non-zero while the hardware rfkill switch is asserted. */
407static inline int iwl_is_rfkill_hw(struct iwl_shared *shrd)
408{
409	return test_bit(STATUS_RF_KILL_HW, &shrd->status);
410}
411
/* Overall rfkill state: currently equivalent to the hardware rfkill bit. */
412static inline int iwl_is_rfkill(struct iwl_shared *shrd)
413{
414	return iwl_is_rfkill_hw(shrd);
415}
416
/* Non-zero while STATUS_CT_KILL (thermal kill) is set. */
417static inline int iwl_is_ctkill(struct iwl_shared *shrd)
418{
419	return test_bit(STATUS_CT_KILL, &shrd->status);
420}
421
static inline int iwl_is_ready_rf(struct iwl_shared *shrd)
{
	/* An RF-killed device can never be ready. */
	return iwl_is_rfkill(shrd) ? 0 : iwl_is_ready(shrd);
}
429
430#endif /* __iwl_shared_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 1ef3b7106ad5..26b2bd4db6b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -38,7 +38,7 @@
38#include "iwl-trans.h" 38#include "iwl-trans.h"
39#include "iwl-agn.h" 39#include "iwl-agn.h"
40 40
41/* priv->sta_lock must be held */ 41/* priv->shrd->sta_lock must be held */
42static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 42static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
43{ 43{
44 44
@@ -75,7 +75,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n", 75 IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
76 sta_id); 76 sta_id);
77 77
78 spin_lock_irqsave(&priv->sta_lock, flags); 78 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
79 79
80 switch (pkt->u.add_sta.status) { 80 switch (pkt->u.add_sta.status) {
81 case ADD_STA_SUCCESS_MSK: 81 case ADD_STA_SUCCESS_MSK:
@@ -118,7 +118,7 @@ static int iwl_process_add_sta_resp(struct iwl_priv *priv,
118 priv->stations[sta_id].sta.mode == 118 priv->stations[sta_id].sta.mode ==
119 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 119 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
120 addsta->sta.addr); 120 addsta->sta.addr);
121 spin_unlock_irqrestore(&priv->sta_lock, flags); 121 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
122 122
123 return ret; 123 return ret;
124} 124}
@@ -168,7 +168,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
168 } 168 }
169 169
170 cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data); 170 cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
171 ret = trans_send_cmd(&priv->trans, &cmd); 171 ret = iwl_trans_send_cmd(trans(priv), &cmd);
172 172
173 if (ret || (flags & CMD_ASYNC)) 173 if (ret || (flags & CMD_ASYNC))
174 return ret; 174 return ret;
@@ -177,7 +177,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
177 pkt = (struct iwl_rx_packet *)cmd.reply_page; 177 pkt = (struct iwl_rx_packet *)cmd.reply_page;
178 ret = iwl_process_add_sta_resp(priv, sta, pkt, true); 178 ret = iwl_process_add_sta_resp(priv, sta, pkt, true);
179 } 179 }
180 iwl_free_pages(priv, cmd.reply_page); 180 iwl_free_pages(priv->shrd, cmd.reply_page);
181 181
182 return ret; 182 return ret;
183} 183}
@@ -251,7 +251,8 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
251 else if (is_broadcast_ether_addr(addr)) 251 else if (is_broadcast_ether_addr(addr))
252 sta_id = ctx->bcast_sta_id; 252 sta_id = ctx->bcast_sta_id;
253 else 253 else
254 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) { 254 for (i = IWL_STA_ID;
255 i < hw_params(priv).max_stations; i++) {
255 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 256 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
256 addr)) { 257 addr)) {
257 sta_id = i; 258 sta_id = i;
@@ -336,12 +337,12 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
336 struct iwl_addsta_cmd sta_cmd; 337 struct iwl_addsta_cmd sta_cmd;
337 338
338 *sta_id_r = 0; 339 *sta_id_r = 0;
339 spin_lock_irqsave(&priv->sta_lock, flags_spin); 340 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
340 sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta); 341 sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
341 if (sta_id == IWL_INVALID_STATION) { 342 if (sta_id == IWL_INVALID_STATION) {
342 IWL_ERR(priv, "Unable to prepare station %pM for addition\n", 343 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
343 addr); 344 addr);
344 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 345 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
345 return -EINVAL; 346 return -EINVAL;
346 } 347 }
347 348
@@ -353,7 +354,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
353 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) { 354 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
354 IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n", 355 IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
355 sta_id); 356 sta_id);
356 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 357 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
357 return -EEXIST; 358 return -EEXIST;
358 } 359 }
359 360
@@ -361,23 +362,23 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
361 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 362 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
362 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n", 363 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
363 sta_id, addr); 364 sta_id, addr);
364 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 365 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
365 return -EEXIST; 366 return -EEXIST;
366 } 367 }
367 368
368 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS; 369 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
369 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); 370 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
370 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 371 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
371 372
372 /* Add station to device's station table */ 373 /* Add station to device's station table */
373 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 374 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
374 if (ret) { 375 if (ret) {
375 spin_lock_irqsave(&priv->sta_lock, flags_spin); 376 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
376 IWL_ERR(priv, "Adding station %pM failed.\n", 377 IWL_ERR(priv, "Adding station %pM failed.\n",
377 priv->stations[sta_id].sta.sta.addr); 378 priv->stations[sta_id].sta.sta.addr);
378 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 379 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
379 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 380 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
380 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 381 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
381 } 382 }
382 *sta_id_r = sta_id; 383 *sta_id_r = sta_id;
383 return ret; 384 return ret;
@@ -386,7 +387,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
386/** 387/**
387 * iwl_sta_ucode_deactivate - deactivate ucode status for a station 388 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
388 * 389 *
389 * priv->sta_lock must be held 390 * priv->shrd->sta_lock must be held
390 */ 391 */
391static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id) 392static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
392{ 393{
@@ -424,7 +425,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
424 425
425 cmd.flags |= CMD_WANT_SKB; 426 cmd.flags |= CMD_WANT_SKB;
426 427
427 ret = trans_send_cmd(&priv->trans, &cmd); 428 ret = iwl_trans_send_cmd(trans(priv), &cmd);
428 429
429 if (ret) 430 if (ret)
430 return ret; 431 return ret;
@@ -440,9 +441,11 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
440 switch (pkt->u.rem_sta.status) { 441 switch (pkt->u.rem_sta.status) {
441 case REM_STA_SUCCESS_MSK: 442 case REM_STA_SUCCESS_MSK:
442 if (!temporary) { 443 if (!temporary) {
443 spin_lock_irqsave(&priv->sta_lock, flags_spin); 444 spin_lock_irqsave(&priv->shrd->sta_lock,
445 flags_spin);
444 iwl_sta_ucode_deactivate(priv, sta_id); 446 iwl_sta_ucode_deactivate(priv, sta_id);
445 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 447 spin_unlock_irqrestore(&priv->shrd->sta_lock,
448 flags_spin);
446 } 449 }
447 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 450 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
448 break; 451 break;
@@ -452,7 +455,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
452 break; 455 break;
453 } 456 }
454 } 457 }
455 iwl_free_pages(priv, cmd.reply_page); 458 iwl_free_pages(priv->shrd, cmd.reply_page);
456 459
457 return ret; 460 return ret;
458} 461}
@@ -465,7 +468,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
465{ 468{
466 unsigned long flags; 469 unsigned long flags;
467 470
468 if (!iwl_is_ready(priv)) { 471 if (!iwl_is_ready(priv->shrd)) {
469 IWL_DEBUG_INFO(priv, 472 IWL_DEBUG_INFO(priv,
470 "Unable to remove station %pM, device not ready.\n", 473 "Unable to remove station %pM, device not ready.\n",
471 addr); 474 addr);
@@ -483,7 +486,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
483 if (WARN_ON(sta_id == IWL_INVALID_STATION)) 486 if (WARN_ON(sta_id == IWL_INVALID_STATION))
484 return -EINVAL; 487 return -EINVAL;
485 488
486 spin_lock_irqsave(&priv->sta_lock, flags); 489 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
487 490
488 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 491 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
489 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n", 492 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
@@ -509,11 +512,11 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
509 if (WARN_ON(priv->num_stations < 0)) 512 if (WARN_ON(priv->num_stations < 0))
510 priv->num_stations = 0; 513 priv->num_stations = 0;
511 514
512 spin_unlock_irqrestore(&priv->sta_lock, flags); 515 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
513 516
514 return iwl_send_remove_station(priv, addr, sta_id, false); 517 return iwl_send_remove_station(priv, addr, sta_id, false);
515out_err: 518out_err:
516 spin_unlock_irqrestore(&priv->sta_lock, flags); 519 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
517 return -EINVAL; 520 return -EINVAL;
518} 521}
519 522
@@ -534,8 +537,8 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
534 537
535 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n"); 538 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
536 539
537 spin_lock_irqsave(&priv->sta_lock, flags_spin); 540 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
538 for (i = 0; i < priv->hw_params.max_stations; i++) { 541 for (i = 0; i < hw_params(priv).max_stations; i++) {
539 if (ctx && ctx->ctxid != priv->stations[i].ctxid) 542 if (ctx && ctx->ctxid != priv->stations[i].ctxid)
540 continue; 543 continue;
541 544
@@ -545,7 +548,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
545 cleared = true; 548 cleared = true;
546 } 549 }
547 } 550 }
548 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 551 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
549 552
550 if (!cleared) 553 if (!cleared)
551 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n"); 554 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
@@ -569,14 +572,14 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
569 int ret; 572 int ret;
570 bool send_lq; 573 bool send_lq;
571 574
572 if (!iwl_is_ready(priv)) { 575 if (!iwl_is_ready(priv->shrd)) {
573 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n"); 576 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
574 return; 577 return;
575 } 578 }
576 579
577 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n"); 580 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
578 spin_lock_irqsave(&priv->sta_lock, flags_spin); 581 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
579 for (i = 0; i < priv->hw_params.max_stations; i++) { 582 for (i = 0; i < hw_params(priv).max_stations; i++) {
580 if (ctx->ctxid != priv->stations[i].ctxid) 583 if (ctx->ctxid != priv->stations[i].ctxid)
581 continue; 584 continue;
582 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) && 585 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
@@ -589,7 +592,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
589 } 592 }
590 } 593 }
591 594
592 for (i = 0; i < priv->hw_params.max_stations; i++) { 595 for (i = 0; i < hw_params(priv).max_stations; i++) {
593 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) { 596 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
594 memcpy(&sta_cmd, &priv->stations[i].sta, 597 memcpy(&sta_cmd, &priv->stations[i].sta,
595 sizeof(struct iwl_addsta_cmd)); 598 sizeof(struct iwl_addsta_cmd));
@@ -599,15 +602,18 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
599 sizeof(struct iwl_link_quality_cmd)); 602 sizeof(struct iwl_link_quality_cmd));
600 send_lq = true; 603 send_lq = true;
601 } 604 }
602 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 605 spin_unlock_irqrestore(&priv->shrd->sta_lock,
606 flags_spin);
603 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 607 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
604 if (ret) { 608 if (ret) {
605 spin_lock_irqsave(&priv->sta_lock, flags_spin); 609 spin_lock_irqsave(&priv->shrd->sta_lock,
610 flags_spin);
606 IWL_ERR(priv, "Adding station %pM failed.\n", 611 IWL_ERR(priv, "Adding station %pM failed.\n",
607 priv->stations[i].sta.sta.addr); 612 priv->stations[i].sta.sta.addr);
608 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE; 613 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
609 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 614 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
610 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 615 spin_unlock_irqrestore(&priv->shrd->sta_lock,
616 flags_spin);
611 } 617 }
612 /* 618 /*
613 * Rate scaling has already been initialized, send 619 * Rate scaling has already been initialized, send
@@ -615,12 +621,12 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
615 */ 621 */
616 if (send_lq) 622 if (send_lq)
617 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true); 623 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
618 spin_lock_irqsave(&priv->sta_lock, flags_spin); 624 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
619 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS; 625 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
620 } 626 }
621 } 627 }
622 628
623 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 629 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
624 if (!found) 630 if (!found)
625 IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n"); 631 IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
626 else 632 else
@@ -636,9 +642,9 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
636 struct iwl_link_quality_cmd lq; 642 struct iwl_link_quality_cmd lq;
637 bool active; 643 bool active;
638 644
639 spin_lock_irqsave(&priv->sta_lock, flags); 645 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
640 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 646 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
641 spin_unlock_irqrestore(&priv->sta_lock, flags); 647 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
642 return; 648 return;
643 } 649 }
644 650
@@ -648,7 +654,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
648 654
649 active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE; 655 active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
650 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 656 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
651 spin_unlock_irqrestore(&priv->sta_lock, flags); 657 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
652 658
653 if (active) { 659 if (active) {
654 ret = iwl_send_remove_station( 660 ret = iwl_send_remove_station(
@@ -658,9 +664,9 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
658 IWL_ERR(priv, "failed to remove STA %pM (%d)\n", 664 IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
659 priv->stations[sta_id].sta.sta.addr, ret); 665 priv->stations[sta_id].sta.sta.addr, ret);
660 } 666 }
661 spin_lock_irqsave(&priv->sta_lock, flags); 667 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
662 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; 668 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
663 spin_unlock_irqrestore(&priv->sta_lock, flags); 669 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
664 670
665 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 671 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
666 if (ret) 672 if (ret)
@@ -685,8 +691,8 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
685 unsigned long flags; 691 unsigned long flags;
686 int i; 692 int i;
687 693
688 spin_lock_irqsave(&priv->sta_lock, flags); 694 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
689 for (i = 0; i < priv->hw_params.max_stations; i++) { 695 for (i = 0; i < hw_params(priv).max_stations; i++) {
690 if (!(priv->stations[i].used & IWL_STA_BCAST)) 696 if (!(priv->stations[i].used & IWL_STA_BCAST))
691 continue; 697 continue;
692 698
@@ -697,7 +703,7 @@ void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
697 kfree(priv->stations[i].lq); 703 kfree(priv->stations[i].lq);
698 priv->stations[i].lq = NULL; 704 priv->stations[i].lq = NULL;
699 } 705 }
700 spin_unlock_irqrestore(&priv->sta_lock, flags); 706 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
701} 707}
702 708
703#ifdef CONFIG_IWLWIFI_DEBUG 709#ifdef CONFIG_IWLWIFI_DEBUG
@@ -781,19 +787,19 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
781 return -EINVAL; 787 return -EINVAL;
782 788
783 789
784 spin_lock_irqsave(&priv->sta_lock, flags_spin); 790 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
785 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 791 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
786 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 792 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
787 return -EINVAL; 793 return -EINVAL;
788 } 794 }
789 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 795 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
790 796
791 iwl_dump_lq_cmd(priv, lq); 797 iwl_dump_lq_cmd(priv, lq);
792 if (WARN_ON(init && (cmd.flags & CMD_ASYNC))) 798 if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
793 return -EINVAL; 799 return -EINVAL;
794 800
795 if (is_lq_table_valid(priv, ctx, lq)) 801 if (is_lq_table_valid(priv, ctx, lq))
796 ret = trans_send_cmd(&priv->trans, &cmd); 802 ret = iwl_trans_send_cmd(trans(priv), &cmd);
797 else 803 else
798 ret = -EINVAL; 804 ret = -EINVAL;
799 805
@@ -803,9 +809,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
803 if (init) { 809 if (init) {
804 IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n", 810 IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
805 lq->sta_id); 811 lq->sta_id);
806 spin_lock_irqsave(&priv->sta_lock, flags_spin); 812 spin_lock_irqsave(&priv->shrd->sta_lock, flags_spin);
807 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; 813 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
808 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 814 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags_spin);
809 } 815 }
810 return ret; 816 return ret;
811} 817}
@@ -820,13 +826,13 @@ int iwl_mac_sta_remove(struct ieee80211_hw *hw,
820 826
821 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n", 827 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
822 sta->addr); 828 sta->addr);
823 mutex_lock(&priv->mutex); 829 mutex_lock(&priv->shrd->mutex);
824 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", 830 IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
825 sta->addr); 831 sta->addr);
826 ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr); 832 ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
827 if (ret) 833 if (ret)
828 IWL_ERR(priv, "Error removing station %pM\n", 834 IWL_ERR(priv, "Error removing station %pM\n",
829 sta->addr); 835 sta->addr);
830 mutex_unlock(&priv->mutex); 836 mutex_unlock(&priv->shrd->mutex);
831 return ret; 837 return ret;
832} 838}
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 9a6768d66851..9641eb6b1d0a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -76,7 +76,7 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
76 unsigned long flags; 76 unsigned long flags;
77 struct iwl_rxon_context *ctx; 77 struct iwl_rxon_context *ctx;
78 78
79 spin_lock_irqsave(&priv->sta_lock, flags); 79 spin_lock_irqsave(&priv->shrd->sta_lock, flags);
80 memset(priv->stations, 0, sizeof(priv->stations)); 80 memset(priv->stations, 0, sizeof(priv->stations));
81 priv->num_stations = 0; 81 priv->num_stations = 0;
82 82
@@ -94,7 +94,7 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
94 ctx->key_mapping_keys = 0; 94 ctx->key_mapping_keys = 0;
95 } 95 }
96 96
97 spin_unlock_irqrestore(&priv->sta_lock, flags); 97 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
98} 98}
99 99
100static inline int iwl_sta_id(struct ieee80211_sta *sta) 100static inline int iwl_sta_id(struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
index b11f60de4f1e..848fc18befc2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c
@@ -72,7 +72,6 @@
72#include "iwl-dev.h" 72#include "iwl-dev.h"
73#include "iwl-core.h" 73#include "iwl-core.h"
74#include "iwl-debug.h" 74#include "iwl-debug.h"
75#include "iwl-fh.h"
76#include "iwl-io.h" 75#include "iwl-io.h"
77#include "iwl-agn.h" 76#include "iwl-agn.h"
78#include "iwl-testmode.h" 77#include "iwl-testmode.h"
@@ -239,7 +238,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
239 IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," 238 IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
240 " len %d\n", cmd.id, cmd.flags, cmd.len[0]); 239 " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
241 /* ok, let's submit the command to ucode */ 240 /* ok, let's submit the command to ucode */
242 return trans_send_cmd(&priv->trans, &cmd); 241 return iwl_trans_send_cmd(trans(priv), &cmd);
243} 242}
244 243
245 244
@@ -277,7 +276,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
277 276
278 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 277 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
279 case IWL_TM_CMD_APP2DEV_REG_READ32: 278 case IWL_TM_CMD_APP2DEV_REG_READ32:
280 val32 = iwl_read32(priv, ofs); 279 val32 = iwl_read32(bus(priv), ofs);
281 IWL_INFO(priv, "32bit value to read 0x%x\n", val32); 280 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
282 281
283 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); 282 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -299,7 +298,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
299 } else { 298 } else {
300 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); 299 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
301 IWL_INFO(priv, "32bit value to write 0x%x\n", val32); 300 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
302 iwl_write32(priv, ofs, val32); 301 iwl_write32(bus(priv), ofs, val32);
303 } 302 }
304 break; 303 break;
305 case IWL_TM_CMD_APP2DEV_REG_WRITE8: 304 case IWL_TM_CMD_APP2DEV_REG_WRITE8:
@@ -309,7 +308,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
309 } else { 308 } else {
310 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); 309 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
311 IWL_INFO(priv, "8bit value to write 0x%x\n", val8); 310 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
312 iwl_write8(priv, ofs, val8); 311 iwl_write8(bus(priv), ofs, val8);
313 } 312 }
314 break; 313 break;
315 default: 314 default:
@@ -405,7 +404,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
405 404
406 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: 405 case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
407 iwl_testmode_cfg_init_calib(priv); 406 iwl_testmode_cfg_init_calib(priv);
408 trans_stop_device(&priv->trans); 407 iwl_trans_stop_device(trans(priv));
409 break; 408 break;
410 409
411 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: 410 case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
@@ -613,7 +612,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
613 612
614 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]); 613 owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
615 if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM)) 614 if ((owner == IWL_OWNERSHIP_DRIVER) || (owner == IWL_OWNERSHIP_TM))
616 priv->ucode_owner = owner; 615 priv->shrd->ucode_owner = owner;
617 else { 616 else {
618 IWL_DEBUG_INFO(priv, "Invalid owner\n"); 617 IWL_DEBUG_INFO(priv, "Invalid owner\n");
619 return -EINVAL; 618 return -EINVAL;
@@ -661,7 +660,7 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
661 return -ENOMSG; 660 return -ENOMSG;
662 } 661 }
663 /* in case multiple accesses to the device happens */ 662 /* in case multiple accesses to the device happens */
664 mutex_lock(&priv->mutex); 663 mutex_lock(&priv->shrd->mutex);
665 664
666 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 665 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
667 case IWL_TM_CMD_APP2DEV_UCODE: 666 case IWL_TM_CMD_APP2DEV_UCODE:
@@ -702,7 +701,7 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
702 break; 701 break;
703 } 702 }
704 703
705 mutex_unlock(&priv->mutex); 704 mutex_unlock(&priv->shrd->mutex);
706 return result; 705 return result;
707} 706}
708 707
@@ -738,7 +737,7 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
738 } 737 }
739 738
740 /* in case multiple accesses to the device happens */ 739 /* in case multiple accesses to the device happens */
741 mutex_lock(&priv->mutex); 740 mutex_lock(&priv->shrd->mutex);
742 switch (cmd) { 741 switch (cmd) {
743 case IWL_TM_CMD_APP2DEV_READ_TRACE: 742 case IWL_TM_CMD_APP2DEV_READ_TRACE:
744 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); 743 IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
@@ -749,6 +748,6 @@ int iwl_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
749 break; 748 break;
750 } 749 }
751 750
752 mutex_unlock(&priv->mutex); 751 mutex_unlock(&priv->shrd->mutex);
753 return result; 752 return result;
754} 753}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
index b79330d84185..ec4e73737681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
@@ -29,54 +29,318 @@
29#ifndef __iwl_trans_int_pcie_h__ 29#ifndef __iwl_trans_int_pcie_h__
30#define __iwl_trans_int_pcie_h__ 30#define __iwl_trans_int_pcie_h__
31 31
32#include <linux/spinlock.h>
33#include <linux/interrupt.h>
34#include <linux/skbuff.h>
35
36#include "iwl-fh.h"
37#include "iwl-csr.h"
38#include "iwl-shared.h"
39#include "iwl-trans.h"
40#include "iwl-debug.h"
41#include "iwl-io.h"
42
43struct iwl_tx_queue;
44struct iwl_queue;
45struct iwl_host_cmd;
46
32/*This file includes the declaration that are internal to the 47/*This file includes the declaration that are internal to the
33 * trans_pcie layer */ 48 * trans_pcie layer */
34 49
50/**
51 * struct isr_statistics - interrupt statistics
52 *
53 */
54struct isr_statistics {
55 u32 hw;
56 u32 sw;
57 u32 err_code;
58 u32 sch;
59 u32 alive;
60 u32 rfkill;
61 u32 ctkill;
62 u32 wakeup;
63 u32 rx;
64 u32 tx;
65 u32 unhandled;
66};
67
68/**
69 * struct iwl_rx_queue - Rx queue
70 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
71 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
72 * @pool:
73 * @queue:
74 * @read: Shared index to newest available Rx buffer
75 * @write: Shared index to oldest written Rx packet
76 * @free_count: Number of pre-allocated buffers in rx_free
77 * @write_actual:
78 * @rx_free: list of free SKBs for use
79 * @rx_used: List of Rx buffers with no SKB
80 * @need_update: flag to indicate we need to update read/write index
81 * @rb_stts: driver's pointer to receive buffer status
82 * @rb_stts_dma: bus address of receive buffer status
83 * @lock:
84 *
85 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
86 */
87struct iwl_rx_queue {
88 __le32 *bd;
89 dma_addr_t bd_dma;
90 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
91 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
92 u32 read;
93 u32 write;
94 u32 free_count;
95 u32 write_actual;
96 struct list_head rx_free;
97 struct list_head rx_used;
98 int need_update;
99 struct iwl_rb_status *rb_stts;
100 dma_addr_t rb_stts_dma;
101 spinlock_t lock;
102};
103
104struct iwl_dma_ptr {
105 dma_addr_t dma;
106 void *addr;
107 size_t size;
108};
109
110/*
111 * This queue number is required for proper operation
112 * because the ucode will stop/start the scheduler as
113 * required.
114 */
115#define IWL_IPAN_MCAST_QUEUE 8
116
117/**
118 * struct iwl_trans_pcie - PCIe transport specific data
119 * @rxq: all the RX queue data
120 * @rx_replenish: work that will be called when buffers need to be allocated
121 * @trans: pointer to the generic transport area
122 * @scd_base_addr: scheduler sram base address in SRAM
123 * @scd_bc_tbls: pointer to the byte count table of the scheduler
124 * @kw: keep warm address
125 * @ac_to_fifo: to what fifo is a specifc AC mapped ?
126 * @ac_to_queue: to what tx queue is a specifc AC mapped ?
127 * @mcast_queue:
128 * @txq: Tx DMA processing queues
129 * @txq_ctx_active_msk: what queue is active
130 * queue_stopped: tracks what queue is stopped
131 * queue_stop_count: tracks what SW queue is stopped
132 */
133struct iwl_trans_pcie {
134 struct iwl_rx_queue rxq;
135 struct work_struct rx_replenish;
136 struct iwl_trans *trans;
137
138 /* INT ICT Table */
139 __le32 *ict_tbl;
140 void *ict_tbl_vir;
141 dma_addr_t ict_tbl_dma;
142 dma_addr_t aligned_ict_tbl_dma;
143 int ict_index;
144 u32 inta;
145 bool use_ict;
146 struct tasklet_struct irq_tasklet;
147 struct isr_statistics isr_stats;
148
149 u32 inta_mask;
150 u32 scd_base_addr;
151 struct iwl_dma_ptr scd_bc_tbls;
152 struct iwl_dma_ptr kw;
153
154 const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
155 const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
156 u8 mcast_queue[NUM_IWL_RXON_CTX];
157
158 struct iwl_tx_queue *txq;
159 unsigned long txq_ctx_active_msk;
160#define IWL_MAX_HW_QUEUES 32
161 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
162 atomic_t queue_stop_count[4];
163};
164
165#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
166 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
167
35/***************************************************** 168/*****************************************************
36* RX 169* RX
37******************************************************/ 170******************************************************/
38void iwl_bg_rx_replenish(struct work_struct *data); 171void iwl_bg_rx_replenish(struct work_struct *data);
39void iwl_irq_tasklet(struct iwl_priv *priv); 172void iwl_irq_tasklet(struct iwl_trans *trans);
40void iwlagn_rx_replenish(struct iwl_priv *priv); 173void iwlagn_rx_replenish(struct iwl_trans *trans);
41void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 174void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
42 struct iwl_rx_queue *q); 175 struct iwl_rx_queue *q);
43 176
44/***************************************************** 177/*****************************************************
45* ICT 178* ICT
46******************************************************/ 179******************************************************/
47int iwl_reset_ict(struct iwl_priv *priv); 180int iwl_reset_ict(struct iwl_trans *trans);
48void iwl_disable_ict(struct iwl_priv *priv); 181void iwl_disable_ict(struct iwl_trans *trans);
49int iwl_alloc_isr_ict(struct iwl_priv *priv); 182int iwl_alloc_isr_ict(struct iwl_trans *trans);
50void iwl_free_isr_ict(struct iwl_priv *priv); 183void iwl_free_isr_ict(struct iwl_trans *trans);
51irqreturn_t iwl_isr_ict(int irq, void *data); 184irqreturn_t iwl_isr_ict(int irq, void *data);
52 185
53
54/***************************************************** 186/*****************************************************
55* TX / HCMD 187* TX / HCMD
56******************************************************/ 188******************************************************/
57void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq); 189void iwl_txq_update_write_ptr(struct iwl_trans *trans,
58void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, 190 struct iwl_tx_queue *txq);
59 int index); 191int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
60int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
61 struct iwl_tx_queue *txq, 192 struct iwl_tx_queue *txq,
62 dma_addr_t addr, u16 len, u8 reset); 193 dma_addr_t addr, u16 len, u8 reset);
63int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, 194int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id);
64 int count, int slots_num, u32 id); 195int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
65int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 196int __must_check iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id,
66int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, 197 u32 flags, u16 len, const void *data);
67 u16 len, const void *data);
68void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 198void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
69void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 199void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
70 struct iwl_tx_queue *txq, 200 struct iwl_tx_queue *txq,
71 u16 byte_cnt); 201 u16 byte_cnt);
72int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, 202void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
73 u16 ssn_idx, u8 tx_fifo); 203int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
74void iwl_trans_set_wr_ptrs(struct iwl_priv *priv, 204 enum iwl_rxon_context_id ctx, int sta_id,
75 int txq_id, u32 index); 205 int tid);
76void iwl_trans_tx_queue_set_status(struct iwl_priv *priv, 206void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
207void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
77 struct iwl_tx_queue *txq, 208 struct iwl_tx_queue *txq,
78 int tx_fifo_id, int scd_retry); 209 int tx_fifo_id, int scd_retry);
79void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid, 210int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
80 int frame_limit); 211 enum iwl_rxon_context_id ctx, int sta_id,
212 int tid, u16 *ssn);
213void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
214 enum iwl_rxon_context_id ctx,
215 int sta_id, int tid, int frame_limit);
216void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
217 int index);
218int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
219 struct sk_buff_head *skbs);
220int iwl_queue_space(const struct iwl_queue *q);
221
222/*****************************************************
223* Error handling
224******************************************************/
225int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
226 char **buf, bool display);
227int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
228void iwl_dump_csr(struct iwl_trans *trans);
229
230/*****************************************************
231* Helpers
232******************************************************/
233static inline void iwl_disable_interrupts(struct iwl_trans *trans)
234{
235 clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
236
237 /* disable interrupts from uCode/NIC to host */
238 iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
239
240 /* acknowledge/clear/reset any interrupts still pending
241 * from uCode or flow handler (Rx/Tx DMA) */
242 iwl_write32(bus(trans), CSR_INT, 0xffffffff);
243 iwl_write32(bus(trans), CSR_FH_INT_STATUS, 0xffffffff);
244 IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
245}
246
247static inline void iwl_enable_interrupts(struct iwl_trans *trans)
248{
249 struct iwl_trans_pcie *trans_pcie =
250 IWL_TRANS_GET_PCIE_TRANS(trans);
251
252 IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
253 set_bit(STATUS_INT_ENABLED, &trans->shrd->status);
254 iwl_write32(bus(trans), CSR_INT_MASK, trans_pcie->inta_mask);
255}
256
257/*
258 * we have 8 bits used like this:
259 *
260 * 7 6 5 4 3 2 1 0
261 * | | | | | | | |
262 * | | | | | | +-+-------- AC queue (0-3)
263 * | | | | | |
264 * | +-+-+-+-+------------ HW queue ID
265 * |
266 * +---------------------- unused
267 */
268static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
269{
270 BUG_ON(ac > 3); /* only have 2 bits */
271 BUG_ON(hwq > 31); /* only use 5 bits */
272
273 txq->swq_id = (hwq << 2) | ac;
274}
275
276static inline void iwl_wake_queue(struct iwl_trans *trans,
277 struct iwl_tx_queue *txq)
278{
279 u8 queue = txq->swq_id;
280 u8 ac = queue & 3;
281 u8 hwq = (queue >> 2) & 0x1f;
282 struct iwl_trans_pcie *trans_pcie =
283 IWL_TRANS_GET_PCIE_TRANS(trans);
284
285 if (unlikely(!trans->shrd->mac80211_registered))
286 return;
287
288 if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
289 if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
290 ieee80211_wake_queue(trans->shrd->hw, ac);
291}
292
293static inline void iwl_stop_queue(struct iwl_trans *trans,
294 struct iwl_tx_queue *txq)
295{
296 u8 queue = txq->swq_id;
297 u8 ac = queue & 3;
298 u8 hwq = (queue >> 2) & 0x1f;
299 struct iwl_trans_pcie *trans_pcie =
300 IWL_TRANS_GET_PCIE_TRANS(trans);
301
302 if (unlikely(!trans->shrd->mac80211_registered))
303 return;
304
305 if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
306 if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
307 ieee80211_stop_queue(trans->shrd->hw, ac);
308}
309
310#ifdef ieee80211_stop_queue
311#undef ieee80211_stop_queue
312#endif
313
314#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
315
316#ifdef ieee80211_wake_queue
317#undef ieee80211_wake_queue
318#endif
319
320#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
321
322static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
323 int txq_id)
324{
325 set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
326}
327
328static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
329 int txq_id)
330{
331 clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
332}
333
334static inline int iwl_queue_used(const struct iwl_queue *q, int i)
335{
336 return q->write_ptr >= q->read_ptr ?
337 (i >= q->read_ptr && i < q->write_ptr) :
338 !(i < q->read_ptr && i >= q->write_ptr);
339}
340
341static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
342{
343 return index & (q->n_window - 1);
344}
81 345
82#endif /* __iwl_trans_int_pcie_h__ */ 346#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
index 474860290404..2d0ddb8d422d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
@@ -127,7 +127,7 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
127/** 127/**
128 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue 128 * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
129 */ 129 */
130void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 130void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
131 struct iwl_rx_queue *q) 131 struct iwl_rx_queue *q)
132{ 132{
133 unsigned long flags; 133 unsigned long flags;
@@ -138,34 +138,34 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
138 if (q->need_update == 0) 138 if (q->need_update == 0)
139 goto exit_unlock; 139 goto exit_unlock;
140 140
141 if (priv->cfg->base_params->shadow_reg_enable) { 141 if (hw_params(trans).shadow_reg_enable) {
142 /* shadow register enabled */ 142 /* shadow register enabled */
143 /* Device expects a multiple of 8 */ 143 /* Device expects a multiple of 8 */
144 q->write_actual = (q->write & ~0x7); 144 q->write_actual = (q->write & ~0x7);
145 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual); 145 iwl_write32(bus(trans), FH_RSCSR_CHNL0_WPTR, q->write_actual);
146 } else { 146 } else {
147 /* If power-saving is in use, make sure device is awake */ 147 /* If power-saving is in use, make sure device is awake */
148 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 148 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
149 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 149 reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
150 150
151 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 151 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
152 IWL_DEBUG_INFO(priv, 152 IWL_DEBUG_INFO(trans,
153 "Rx queue requesting wakeup," 153 "Rx queue requesting wakeup,"
154 " GP1 = 0x%x\n", reg); 154 " GP1 = 0x%x\n", reg);
155 iwl_set_bit(priv, CSR_GP_CNTRL, 155 iwl_set_bit(bus(trans), CSR_GP_CNTRL,
156 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 156 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
157 goto exit_unlock; 157 goto exit_unlock;
158 } 158 }
159 159
160 q->write_actual = (q->write & ~0x7); 160 q->write_actual = (q->write & ~0x7);
161 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, 161 iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
162 q->write_actual); 162 q->write_actual);
163 163
164 /* Else device is assumed to be awake */ 164 /* Else device is assumed to be awake */
165 } else { 165 } else {
166 /* Device expects a multiple of 8 */ 166 /* Device expects a multiple of 8 */
167 q->write_actual = (q->write & ~0x7); 167 q->write_actual = (q->write & ~0x7);
168 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, 168 iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_WPTR,
169 q->write_actual); 169 q->write_actual);
170 } 170 }
171 } 171 }
@@ -178,8 +178,7 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
178/** 178/**
179 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 179 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
180 */ 180 */
181static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv, 181static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
182 dma_addr_t dma_addr)
183{ 182{
184 return cpu_to_le32((u32)(dma_addr >> 8)); 183 return cpu_to_le32((u32)(dma_addr >> 8));
185} 184}
@@ -195,9 +194,12 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
195 * also updates the memory address in the firmware to reference the new 194 * also updates the memory address in the firmware to reference the new
196 * target buffer. 195 * target buffer.
197 */ 196 */
198static void iwlagn_rx_queue_restock(struct iwl_priv *priv) 197static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
199{ 198{
200 struct iwl_rx_queue *rxq = &priv->rxq; 199 struct iwl_trans_pcie *trans_pcie =
200 IWL_TRANS_GET_PCIE_TRANS(trans);
201
202 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
201 struct list_head *element; 203 struct list_head *element;
202 struct iwl_rx_mem_buffer *rxb; 204 struct iwl_rx_mem_buffer *rxb;
203 unsigned long flags; 205 unsigned long flags;
@@ -214,8 +216,7 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
214 list_del(element); 216 list_del(element);
215 217
216 /* Point to Rx buffer via next RBD in circular buffer */ 218 /* Point to Rx buffer via next RBD in circular buffer */
217 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv, 219 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(rxb->page_dma);
218 rxb->page_dma);
219 rxq->queue[rxq->write] = rxb; 220 rxq->queue[rxq->write] = rxb;
220 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 221 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
221 rxq->free_count--; 222 rxq->free_count--;
@@ -224,7 +225,7 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
224 /* If the pre-allocated buffer pool is dropping low, schedule to 225 /* If the pre-allocated buffer pool is dropping low, schedule to
225 * refill it */ 226 * refill it */
226 if (rxq->free_count <= RX_LOW_WATERMARK) 227 if (rxq->free_count <= RX_LOW_WATERMARK)
227 queue_work(priv->workqueue, &priv->rx_replenish); 228 queue_work(trans->shrd->workqueue, &trans_pcie->rx_replenish);
228 229
229 230
230 /* If we've added more space for the firmware to place data, tell it. 231 /* If we've added more space for the firmware to place data, tell it.
@@ -233,7 +234,7 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
233 spin_lock_irqsave(&rxq->lock, flags); 234 spin_lock_irqsave(&rxq->lock, flags);
234 rxq->need_update = 1; 235 rxq->need_update = 1;
235 spin_unlock_irqrestore(&rxq->lock, flags); 236 spin_unlock_irqrestore(&rxq->lock, flags);
236 iwl_rx_queue_update_write_ptr(priv, rxq); 237 iwl_rx_queue_update_write_ptr(trans, rxq);
237 } 238 }
238} 239}
239 240
@@ -245,9 +246,12 @@ static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
245 * Also restock the Rx queue via iwl_rx_queue_restock. 246 * Also restock the Rx queue via iwl_rx_queue_restock.
246 * This is called as a scheduled work item (except for during initialization) 247 * This is called as a scheduled work item (except for during initialization)
247 */ 248 */
248static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority) 249static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
249{ 250{
250 struct iwl_rx_queue *rxq = &priv->rxq; 251 struct iwl_trans_pcie *trans_pcie =
252 IWL_TRANS_GET_PCIE_TRANS(trans);
253
254 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
251 struct list_head *element; 255 struct list_head *element;
252 struct iwl_rx_mem_buffer *rxb; 256 struct iwl_rx_mem_buffer *rxb;
253 struct page *page; 257 struct page *page;
@@ -265,20 +269,21 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
265 if (rxq->free_count > RX_LOW_WATERMARK) 269 if (rxq->free_count > RX_LOW_WATERMARK)
266 gfp_mask |= __GFP_NOWARN; 270 gfp_mask |= __GFP_NOWARN;
267 271
268 if (priv->hw_params.rx_page_order > 0) 272 if (hw_params(trans).rx_page_order > 0)
269 gfp_mask |= __GFP_COMP; 273 gfp_mask |= __GFP_COMP;
270 274
271 /* Alloc a new receive buffer */ 275 /* Alloc a new receive buffer */
272 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); 276 page = alloc_pages(gfp_mask,
277 hw_params(trans).rx_page_order);
273 if (!page) { 278 if (!page) {
274 if (net_ratelimit()) 279 if (net_ratelimit())
275 IWL_DEBUG_INFO(priv, "alloc_pages failed, " 280 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
276 "order: %d\n", 281 "order: %d\n",
277 priv->hw_params.rx_page_order); 282 hw_params(trans).rx_page_order);
278 283
279 if ((rxq->free_count <= RX_LOW_WATERMARK) && 284 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
280 net_ratelimit()) 285 net_ratelimit())
281 IWL_CRIT(priv, "Failed to alloc_pages with %s." 286 IWL_CRIT(trans, "Failed to alloc_pages with %s."
282 "Only %u free buffers remaining.\n", 287 "Only %u free buffers remaining.\n",
283 priority == GFP_ATOMIC ? 288 priority == GFP_ATOMIC ?
284 "GFP_ATOMIC" : "GFP_KERNEL", 289 "GFP_ATOMIC" : "GFP_KERNEL",
@@ -293,7 +298,7 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
293 298
294 if (list_empty(&rxq->rx_used)) { 299 if (list_empty(&rxq->rx_used)) {
295 spin_unlock_irqrestore(&rxq->lock, flags); 300 spin_unlock_irqrestore(&rxq->lock, flags);
296 __free_pages(page, priv->hw_params.rx_page_order); 301 __free_pages(page, hw_params(trans).rx_page_order);
297 return; 302 return;
298 } 303 }
299 element = rxq->rx_used.next; 304 element = rxq->rx_used.next;
@@ -305,8 +310,8 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
305 BUG_ON(rxb->page); 310 BUG_ON(rxb->page);
306 rxb->page = page; 311 rxb->page = page;
307 /* Get physical address of the RB */ 312 /* Get physical address of the RB */
308 rxb->page_dma = dma_map_page(priv->bus->dev, page, 0, 313 rxb->page_dma = dma_map_page(bus(trans)->dev, page, 0,
309 PAGE_SIZE << priv->hw_params.rx_page_order, 314 PAGE_SIZE << hw_params(trans).rx_page_order,
310 DMA_FROM_DEVICE); 315 DMA_FROM_DEVICE);
311 /* dma address must be no more than 36 bits */ 316 /* dma address must be no more than 36 bits */
312 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 317 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
@@ -322,35 +327,36 @@ static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
322 } 327 }
323} 328}
324 329
325void iwlagn_rx_replenish(struct iwl_priv *priv) 330void iwlagn_rx_replenish(struct iwl_trans *trans)
326{ 331{
327 unsigned long flags; 332 unsigned long flags;
328 333
329 iwlagn_rx_allocate(priv, GFP_KERNEL); 334 iwlagn_rx_allocate(trans, GFP_KERNEL);
330 335
331 spin_lock_irqsave(&priv->lock, flags); 336 spin_lock_irqsave(&trans->shrd->lock, flags);
332 iwlagn_rx_queue_restock(priv); 337 iwlagn_rx_queue_restock(trans);
333 spin_unlock_irqrestore(&priv->lock, flags); 338 spin_unlock_irqrestore(&trans->shrd->lock, flags);
334} 339}
335 340
336static void iwlagn_rx_replenish_now(struct iwl_priv *priv) 341static void iwlagn_rx_replenish_now(struct iwl_trans *trans)
337{ 342{
338 iwlagn_rx_allocate(priv, GFP_ATOMIC); 343 iwlagn_rx_allocate(trans, GFP_ATOMIC);
339 344
340 iwlagn_rx_queue_restock(priv); 345 iwlagn_rx_queue_restock(trans);
341} 346}
342 347
343void iwl_bg_rx_replenish(struct work_struct *data) 348void iwl_bg_rx_replenish(struct work_struct *data)
344{ 349{
345 struct iwl_priv *priv = 350 struct iwl_trans_pcie *trans_pcie =
346 container_of(data, struct iwl_priv, rx_replenish); 351 container_of(data, struct iwl_trans_pcie, rx_replenish);
352 struct iwl_trans *trans = trans_pcie->trans;
347 353
348 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 354 if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
349 return; 355 return;
350 356
351 mutex_lock(&priv->mutex); 357 mutex_lock(&trans->shrd->mutex);
352 iwlagn_rx_replenish(priv); 358 iwlagn_rx_replenish(trans);
353 mutex_unlock(&priv->mutex); 359 mutex_unlock(&trans->shrd->mutex);
354} 360}
355 361
356/** 362/**
@@ -360,11 +366,13 @@ void iwl_bg_rx_replenish(struct work_struct *data)
360 * the appropriate handlers, including command responses, 366 * the appropriate handlers, including command responses,
361 * frame-received notifications, and other notifications. 367 * frame-received notifications, and other notifications.
362 */ 368 */
363static void iwl_rx_handle(struct iwl_priv *priv) 369static void iwl_rx_handle(struct iwl_trans *trans)
364{ 370{
365 struct iwl_rx_mem_buffer *rxb; 371 struct iwl_rx_mem_buffer *rxb;
366 struct iwl_rx_packet *pkt; 372 struct iwl_rx_packet *pkt;
367 struct iwl_rx_queue *rxq = &priv->rxq; 373 struct iwl_trans_pcie *trans_pcie =
374 IWL_TRANS_GET_PCIE_TRANS(trans);
375 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
368 u32 r, i; 376 u32 r, i;
369 int reclaim; 377 int reclaim;
370 unsigned long flags; 378 unsigned long flags;
@@ -379,7 +387,7 @@ static void iwl_rx_handle(struct iwl_priv *priv)
379 387
380 /* Rx interrupt, but nothing sent from uCode */ 388 /* Rx interrupt, but nothing sent from uCode */
381 if (i == r) 389 if (i == r)
382 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 390 IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
383 391
384 /* calculate total frames need to be restock after handling RX */ 392 /* calculate total frames need to be restock after handling RX */
385 total_empty = r - rxq->write_actual; 393 total_empty = r - rxq->write_actual;
@@ -404,17 +412,17 @@ static void iwl_rx_handle(struct iwl_priv *priv)
404 412
405 rxq->queue[i] = NULL; 413 rxq->queue[i] = NULL;
406 414
407 dma_unmap_page(priv->bus->dev, rxb->page_dma, 415 dma_unmap_page(bus(trans)->dev, rxb->page_dma,
408 PAGE_SIZE << priv->hw_params.rx_page_order, 416 PAGE_SIZE << hw_params(trans).rx_page_order,
409 DMA_FROM_DEVICE); 417 DMA_FROM_DEVICE);
410 pkt = rxb_addr(rxb); 418 pkt = rxb_addr(rxb);
411 419
412 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, 420 IWL_DEBUG_RX(trans, "r = %d, i = %d, %s, 0x%02x\n", r,
413 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 421 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
414 422
415 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 423 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
416 len += sizeof(u32); /* account for status word */ 424 len += sizeof(u32); /* account for status word */
417 trace_iwlwifi_dev_rx(priv, pkt, len); 425 trace_iwlwifi_dev_rx(priv(trans), pkt, len);
418 426
419 /* Reclaim a command buffer only if this packet is a response 427 /* Reclaim a command buffer only if this packet is a response
420 * to a (driver-originated) command. 428 * to a (driver-originated) command.
@@ -430,7 +438,7 @@ static void iwl_rx_handle(struct iwl_priv *priv)
430 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 438 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
431 (pkt->hdr.cmd != REPLY_TX); 439 (pkt->hdr.cmd != REPLY_TX);
432 440
433 iwl_rx_dispatch(priv, rxb); 441 iwl_rx_dispatch(priv(trans), rxb);
434 442
435 /* 443 /*
436 * XXX: After here, we should always check rxb->page 444 * XXX: After here, we should always check rxb->page
@@ -442,12 +450,12 @@ static void iwl_rx_handle(struct iwl_priv *priv)
442 if (reclaim) { 450 if (reclaim) {
443 /* Invoke any callbacks, transfer the buffer to caller, 451 /* Invoke any callbacks, transfer the buffer to caller,
444 * and fire off the (possibly) blocking 452 * and fire off the (possibly) blocking
445 * trans_send_cmd() 453 * iwl_trans_send_cmd()
446 * as we reclaim the driver command queue */ 454 * as we reclaim the driver command queue */
447 if (rxb->page) 455 if (rxb->page)
448 iwl_tx_cmd_complete(priv, rxb); 456 iwl_tx_cmd_complete(priv(trans), rxb);
449 else 457 else
450 IWL_WARN(priv, "Claim null rxb?\n"); 458 IWL_WARN(trans, "Claim null rxb?\n");
451 } 459 }
452 460
453 /* Reuse the page if possible. For notification packets and 461 /* Reuse the page if possible. For notification packets and
@@ -455,8 +463,9 @@ static void iwl_rx_handle(struct iwl_priv *priv)
455 * rx_free list for reuse later. */ 463 * rx_free list for reuse later. */
456 spin_lock_irqsave(&rxq->lock, flags); 464 spin_lock_irqsave(&rxq->lock, flags);
457 if (rxb->page != NULL) { 465 if (rxb->page != NULL) {
458 rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page, 466 rxb->page_dma = dma_map_page(bus(trans)->dev, rxb->page,
459 0, PAGE_SIZE << priv->hw_params.rx_page_order, 467 0, PAGE_SIZE <<
468 hw_params(trans).rx_page_order,
460 DMA_FROM_DEVICE); 469 DMA_FROM_DEVICE);
461 list_add_tail(&rxb->list, &rxq->rx_free); 470 list_add_tail(&rxb->list, &rxq->rx_free);
462 rxq->free_count++; 471 rxq->free_count++;
@@ -472,7 +481,7 @@ static void iwl_rx_handle(struct iwl_priv *priv)
472 count++; 481 count++;
473 if (count >= 8) { 482 if (count >= 8) {
474 rxq->read = i; 483 rxq->read = i;
475 iwlagn_rx_replenish_now(priv); 484 iwlagn_rx_replenish_now(trans);
476 count = 0; 485 count = 0;
477 } 486 }
478 } 487 }
@@ -481,13 +490,415 @@ static void iwl_rx_handle(struct iwl_priv *priv)
481 /* Backtrack one entry */ 490 /* Backtrack one entry */
482 rxq->read = i; 491 rxq->read = i;
483 if (fill_rx) 492 if (fill_rx)
484 iwlagn_rx_replenish_now(priv); 493 iwlagn_rx_replenish_now(trans);
485 else 494 else
486 iwlagn_rx_queue_restock(priv); 495 iwlagn_rx_queue_restock(trans);
496}
497
498static const char * const desc_lookup_text[] = {
499 "OK",
500 "FAIL",
501 "BAD_PARAM",
502 "BAD_CHECKSUM",
503 "NMI_INTERRUPT_WDG",
504 "SYSASSERT",
505 "FATAL_ERROR",
506 "BAD_COMMAND",
507 "HW_ERROR_TUNE_LOCK",
508 "HW_ERROR_TEMPERATURE",
509 "ILLEGAL_CHAN_FREQ",
510 "VCC_NOT_STABLE",
511 "FH_ERROR",
512 "NMI_INTERRUPT_HOST",
513 "NMI_INTERRUPT_ACTION_PT",
514 "NMI_INTERRUPT_UNKNOWN",
515 "UCODE_VERSION_MISMATCH",
516 "HW_ERROR_ABS_LOCK",
517 "HW_ERROR_CAL_LOCK_FAIL",
518 "NMI_INTERRUPT_INST_ACTION_PT",
519 "NMI_INTERRUPT_DATA_ACTION_PT",
520 "NMI_TRM_HW_ER",
521 "NMI_INTERRUPT_TRM",
522 "NMI_INTERRUPT_BREAK_POINT",
523 "DEBUG_0",
524 "DEBUG_1",
525 "DEBUG_2",
526 "DEBUG_3",
527};
528
529static struct { char *name; u8 num; } advanced_lookup[] = {
530 { "NMI_INTERRUPT_WDG", 0x34 },
531 { "SYSASSERT", 0x35 },
532 { "UCODE_VERSION_MISMATCH", 0x37 },
533 { "BAD_COMMAND", 0x38 },
534 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
535 { "FATAL_ERROR", 0x3D },
536 { "NMI_TRM_HW_ERR", 0x46 },
537 { "NMI_INTERRUPT_TRM", 0x4C },
538 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
539 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
540 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
541 { "NMI_INTERRUPT_HOST", 0x66 },
542 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
543 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
544 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
545 { "ADVANCED_SYSASSERT", 0 },
546};
547
548static const char *desc_lookup(u32 num)
549{
550 int i;
551 int max = ARRAY_SIZE(desc_lookup_text);
552
553 if (num < max)
554 return desc_lookup_text[num];
555
556 max = ARRAY_SIZE(advanced_lookup) - 1;
557 for (i = 0; i < max; i++) {
558 if (advanced_lookup[i].num == num)
559 break;
560 }
561 return advanced_lookup[i].name;
562}
563
564#define ERROR_START_OFFSET (1 * sizeof(u32))
565#define ERROR_ELEM_SIZE (7 * sizeof(u32))
566
567static void iwl_dump_nic_error_log(struct iwl_trans *trans)
568{
569 u32 base;
570 struct iwl_error_event_table table;
571 struct iwl_priv *priv = priv(trans);
572 struct iwl_trans_pcie *trans_pcie =
573 IWL_TRANS_GET_PCIE_TRANS(trans);
574
575 base = priv->device_pointers.error_event_table;
576 if (priv->ucode_type == IWL_UCODE_INIT) {
577 if (!base)
578 base = priv->init_errlog_ptr;
579 } else {
580 if (!base)
581 base = priv->inst_errlog_ptr;
582 }
583
584 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
585 IWL_ERR(trans,
586 "Not valid error log pointer 0x%08X for %s uCode\n",
587 base,
588 (priv->ucode_type == IWL_UCODE_INIT)
589 ? "Init" : "RT");
590 return;
591 }
592
593 iwl_read_targ_mem_words(bus(priv), base, &table, sizeof(table));
594
595 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
596 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
597 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
598 trans->shrd->status, table.valid);
599 }
600
601 trans_pcie->isr_stats.err_code = table.error_id;
602
603 trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
604 table.data1, table.data2, table.line,
605 table.blink1, table.blink2, table.ilink1,
606 table.ilink2, table.bcon_time, table.gp1,
607 table.gp2, table.gp3, table.ucode_ver,
608 table.hw_ver, table.brd_ver);
609 IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
610 desc_lookup(table.error_id));
611 IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
612 IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
613 IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
614 IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
615 IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
616 IWL_ERR(trans, "0x%08X | data1\n", table.data1);
617 IWL_ERR(trans, "0x%08X | data2\n", table.data2);
618 IWL_ERR(trans, "0x%08X | line\n", table.line);
619 IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
620 IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
621 IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
622 IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
623 IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
624 IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
625 IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
626 IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
627 IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
628 IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
629}
630
631/**
632 * iwl_irq_handle_error - called for HW or SW error interrupt from card
633 */
634static void iwl_irq_handle_error(struct iwl_trans *trans)
635{
636 struct iwl_priv *priv = priv(trans);
637 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
638 if (priv->cfg->internal_wimax_coex &&
639 (!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
640 APMS_CLK_VAL_MRB_FUNC_MODE) ||
641 (iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
642 APMG_PS_CTRL_VAL_RESET_REQ))) {
643 /*
644 * Keep the restart process from trying to send host
645 * commands by clearing the ready bit.
646 */
647 clear_bit(STATUS_READY, &trans->shrd->status);
648 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
649 wake_up_interruptible(&priv->wait_command_queue);
650 IWL_ERR(trans, "RF is used by WiMAX\n");
651 return;
652 }
653
654 IWL_ERR(trans, "Loaded firmware version: %s\n",
655 priv->hw->wiphy->fw_version);
656
657 iwl_dump_nic_error_log(trans);
658 iwl_dump_csr(trans);
659 iwl_dump_fh(trans, NULL, false);
660 iwl_dump_nic_event_log(trans, false, NULL, false);
661#ifdef CONFIG_IWLWIFI_DEBUG
662 if (iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS)
663 iwl_print_rx_config_cmd(priv,
664 &priv->contexts[IWL_RXON_CTX_BSS]);
665#endif
666
667 iwlagn_fw_error(priv, false);
668}
669
670#define EVENT_START_OFFSET (4 * sizeof(u32))
671
672/**
673 * iwl_print_event_log - Dump error event log to syslog
674 *
675 */
676static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
677 u32 num_events, u32 mode,
678 int pos, char **buf, size_t bufsz)
679{
680 u32 i;
681 u32 base; /* SRAM byte address of event log header */
682 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
683 u32 ptr; /* SRAM byte address of log data */
684 u32 ev, time, data; /* event log data */
685 unsigned long reg_flags;
686 struct iwl_priv *priv = priv(trans);
687
688 if (num_events == 0)
689 return pos;
690
691 base = priv->device_pointers.log_event_table;
692 if (priv->ucode_type == IWL_UCODE_INIT) {
693 if (!base)
694 base = priv->init_evtlog_ptr;
695 } else {
696 if (!base)
697 base = priv->inst_evtlog_ptr;
698 }
699
700 if (mode == 0)
701 event_size = 2 * sizeof(u32);
702 else
703 event_size = 3 * sizeof(u32);
704
705 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
706
707 /* Make sure device is powered up for SRAM reads */
708 spin_lock_irqsave(&bus(priv)->reg_lock, reg_flags);
709 iwl_grab_nic_access(bus(priv));
710
711 /* Set starting address; reads will auto-increment */
712 iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, ptr);
713 rmb();
714
715 /* "time" is actually "data" for mode 0 (no timestamp).
716 * place event id # at far right for easier visual parsing. */
717 for (i = 0; i < num_events; i++) {
718 ev = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
719 time = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
720 if (mode == 0) {
721 /* data, ev */
722 if (bufsz) {
723 pos += scnprintf(*buf + pos, bufsz - pos,
724 "EVT_LOG:0x%08x:%04u\n",
725 time, ev);
726 } else {
727 trace_iwlwifi_dev_ucode_event(priv, 0,
728 time, ev);
729 IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
730 time, ev);
731 }
732 } else {
733 data = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
734 if (bufsz) {
735 pos += scnprintf(*buf + pos, bufsz - pos,
736 "EVT_LOGT:%010u:0x%08x:%04u\n",
737 time, data, ev);
738 } else {
739 IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
740 time, data, ev);
741 trace_iwlwifi_dev_ucode_event(priv, time,
742 data, ev);
743 }
744 }
745 }
746
747 /* Allow device to power down */
748 iwl_release_nic_access(bus(priv));
749 spin_unlock_irqrestore(&bus(priv)->reg_lock, reg_flags);
750 return pos;
751}
752
753/**
754 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
755 */
756static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
757 u32 num_wraps, u32 next_entry,
758 u32 size, u32 mode,
759 int pos, char **buf, size_t bufsz)
760{
761 /*
762 * display the newest DEFAULT_LOG_ENTRIES entries
763 * i.e the entries just before the next ont that uCode would fill.
764 */
765 if (num_wraps) {
766 if (next_entry < size) {
767 pos = iwl_print_event_log(trans,
768 capacity - (size - next_entry),
769 size - next_entry, mode,
770 pos, buf, bufsz);
771 pos = iwl_print_event_log(trans, 0,
772 next_entry, mode,
773 pos, buf, bufsz);
774 } else
775 pos = iwl_print_event_log(trans, next_entry - size,
776 size, mode, pos, buf, bufsz);
777 } else {
778 if (next_entry < size) {
779 pos = iwl_print_event_log(trans, 0, next_entry,
780 mode, pos, buf, bufsz);
781 } else {
782 pos = iwl_print_event_log(trans, next_entry - size,
783 size, mode, pos, buf, bufsz);
784 }
785 }
786 return pos;
787}
788
789#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
790
791int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
792 char **buf, bool display)
793{
794 u32 base; /* SRAM byte address of event log header */
795 u32 capacity; /* event log capacity in # entries */
796 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
797 u32 num_wraps; /* # times uCode wrapped to top of log */
798 u32 next_entry; /* index of next entry to be written by uCode */
799 u32 size; /* # entries that we'll print */
800 u32 logsize;
801 int pos = 0;
802 size_t bufsz = 0;
803 struct iwl_priv *priv = priv(trans);
804
805 base = priv->device_pointers.log_event_table;
806 if (priv->ucode_type == IWL_UCODE_INIT) {
807 logsize = priv->init_evtlog_size;
808 if (!base)
809 base = priv->init_evtlog_ptr;
810 } else {
811 logsize = priv->inst_evtlog_size;
812 if (!base)
813 base = priv->inst_evtlog_ptr;
814 }
815
816 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
817 IWL_ERR(trans,
818 "Invalid event log pointer 0x%08X for %s uCode\n",
819 base,
820 (priv->ucode_type == IWL_UCODE_INIT)
821 ? "Init" : "RT");
822 return -EINVAL;
823 }
824
825 /* event log header */
826 capacity = iwl_read_targ_mem(bus(priv), base);
827 mode = iwl_read_targ_mem(bus(priv), base + (1 * sizeof(u32)));
828 num_wraps = iwl_read_targ_mem(bus(priv), base + (2 * sizeof(u32)));
829 next_entry = iwl_read_targ_mem(bus(priv), base + (3 * sizeof(u32)));
830
831 if (capacity > logsize) {
832 IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
833 "entries\n", capacity, logsize);
834 capacity = logsize;
835 }
836
837 if (next_entry > logsize) {
838 IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
839 next_entry, logsize);
840 next_entry = logsize;
841 }
842
843 size = num_wraps ? capacity : next_entry;
844
845 /* bail out if nothing in log */
846 if (size == 0) {
847 IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
848 return pos;
849 }
850
851 /* enable/disable bt channel inhibition */
852 priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce;
853
854#ifdef CONFIG_IWLWIFI_DEBUG
855 if (!(iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) && !full_log)
856 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
857 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
858#else
859 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
860 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
861#endif
862 IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
863 size);
864
865#ifdef CONFIG_IWLWIFI_DEBUG
866 if (display) {
867 if (full_log)
868 bufsz = capacity * 48;
869 else
870 bufsz = size * 48;
871 *buf = kmalloc(bufsz, GFP_KERNEL);
872 if (!*buf)
873 return -ENOMEM;
874 }
875 if ((iwl_get_debug_level(trans->shrd) & IWL_DL_FW_ERRORS) || full_log) {
876 /*
877 * if uCode has wrapped back to top of log,
878 * start at the oldest entry,
879 * i.e the next one that uCode would fill.
880 */
881 if (num_wraps)
882 pos = iwl_print_event_log(trans, next_entry,
883 capacity - next_entry, mode,
884 pos, buf, bufsz);
885 /* (then/else) start at top of log */
886 pos = iwl_print_event_log(trans, 0,
887 next_entry, mode, pos, buf, bufsz);
888 } else
889 pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
890 next_entry, size, mode,
891 pos, buf, bufsz);
892#else
893 pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
894 next_entry, size, mode,
895 pos, buf, bufsz);
896#endif
897 return pos;
487} 898}
488 899
489/* tasklet for iwlagn interrupt */ 900/* tasklet for iwlagn interrupt */
490void iwl_irq_tasklet(struct iwl_priv *priv) 901void iwl_irq_tasklet(struct iwl_trans *trans)
491{ 902{
492 u32 inta = 0; 903 u32 inta = 0;
493 u32 handled = 0; 904 u32 handled = 0;
@@ -497,7 +908,12 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
497 u32 inta_mask; 908 u32 inta_mask;
498#endif 909#endif
499 910
500 spin_lock_irqsave(&priv->lock, flags); 911 struct iwl_trans_pcie *trans_pcie =
912 IWL_TRANS_GET_PCIE_TRANS(trans);
913 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
914
915
916 spin_lock_irqsave(&trans->shrd->lock, flags);
501 917
502 /* Ack/clear/reset pending uCode interrupts. 918 /* Ack/clear/reset pending uCode interrupts.
503 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 919 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -510,33 +926,34 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
510 * hardware bugs here by ACKing all the possible interrupts so that 926 * hardware bugs here by ACKing all the possible interrupts so that
511 * interrupt coalescing can still be achieved. 927 * interrupt coalescing can still be achieved.
512 */ 928 */
513 iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); 929 iwl_write32(bus(trans), CSR_INT,
930 trans_pcie->inta | ~trans_pcie->inta_mask);
514 931
515 inta = priv->inta; 932 inta = trans_pcie->inta;
516 933
517#ifdef CONFIG_IWLWIFI_DEBUG 934#ifdef CONFIG_IWLWIFI_DEBUG
518 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 935 if (iwl_get_debug_level(trans->shrd) & IWL_DL_ISR) {
519 /* just for debug */ 936 /* just for debug */
520 inta_mask = iwl_read32(priv, CSR_INT_MASK); 937 inta_mask = iwl_read32(bus(trans), CSR_INT_MASK);
521 IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ", 938 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ",
522 inta, inta_mask); 939 inta, inta_mask);
523 } 940 }
524#endif 941#endif
525 942
526 spin_unlock_irqrestore(&priv->lock, flags); 943 spin_unlock_irqrestore(&trans->shrd->lock, flags);
527 944
528 /* saved interrupt in inta variable now we can reset priv->inta */ 945 /* saved interrupt in inta variable now we can reset trans_pcie->inta */
529 priv->inta = 0; 946 trans_pcie->inta = 0;
530 947
531 /* Now service all interrupt bits discovered above. */ 948 /* Now service all interrupt bits discovered above. */
532 if (inta & CSR_INT_BIT_HW_ERR) { 949 if (inta & CSR_INT_BIT_HW_ERR) {
533 IWL_ERR(priv, "Hardware error detected. Restarting.\n"); 950 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
534 951
535 /* Tell the device to stop sending interrupts */ 952 /* Tell the device to stop sending interrupts */
536 iwl_disable_interrupts(priv); 953 iwl_disable_interrupts(trans);
537 954
538 priv->isr_stats.hw++; 955 isr_stats->hw++;
539 iwl_irq_handle_error(priv); 956 iwl_irq_handle_error(trans);
540 957
541 handled |= CSR_INT_BIT_HW_ERR; 958 handled |= CSR_INT_BIT_HW_ERR;
542 959
@@ -544,18 +961,18 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
544 } 961 }
545 962
546#ifdef CONFIG_IWLWIFI_DEBUG 963#ifdef CONFIG_IWLWIFI_DEBUG
547 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 964 if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
548 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 965 /* NIC fires this, but we don't use it, redundant with WAKEUP */
549 if (inta & CSR_INT_BIT_SCD) { 966 if (inta & CSR_INT_BIT_SCD) {
550 IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " 967 IWL_DEBUG_ISR(trans, "Scheduler finished to transmit "
551 "the frame/frames.\n"); 968 "the frame/frames.\n");
552 priv->isr_stats.sch++; 969 isr_stats->sch++;
553 } 970 }
554 971
555 /* Alive notification via Rx interrupt will do the real work */ 972 /* Alive notification via Rx interrupt will do the real work */
556 if (inta & CSR_INT_BIT_ALIVE) { 973 if (inta & CSR_INT_BIT_ALIVE) {
557 IWL_DEBUG_ISR(priv, "Alive interrupt\n"); 974 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
558 priv->isr_stats.alive++; 975 isr_stats->alive++;
559 } 976 }
560 } 977 }
561#endif 978#endif
@@ -565,26 +982,29 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
565 /* HW RF KILL switch toggled */ 982 /* HW RF KILL switch toggled */
566 if (inta & CSR_INT_BIT_RF_KILL) { 983 if (inta & CSR_INT_BIT_RF_KILL) {
567 int hw_rf_kill = 0; 984 int hw_rf_kill = 0;
568 if (!(iwl_read32(priv, CSR_GP_CNTRL) & 985 if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
569 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 986 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
570 hw_rf_kill = 1; 987 hw_rf_kill = 1;
571 988
572 IWL_WARN(priv, "RF_KILL bit toggled to %s.\n", 989 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
573 hw_rf_kill ? "disable radio" : "enable radio"); 990 hw_rf_kill ? "disable radio" : "enable radio");
574 991
575 priv->isr_stats.rfkill++; 992 isr_stats->rfkill++;
576 993
577 /* driver only loads ucode once setting the interface up. 994 /* driver only loads ucode once setting the interface up.
578 * the driver allows loading the ucode even if the radio 995 * the driver allows loading the ucode even if the radio
579 * is killed. Hence update the killswitch state here. The 996 * is killed. Hence update the killswitch state here. The
580 * rfkill handler will care about restarting if needed. 997 * rfkill handler will care about restarting if needed.
581 */ 998 */
582 if (!test_bit(STATUS_ALIVE, &priv->status)) { 999 if (!test_bit(STATUS_ALIVE, &trans->shrd->status)) {
583 if (hw_rf_kill) 1000 if (hw_rf_kill)
584 set_bit(STATUS_RF_KILL_HW, &priv->status); 1001 set_bit(STATUS_RF_KILL_HW,
1002 &trans->shrd->status);
585 else 1003 else
586 clear_bit(STATUS_RF_KILL_HW, &priv->status); 1004 clear_bit(STATUS_RF_KILL_HW,
587 wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill); 1005 &trans->shrd->status);
1006 wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy,
1007 hw_rf_kill);
588 } 1008 }
589 1009
590 handled |= CSR_INT_BIT_RF_KILL; 1010 handled |= CSR_INT_BIT_RF_KILL;
@@ -592,28 +1012,29 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
592 1012
593 /* Chip got too hot and stopped itself */ 1013 /* Chip got too hot and stopped itself */
594 if (inta & CSR_INT_BIT_CT_KILL) { 1014 if (inta & CSR_INT_BIT_CT_KILL) {
595 IWL_ERR(priv, "Microcode CT kill error detected.\n"); 1015 IWL_ERR(trans, "Microcode CT kill error detected.\n");
596 priv->isr_stats.ctkill++; 1016 isr_stats->ctkill++;
597 handled |= CSR_INT_BIT_CT_KILL; 1017 handled |= CSR_INT_BIT_CT_KILL;
598 } 1018 }
599 1019
600 /* Error detected by uCode */ 1020 /* Error detected by uCode */
601 if (inta & CSR_INT_BIT_SW_ERR) { 1021 if (inta & CSR_INT_BIT_SW_ERR) {
602 IWL_ERR(priv, "Microcode SW error detected. " 1022 IWL_ERR(trans, "Microcode SW error detected. "
603 " Restarting 0x%X.\n", inta); 1023 " Restarting 0x%X.\n", inta);
604 priv->isr_stats.sw++; 1024 isr_stats->sw++;
605 iwl_irq_handle_error(priv); 1025 iwl_irq_handle_error(trans);
606 handled |= CSR_INT_BIT_SW_ERR; 1026 handled |= CSR_INT_BIT_SW_ERR;
607 } 1027 }
608 1028
609 /* uCode wakes up after power-down sleep */ 1029 /* uCode wakes up after power-down sleep */
610 if (inta & CSR_INT_BIT_WAKEUP) { 1030 if (inta & CSR_INT_BIT_WAKEUP) {
611 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1031 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
612 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1032 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
613 for (i = 0; i < priv->hw_params.max_txq_num; i++) 1033 for (i = 0; i < hw_params(trans).max_txq_num; i++)
614 iwl_txq_update_write_ptr(priv, &priv->txq[i]); 1034 iwl_txq_update_write_ptr(trans,
1035 &trans_pcie->txq[i]);
615 1036
616 priv->isr_stats.wakeup++; 1037 isr_stats->wakeup++;
617 1038
618 handled |= CSR_INT_BIT_WAKEUP; 1039 handled |= CSR_INT_BIT_WAKEUP;
619 } 1040 }
@@ -623,15 +1044,16 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
623 * notifications from uCode come through here*/ 1044 * notifications from uCode come through here*/
624 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1045 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
625 CSR_INT_BIT_RX_PERIODIC)) { 1046 CSR_INT_BIT_RX_PERIODIC)) {
626 IWL_DEBUG_ISR(priv, "Rx interrupt\n"); 1047 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
627 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1048 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
628 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1049 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
629 iwl_write32(priv, CSR_FH_INT_STATUS, 1050 iwl_write32(bus(trans), CSR_FH_INT_STATUS,
630 CSR_FH_INT_RX_MASK); 1051 CSR_FH_INT_RX_MASK);
631 } 1052 }
632 if (inta & CSR_INT_BIT_RX_PERIODIC) { 1053 if (inta & CSR_INT_BIT_RX_PERIODIC) {
633 handled |= CSR_INT_BIT_RX_PERIODIC; 1054 handled |= CSR_INT_BIT_RX_PERIODIC;
634 iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1055 iwl_write32(bus(trans),
1056 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
635 } 1057 }
636 /* Sending RX interrupt require many steps to be done in the 1058 /* Sending RX interrupt require many steps to be done in the
637 * the device: 1059 * the device:
@@ -645,9 +1067,9 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
645 */ 1067 */
646 1068
647 /* Disable periodic interrupt; we use it as just a one-shot. */ 1069 /* Disable periodic interrupt; we use it as just a one-shot. */
648 iwl_write8(priv, CSR_INT_PERIODIC_REG, 1070 iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
649 CSR_INT_PERIODIC_DIS); 1071 CSR_INT_PERIODIC_DIS);
650 iwl_rx_handle(priv); 1072 iwl_rx_handle(trans);
651 1073
652 /* 1074 /*
653 * Enable periodic interrupt in 8 msec only if we received 1075 * Enable periodic interrupt in 8 msec only if we received
@@ -657,40 +1079,40 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
657 * to extend the periodic interrupt; one-shot is enough. 1079 * to extend the periodic interrupt; one-shot is enough.
658 */ 1080 */
659 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1081 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
660 iwl_write8(priv, CSR_INT_PERIODIC_REG, 1082 iwl_write8(bus(trans), CSR_INT_PERIODIC_REG,
661 CSR_INT_PERIODIC_ENA); 1083 CSR_INT_PERIODIC_ENA);
662 1084
663 priv->isr_stats.rx++; 1085 isr_stats->rx++;
664 } 1086 }
665 1087
666 /* This "Tx" DMA channel is used only for loading uCode */ 1088 /* This "Tx" DMA channel is used only for loading uCode */
667 if (inta & CSR_INT_BIT_FH_TX) { 1089 if (inta & CSR_INT_BIT_FH_TX) {
668 iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 1090 iwl_write32(bus(trans), CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
669 IWL_DEBUG_ISR(priv, "uCode load interrupt\n"); 1091 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
670 priv->isr_stats.tx++; 1092 isr_stats->tx++;
671 handled |= CSR_INT_BIT_FH_TX; 1093 handled |= CSR_INT_BIT_FH_TX;
672 /* Wake up uCode load routine, now that load is complete */ 1094 /* Wake up uCode load routine, now that load is complete */
673 priv->ucode_write_complete = 1; 1095 priv(trans)->ucode_write_complete = 1;
674 wake_up_interruptible(&priv->wait_command_queue); 1096 wake_up_interruptible(&priv(trans)->wait_command_queue);
675 } 1097 }
676 1098
677 if (inta & ~handled) { 1099 if (inta & ~handled) {
678 IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1100 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
679 priv->isr_stats.unhandled++; 1101 isr_stats->unhandled++;
680 } 1102 }
681 1103
682 if (inta & ~(priv->inta_mask)) { 1104 if (inta & ~(trans_pcie->inta_mask)) {
683 IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", 1105 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
684 inta & ~priv->inta_mask); 1106 inta & ~trans_pcie->inta_mask);
685 } 1107 }
686 1108
687 /* Re-enable all interrupts */ 1109 /* Re-enable all interrupts */
688 /* only Re-enable if disabled by irq */ 1110 /* only Re-enable if disabled by irq */
689 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1111 if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status))
690 iwl_enable_interrupts(priv); 1112 iwl_enable_interrupts(trans);
691 /* Re-enable RF_KILL if it occurred */ 1113 /* Re-enable RF_KILL if it occurred */
692 else if (handled & CSR_INT_BIT_RF_KILL) 1114 else if (handled & CSR_INT_BIT_RF_KILL)
693 iwl_enable_rfkill_int(priv); 1115 iwl_enable_rfkill_int(priv(trans));
694} 1116}
695 1117
696/****************************************************************************** 1118/******************************************************************************
@@ -701,18 +1123,21 @@ void iwl_irq_tasklet(struct iwl_priv *priv)
701#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) 1123#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
702 1124
703/* Free dram table */ 1125/* Free dram table */
704void iwl_free_isr_ict(struct iwl_priv *priv) 1126void iwl_free_isr_ict(struct iwl_trans *trans)
705{ 1127{
706 if (priv->ict_tbl_vir) { 1128 struct iwl_trans_pcie *trans_pcie =
707 dma_free_coherent(priv->bus->dev, 1129 IWL_TRANS_GET_PCIE_TRANS(trans);
1130
1131 if (trans_pcie->ict_tbl_vir) {
1132 dma_free_coherent(bus(trans)->dev,
708 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, 1133 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
709 priv->ict_tbl_vir, 1134 trans_pcie->ict_tbl_vir,
710 priv->ict_tbl_dma); 1135 trans_pcie->ict_tbl_dma);
711 priv->ict_tbl_vir = NULL; 1136 trans_pcie->ict_tbl_vir = NULL;
712 memset(&priv->ict_tbl_dma, 0, 1137 memset(&trans_pcie->ict_tbl_dma, 0,
713 sizeof(priv->ict_tbl_dma)); 1138 sizeof(trans_pcie->ict_tbl_dma));
714 memset(&priv->aligned_ict_tbl_dma, 0, 1139 memset(&trans_pcie->aligned_ict_tbl_dma, 0,
715 sizeof(priv->aligned_ict_tbl_dma)); 1140 sizeof(trans_pcie->aligned_ict_tbl_dma));
716 } 1141 }
717} 1142}
718 1143
@@ -720,157 +1145,168 @@ void iwl_free_isr_ict(struct iwl_priv *priv)
720/* allocate dram shared table it is a PAGE_SIZE aligned 1145/* allocate dram shared table it is a PAGE_SIZE aligned
721 * also reset all data related to ICT table interrupt. 1146 * also reset all data related to ICT table interrupt.
722 */ 1147 */
723int iwl_alloc_isr_ict(struct iwl_priv *priv) 1148int iwl_alloc_isr_ict(struct iwl_trans *trans)
724{ 1149{
1150 struct iwl_trans_pcie *trans_pcie =
1151 IWL_TRANS_GET_PCIE_TRANS(trans);
725 1152
726 /* allocate shrared data table */ 1153 /* allocate shrared data table */
727 priv->ict_tbl_vir = 1154 trans_pcie->ict_tbl_vir =
728 dma_alloc_coherent(priv->bus->dev, 1155 dma_alloc_coherent(bus(trans)->dev,
729 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE, 1156 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
730 &priv->ict_tbl_dma, GFP_KERNEL); 1157 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
731 if (!priv->ict_tbl_vir) 1158 if (!trans_pcie->ict_tbl_vir)
732 return -ENOMEM; 1159 return -ENOMEM;
733 1160
734 /* align table to PAGE_SIZE boundary */ 1161 /* align table to PAGE_SIZE boundary */
735 priv->aligned_ict_tbl_dma = 1162 trans_pcie->aligned_ict_tbl_dma =
736 ALIGN(priv->ict_tbl_dma, PAGE_SIZE); 1163 ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
737 1164
738 IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n", 1165 IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
739 (unsigned long long)priv->ict_tbl_dma, 1166 (unsigned long long)trans_pcie->ict_tbl_dma,
740 (unsigned long long)priv->aligned_ict_tbl_dma, 1167 (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
741 (int)(priv->aligned_ict_tbl_dma - 1168 (int)(trans_pcie->aligned_ict_tbl_dma -
742 priv->ict_tbl_dma)); 1169 trans_pcie->ict_tbl_dma));
743 1170
744 priv->ict_tbl = priv->ict_tbl_vir + 1171 trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
745 (priv->aligned_ict_tbl_dma - 1172 (trans_pcie->aligned_ict_tbl_dma -
746 priv->ict_tbl_dma); 1173 trans_pcie->ict_tbl_dma);
747 1174
748 IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n", 1175 IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
749 priv->ict_tbl, priv->ict_tbl_vir, 1176 trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
750 (int)(priv->aligned_ict_tbl_dma - 1177 (int)(trans_pcie->aligned_ict_tbl_dma -
751 priv->ict_tbl_dma)); 1178 trans_pcie->ict_tbl_dma));
752 1179
753 /* reset table and index to all 0 */ 1180 /* reset table and index to all 0 */
754 memset(priv->ict_tbl_vir, 0, 1181 memset(trans_pcie->ict_tbl_vir, 0,
755 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE); 1182 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
756 priv->ict_index = 0; 1183 trans_pcie->ict_index = 0;
757 1184
758 /* add periodic RX interrupt */ 1185 /* add periodic RX interrupt */
759 priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC; 1186 trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
760 return 0; 1187 return 0;
761} 1188}
762 1189
763/* Device is going up inform it about using ICT interrupt table, 1190/* Device is going up inform it about using ICT interrupt table,
764 * also we need to tell the driver to start using ICT interrupt. 1191 * also we need to tell the driver to start using ICT interrupt.
765 */ 1192 */
766int iwl_reset_ict(struct iwl_priv *priv) 1193int iwl_reset_ict(struct iwl_trans *trans)
767{ 1194{
768 u32 val; 1195 u32 val;
769 unsigned long flags; 1196 unsigned long flags;
1197 struct iwl_trans_pcie *trans_pcie =
1198 IWL_TRANS_GET_PCIE_TRANS(trans);
770 1199
771 if (!priv->ict_tbl_vir) 1200 if (!trans_pcie->ict_tbl_vir)
772 return 0; 1201 return 0;
773 1202
774 spin_lock_irqsave(&priv->lock, flags); 1203 spin_lock_irqsave(&trans->shrd->lock, flags);
775 iwl_disable_interrupts(priv); 1204 iwl_disable_interrupts(trans);
776 1205
777 memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT); 1206 memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
778 1207
779 val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT; 1208 val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
780 1209
781 val |= CSR_DRAM_INT_TBL_ENABLE; 1210 val |= CSR_DRAM_INT_TBL_ENABLE;
782 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK; 1211 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
783 1212
784 IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X " 1213 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
785 "aligned dma address %Lx\n", 1214 "aligned dma address %Lx\n",
786 val, 1215 val,
787 (unsigned long long)priv->aligned_ict_tbl_dma); 1216 (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
788 1217
789 iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val); 1218 iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
790 priv->use_ict = true; 1219 trans_pcie->use_ict = true;
791 priv->ict_index = 0; 1220 trans_pcie->ict_index = 0;
792 iwl_write32(priv, CSR_INT, priv->inta_mask); 1221 iwl_write32(bus(trans), CSR_INT, trans_pcie->inta_mask);
793 iwl_enable_interrupts(priv); 1222 iwl_enable_interrupts(trans);
794 spin_unlock_irqrestore(&priv->lock, flags); 1223 spin_unlock_irqrestore(&trans->shrd->lock, flags);
795 1224
796 return 0; 1225 return 0;
797} 1226}
798 1227
799/* Device is going down disable ict interrupt usage */ 1228/* Device is going down disable ict interrupt usage */
800void iwl_disable_ict(struct iwl_priv *priv) 1229void iwl_disable_ict(struct iwl_trans *trans)
801{ 1230{
1231 struct iwl_trans_pcie *trans_pcie =
1232 IWL_TRANS_GET_PCIE_TRANS(trans);
1233
802 unsigned long flags; 1234 unsigned long flags;
803 1235
804 spin_lock_irqsave(&priv->lock, flags); 1236 spin_lock_irqsave(&trans->shrd->lock, flags);
805 priv->use_ict = false; 1237 trans_pcie->use_ict = false;
806 spin_unlock_irqrestore(&priv->lock, flags); 1238 spin_unlock_irqrestore(&trans->shrd->lock, flags);
807} 1239}
808 1240
809static irqreturn_t iwl_isr(int irq, void *data) 1241static irqreturn_t iwl_isr(int irq, void *data)
810{ 1242{
811 struct iwl_priv *priv = data; 1243 struct iwl_trans *trans = data;
1244 struct iwl_trans_pcie *trans_pcie;
812 u32 inta, inta_mask; 1245 u32 inta, inta_mask;
813 unsigned long flags; 1246 unsigned long flags;
814#ifdef CONFIG_IWLWIFI_DEBUG 1247#ifdef CONFIG_IWLWIFI_DEBUG
815 u32 inta_fh; 1248 u32 inta_fh;
816#endif 1249#endif
817 if (!priv) 1250 if (!trans)
818 return IRQ_NONE; 1251 return IRQ_NONE;
819 1252
820 spin_lock_irqsave(&priv->lock, flags); 1253 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1254
1255 spin_lock_irqsave(&trans->shrd->lock, flags);
821 1256
822 /* Disable (but don't clear!) interrupts here to avoid 1257 /* Disable (but don't clear!) interrupts here to avoid
823 * back-to-back ISRs and sporadic interrupts from our NIC. 1258 * back-to-back ISRs and sporadic interrupts from our NIC.
824 * If we have something to service, the tasklet will re-enable ints. 1259 * If we have something to service, the tasklet will re-enable ints.
825 * If we *don't* have something, we'll re-enable before leaving here. */ 1260 * If we *don't* have something, we'll re-enable before leaving here. */
826 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ 1261 inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
827 iwl_write32(priv, CSR_INT_MASK, 0x00000000); 1262 iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
828 1263
829 /* Discover which interrupts are active/pending */ 1264 /* Discover which interrupts are active/pending */
830 inta = iwl_read32(priv, CSR_INT); 1265 inta = iwl_read32(bus(trans), CSR_INT);
831 1266
832 /* Ignore interrupt if there's nothing in NIC to service. 1267 /* Ignore interrupt if there's nothing in NIC to service.
833 * This may be due to IRQ shared with another device, 1268 * This may be due to IRQ shared with another device,
834 * or due to sporadic interrupts thrown from our NIC. */ 1269 * or due to sporadic interrupts thrown from our NIC. */
835 if (!inta) { 1270 if (!inta) {
836 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n"); 1271 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
837 goto none; 1272 goto none;
838 } 1273 }
839 1274
840 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { 1275 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
841 /* Hardware disappeared. It might have already raised 1276 /* Hardware disappeared. It might have already raised
842 * an interrupt */ 1277 * an interrupt */
843 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta); 1278 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
844 goto unplugged; 1279 goto unplugged;
845 } 1280 }
846 1281
847#ifdef CONFIG_IWLWIFI_DEBUG 1282#ifdef CONFIG_IWLWIFI_DEBUG
848 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { 1283 if (iwl_get_debug_level(trans->shrd) & (IWL_DL_ISR)) {
849 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); 1284 inta_fh = iwl_read32(bus(trans), CSR_FH_INT_STATUS);
850 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, " 1285 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x, "
851 "fh 0x%08x\n", inta, inta_mask, inta_fh); 1286 "fh 0x%08x\n", inta, inta_mask, inta_fh);
852 } 1287 }
853#endif 1288#endif
854 1289
855 priv->inta |= inta; 1290 trans_pcie->inta |= inta;
856 /* iwl_irq_tasklet() will service interrupts and re-enable them */ 1291 /* iwl_irq_tasklet() will service interrupts and re-enable them */
857 if (likely(inta)) 1292 if (likely(inta))
858 tasklet_schedule(&priv->irq_tasklet); 1293 tasklet_schedule(&trans_pcie->irq_tasklet);
859 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && 1294 else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
860 !priv->inta) 1295 !trans_pcie->inta)
861 iwl_enable_interrupts(priv); 1296 iwl_enable_interrupts(trans);
862 1297
863 unplugged: 1298 unplugged:
864 spin_unlock_irqrestore(&priv->lock, flags); 1299 spin_unlock_irqrestore(&trans->shrd->lock, flags);
865 return IRQ_HANDLED; 1300 return IRQ_HANDLED;
866 1301
867 none: 1302 none:
868 /* re-enable interrupts here since we don't have anything to service. */ 1303 /* re-enable interrupts here since we don't have anything to service. */
869 /* only Re-enable if disabled by irq and no schedules tasklet. */ 1304 /* only Re-enable if disabled by irq and no schedules tasklet. */
870 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) 1305 if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
871 iwl_enable_interrupts(priv); 1306 !trans_pcie->inta)
1307 iwl_enable_interrupts(trans);
872 1308
873 spin_unlock_irqrestore(&priv->lock, flags); 1309 spin_unlock_irqrestore(&trans->shrd->lock, flags);
874 return IRQ_NONE; 1310 return IRQ_NONE;
875} 1311}
876 1312
@@ -884,50 +1320,53 @@ static irqreturn_t iwl_isr(int irq, void *data)
884 */ 1320 */
885irqreturn_t iwl_isr_ict(int irq, void *data) 1321irqreturn_t iwl_isr_ict(int irq, void *data)
886{ 1322{
887 struct iwl_priv *priv = data; 1323 struct iwl_trans *trans = data;
1324 struct iwl_trans_pcie *trans_pcie;
888 u32 inta, inta_mask; 1325 u32 inta, inta_mask;
889 u32 val = 0; 1326 u32 val = 0;
890 unsigned long flags; 1327 unsigned long flags;
891 1328
892 if (!priv) 1329 if (!trans)
893 return IRQ_NONE; 1330 return IRQ_NONE;
894 1331
1332 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1333
895 /* dram interrupt table not set yet, 1334 /* dram interrupt table not set yet,
896 * use legacy interrupt. 1335 * use legacy interrupt.
897 */ 1336 */
898 if (!priv->use_ict) 1337 if (!trans_pcie->use_ict)
899 return iwl_isr(irq, data); 1338 return iwl_isr(irq, data);
900 1339
901 spin_lock_irqsave(&priv->lock, flags); 1340 spin_lock_irqsave(&trans->shrd->lock, flags);
902 1341
903 /* Disable (but don't clear!) interrupts here to avoid 1342 /* Disable (but don't clear!) interrupts here to avoid
904 * back-to-back ISRs and sporadic interrupts from our NIC. 1343 * back-to-back ISRs and sporadic interrupts from our NIC.
905 * If we have something to service, the tasklet will re-enable ints. 1344 * If we have something to service, the tasklet will re-enable ints.
906 * If we *don't* have something, we'll re-enable before leaving here. 1345 * If we *don't* have something, we'll re-enable before leaving here.
907 */ 1346 */
908 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ 1347 inta_mask = iwl_read32(bus(trans), CSR_INT_MASK); /* just for debug */
909 iwl_write32(priv, CSR_INT_MASK, 0x00000000); 1348 iwl_write32(bus(trans), CSR_INT_MASK, 0x00000000);
910 1349
911 1350
912 /* Ignore interrupt if there's nothing in NIC to service. 1351 /* Ignore interrupt if there's nothing in NIC to service.
913 * This may be due to IRQ shared with another device, 1352 * This may be due to IRQ shared with another device,
914 * or due to sporadic interrupts thrown from our NIC. */ 1353 * or due to sporadic interrupts thrown from our NIC. */
915 if (!priv->ict_tbl[priv->ict_index]) { 1354 if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
916 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n"); 1355 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
917 goto none; 1356 goto none;
918 } 1357 }
919 1358
920 /* read all entries that not 0 start with ict_index */ 1359 /* read all entries that not 0 start with ict_index */
921 while (priv->ict_tbl[priv->ict_index]) { 1360 while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
922 1361
923 val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]); 1362 val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
924 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n", 1363 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
925 priv->ict_index, 1364 trans_pcie->ict_index,
926 le32_to_cpu( 1365 le32_to_cpu(
927 priv->ict_tbl[priv->ict_index])); 1366 trans_pcie->ict_tbl[trans_pcie->ict_index]));
928 priv->ict_tbl[priv->ict_index] = 0; 1367 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
929 priv->ict_index = iwl_queue_inc_wrap(priv->ict_index, 1368 trans_pcie->ict_index =
930 ICT_COUNT); 1369 iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
931 1370
932 } 1371 }
933 1372
@@ -946,34 +1385,35 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
946 val |= 0x8000; 1385 val |= 0x8000;
947 1386
948 inta = (0xff & val) | ((0xff00 & val) << 16); 1387 inta = (0xff & val) | ((0xff00 & val) << 16);
949 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n", 1388 IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
950 inta, inta_mask, val); 1389 inta, inta_mask, val);
951 1390
952 inta &= priv->inta_mask; 1391 inta &= trans_pcie->inta_mask;
953 priv->inta |= inta; 1392 trans_pcie->inta |= inta;
954 1393
955 /* iwl_irq_tasklet() will service interrupts and re-enable them */ 1394 /* iwl_irq_tasklet() will service interrupts and re-enable them */
956 if (likely(inta)) 1395 if (likely(inta))
957 tasklet_schedule(&priv->irq_tasklet); 1396 tasklet_schedule(&trans_pcie->irq_tasklet);
958 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && 1397 else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
959 !priv->inta) { 1398 !trans_pcie->inta) {
960 /* Allow interrupt if was disabled by this handler and 1399 /* Allow interrupt if was disabled by this handler and
961 * no tasklet was schedules, We should not enable interrupt, 1400 * no tasklet was schedules, We should not enable interrupt,
962 * tasklet will enable it. 1401 * tasklet will enable it.
963 */ 1402 */
964 iwl_enable_interrupts(priv); 1403 iwl_enable_interrupts(trans);
965 } 1404 }
966 1405
967 spin_unlock_irqrestore(&priv->lock, flags); 1406 spin_unlock_irqrestore(&trans->shrd->lock, flags);
968 return IRQ_HANDLED; 1407 return IRQ_HANDLED;
969 1408
970 none: 1409 none:
971 /* re-enable interrupts here since we don't have anything to service. 1410 /* re-enable interrupts here since we don't have anything to service.
972 * only Re-enable if disabled by irq. 1411 * only Re-enable if disabled by irq.
973 */ 1412 */
974 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) 1413 if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
975 iwl_enable_interrupts(priv); 1414 !trans_pcie->inta)
1415 iwl_enable_interrupts(trans);
976 1416
977 spin_unlock_irqrestore(&priv->lock, flags); 1417 spin_unlock_irqrestore(&trans->shrd->lock, flags);
978 return IRQ_NONE; 1418 return IRQ_NONE;
979} 1419}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index a6b2b1db0b1d..5dd6a6d1dfd7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -29,7 +29,6 @@
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/sched.h> 31#include <linux/sched.h>
32#include <net/mac80211.h>
33 32
34#include "iwl-agn.h" 33#include "iwl-agn.h"
35#include "iwl-dev.h" 34#include "iwl-dev.h"
@@ -41,11 +40,13 @@
41/** 40/**
42 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 41 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
43 */ 42 */
44void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv, 43void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
45 struct iwl_tx_queue *txq, 44 struct iwl_tx_queue *txq,
46 u16 byte_cnt) 45 u16 byte_cnt)
47{ 46{
48 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; 47 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
48 struct iwl_trans_pcie *trans_pcie =
49 IWL_TRANS_GET_PCIE_TRANS(trans);
49 int write_ptr = txq->q.write_ptr; 50 int write_ptr = txq->q.write_ptr;
50 int txq_id = txq->q.id; 51 int txq_id = txq->q.id;
51 u8 sec_ctl = 0; 52 u8 sec_ctl = 0;
@@ -53,6 +54,8 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
53 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 54 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
54 __le16 bc_ent; 55 __le16 bc_ent;
55 56
57 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
58
56 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX); 59 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
57 60
58 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id; 61 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
@@ -82,7 +85,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
82/** 85/**
83 * iwl_txq_update_write_ptr - Send new write index to hardware 86 * iwl_txq_update_write_ptr - Send new write index to hardware
84 */ 87 */
85void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) 88void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
86{ 89{
87 u32 reg = 0; 90 u32 reg = 0;
88 int txq_id = txq->q.id; 91 int txq_id = txq->q.id;
@@ -90,28 +93,28 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
90 if (txq->need_update == 0) 93 if (txq->need_update == 0)
91 return; 94 return;
92 95
93 if (priv->cfg->base_params->shadow_reg_enable) { 96 if (hw_params(trans).shadow_reg_enable) {
94 /* shadow register enabled */ 97 /* shadow register enabled */
95 iwl_write32(priv, HBUS_TARG_WRPTR, 98 iwl_write32(bus(trans), HBUS_TARG_WRPTR,
96 txq->q.write_ptr | (txq_id << 8)); 99 txq->q.write_ptr | (txq_id << 8));
97 } else { 100 } else {
98 /* if we're trying to save power */ 101 /* if we're trying to save power */
99 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 102 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
100 /* wake up nic if it's powered down ... 103 /* wake up nic if it's powered down ...
101 * uCode will wake up, and interrupt us again, so next 104 * uCode will wake up, and interrupt us again, so next
102 * time we'll skip this part. */ 105 * time we'll skip this part. */
103 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 106 reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
104 107
105 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 108 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
106 IWL_DEBUG_INFO(priv, 109 IWL_DEBUG_INFO(trans,
107 "Tx queue %d requesting wakeup," 110 "Tx queue %d requesting wakeup,"
108 " GP1 = 0x%x\n", txq_id, reg); 111 " GP1 = 0x%x\n", txq_id, reg);
109 iwl_set_bit(priv, CSR_GP_CNTRL, 112 iwl_set_bit(bus(trans), CSR_GP_CNTRL,
110 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 113 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
111 return; 114 return;
112 } 115 }
113 116
114 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 117 iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
115 txq->q.write_ptr | (txq_id << 8)); 118 txq->q.write_ptr | (txq_id << 8));
116 119
117 /* 120 /*
@@ -120,7 +123,7 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
120 * trying to tx (during RFKILL, we're not trying to tx). 123 * trying to tx (during RFKILL, we're not trying to tx).
121 */ 124 */
122 } else 125 } else
123 iwl_write32(priv, HBUS_TARG_WRPTR, 126 iwl_write32(bus(trans), HBUS_TARG_WRPTR,
124 txq->q.write_ptr | (txq_id << 8)); 127 txq->q.write_ptr | (txq_id << 8));
125 } 128 }
126 txq->need_update = 0; 129 txq->need_update = 0;
@@ -165,7 +168,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
165 return tfd->num_tbs & 0x1f; 168 return tfd->num_tbs & 0x1f;
166} 169}
167 170
168static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta, 171static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
169 struct iwl_tfd *tfd, enum dma_data_direction dma_dir) 172 struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
170{ 173{
171 int i; 174 int i;
@@ -175,56 +178,56 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
175 num_tbs = iwl_tfd_get_num_tbs(tfd); 178 num_tbs = iwl_tfd_get_num_tbs(tfd);
176 179
177 if (num_tbs >= IWL_NUM_OF_TBS) { 180 if (num_tbs >= IWL_NUM_OF_TBS) {
178 IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); 181 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
179 /* @todo issue fatal error, it is quite serious situation */ 182 /* @todo issue fatal error, it is quite serious situation */
180 return; 183 return;
181 } 184 }
182 185
183 /* Unmap tx_cmd */ 186 /* Unmap tx_cmd */
184 if (num_tbs) 187 if (num_tbs)
185 dma_unmap_single(priv->bus->dev, 188 dma_unmap_single(bus(trans)->dev,
186 dma_unmap_addr(meta, mapping), 189 dma_unmap_addr(meta, mapping),
187 dma_unmap_len(meta, len), 190 dma_unmap_len(meta, len),
188 DMA_BIDIRECTIONAL); 191 DMA_BIDIRECTIONAL);
189 192
190 /* Unmap chunks, if any. */ 193 /* Unmap chunks, if any. */
191 for (i = 1; i < num_tbs; i++) 194 for (i = 1; i < num_tbs; i++)
192 dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i), 195 dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
193 iwl_tfd_tb_get_len(tfd, i), dma_dir); 196 iwl_tfd_tb_get_len(tfd, i), dma_dir);
194} 197}
195 198
196/** 199/**
197 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 200 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
198 * @priv - driver private data 201 * @trans - transport private data
199 * @txq - tx queue 202 * @txq - tx queue
200 * @index - the index of the TFD to be freed 203 * @index - the index of the TFD to be freed
201 * 204 *
202 * Does NOT advance any TFD circular buffer read/write indexes 205 * Does NOT advance any TFD circular buffer read/write indexes
203 * Does NOT free the TFD itself (which is within circular buffer) 206 * Does NOT free the TFD itself (which is within circular buffer)
204 */ 207 */
205void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, 208void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
206 int index) 209 int index)
207{ 210{
208 struct iwl_tfd *tfd_tmp = txq->tfds; 211 struct iwl_tfd *tfd_tmp = txq->tfds;
209 212
210 iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index], 213 iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
211 DMA_TO_DEVICE); 214 DMA_TO_DEVICE);
212 215
213 /* free SKB */ 216 /* free SKB */
214 if (txq->txb) { 217 if (txq->skbs) {
215 struct sk_buff *skb; 218 struct sk_buff *skb;
216 219
217 skb = txq->txb[index].skb; 220 skb = txq->skbs[index];
218 221
219 /* can be called from irqs-disabled context */ 222 /* can be called from irqs-disabled context */
220 if (skb) { 223 if (skb) {
221 dev_kfree_skb_any(skb); 224 dev_kfree_skb_any(skb);
222 txq->txb[index].skb = NULL; 225 txq->skbs[index] = NULL;
223 } 226 }
224 } 227 }
225} 228}
226 229
227int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv, 230int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
228 struct iwl_tx_queue *txq, 231 struct iwl_tx_queue *txq,
229 dma_addr_t addr, u16 len, 232 dma_addr_t addr, u16 len,
230 u8 reset) 233 u8 reset)
@@ -244,7 +247,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
244 247
245 /* Each TFD can point to a maximum 20 Tx buffers */ 248 /* Each TFD can point to a maximum 20 Tx buffers */
246 if (num_tbs >= IWL_NUM_OF_TBS) { 249 if (num_tbs >= IWL_NUM_OF_TBS) {
247 IWL_ERR(priv, "Error can not send more than %d chunks\n", 250 IWL_ERR(trans, "Error can not send more than %d chunks\n",
248 IWL_NUM_OF_TBS); 251 IWL_NUM_OF_TBS);
249 return -EINVAL; 252 return -EINVAL;
250 } 253 }
@@ -253,7 +256,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
253 return -EINVAL; 256 return -EINVAL;
254 257
255 if (unlikely(addr & ~IWL_TX_DMA_MASK)) 258 if (unlikely(addr & ~IWL_TX_DMA_MASK))
256 IWL_ERR(priv, "Unaligned address = %llx\n", 259 IWL_ERR(trans, "Unaligned address = %llx\n",
257 (unsigned long long)addr); 260 (unsigned long long)addr);
258 261
259 iwl_tfd_set_tb(tfd, num_tbs, addr, len); 262 iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -302,8 +305,7 @@ int iwl_queue_space(const struct iwl_queue *q)
302/** 305/**
303 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 306 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
304 */ 307 */
305int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, 308int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
306 int count, int slots_num, u32 id)
307{ 309{
308 q->n_bd = count; 310 q->n_bd = count;
309 q->n_window = slots_num; 311 q->n_window = slots_num;
@@ -332,16 +334,12 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
332 return 0; 334 return 0;
333} 335}
334 336
335/*TODO: this functions should NOT be exported from trans module - export it 337static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
336 * until the reclaim flow will be brought to the transport module too.
337 * Add a declaration to make sparse happy */
338void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
339 struct iwl_tx_queue *txq);
340
341void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
342 struct iwl_tx_queue *txq) 338 struct iwl_tx_queue *txq)
343{ 339{
344 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr; 340 struct iwl_trans_pcie *trans_pcie =
341 IWL_TRANS_GET_PCIE_TRANS(trans);
342 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
345 int txq_id = txq->q.id; 343 int txq_id = txq->q.id;
346 int read_ptr = txq->q.read_ptr; 344 int read_ptr = txq->q.read_ptr;
347 u8 sta_id = 0; 345 u8 sta_id = 0;
@@ -349,7 +347,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
349 347
350 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 348 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
351 349
352 if (txq_id != priv->cmd_queue) 350 if (txq_id != trans->shrd->cmd_queue)
353 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id; 351 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
354 352
355 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 353 bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -360,56 +358,61 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
360 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; 358 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
361} 359}
362 360
363static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid, 361static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
364 u16 txq_id) 362 u16 txq_id)
365{ 363{
366 u32 tbl_dw_addr; 364 u32 tbl_dw_addr;
367 u32 tbl_dw; 365 u32 tbl_dw;
368 u16 scd_q2ratid; 366 u16 scd_q2ratid;
369 367
368 struct iwl_trans_pcie *trans_pcie =
369 IWL_TRANS_GET_PCIE_TRANS(trans);
370
370 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 371 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
371 372
372 tbl_dw_addr = priv->scd_base_addr + 373 tbl_dw_addr = trans_pcie->scd_base_addr +
373 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 374 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
374 375
375 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr); 376 tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
376 377
377 if (txq_id & 0x1) 378 if (txq_id & 0x1)
378 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 379 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
379 else 380 else
380 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 381 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
381 382
382 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw); 383 iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
383 384
384 return 0; 385 return 0;
385} 386}
386 387
387static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id) 388static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
388{ 389{
389 /* Simply stop the queue, but don't change any configuration; 390 /* Simply stop the queue, but don't change any configuration;
390 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */ 391 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
391 iwl_write_prph(priv, 392 iwl_write_prph(bus(trans),
392 SCD_QUEUE_STATUS_BITS(txq_id), 393 SCD_QUEUE_STATUS_BITS(txq_id),
393 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)| 394 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
394 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); 395 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
395} 396}
396 397
397void iwl_trans_set_wr_ptrs(struct iwl_priv *priv, 398void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
398 int txq_id, u32 index) 399 int txq_id, u32 index)
399{ 400{
400 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 401 iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
401 (index & 0xff) | (txq_id << 8)); 402 (index & 0xff) | (txq_id << 8));
402 iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index); 403 iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
403} 404}
404 405
405void iwl_trans_tx_queue_set_status(struct iwl_priv *priv, 406void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
406 struct iwl_tx_queue *txq, 407 struct iwl_tx_queue *txq,
407 int tx_fifo_id, int scd_retry) 408 int tx_fifo_id, int scd_retry)
408{ 409{
410 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
409 int txq_id = txq->q.id; 411 int txq_id = txq->q.id;
410 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0; 412 int active =
413 test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
411 414
412 iwl_write_prph(priv, SCD_QUEUE_STATUS_BITS(txq_id), 415 iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
413 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 416 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
414 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 417 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
415 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 418 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
@@ -417,55 +420,75 @@ void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
417 420
418 txq->sched_retry = scd_retry; 421 txq->sched_retry = scd_retry;
419 422
420 IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n", 423 IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
421 active ? "Activate" : "Deactivate", 424 active ? "Activate" : "Deactivate",
422 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); 425 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
423} 426}
424 427
425void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid, 428static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
426 int frame_limit) 429 u8 ctx, u16 tid)
430{
431 const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
432 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
433 return ac_to_fifo[tid_to_ac[tid]];
434
435 /* no support for TIDs 8-15 yet */
436 return -EINVAL;
437}
438
439void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
440 enum iwl_rxon_context_id ctx, int sta_id,
441 int tid, int frame_limit)
427{ 442{
428 int tx_fifo, txq_id, ssn_idx; 443 int tx_fifo, txq_id, ssn_idx;
429 u16 ra_tid; 444 u16 ra_tid;
430 unsigned long flags; 445 unsigned long flags;
431 struct iwl_tid_data *tid_data; 446 struct iwl_tid_data *tid_data;
432 447
448 struct iwl_trans_pcie *trans_pcie =
449 IWL_TRANS_GET_PCIE_TRANS(trans);
450
433 if (WARN_ON(sta_id == IWL_INVALID_STATION)) 451 if (WARN_ON(sta_id == IWL_INVALID_STATION))
434 return; 452 return;
435 if (WARN_ON(tid >= MAX_TID_COUNT)) 453 if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
436 return; 454 return;
437 455
438 spin_lock_irqsave(&priv->sta_lock, flags); 456 tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
439 tid_data = &priv->stations[sta_id].tid[tid]; 457 if (WARN_ON(tx_fifo < 0)) {
458 IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
459 return;
460 }
461
462 spin_lock_irqsave(&trans->shrd->sta_lock, flags);
463 tid_data = &trans->shrd->tid_data[sta_id][tid];
440 ssn_idx = SEQ_TO_SN(tid_data->seq_number); 464 ssn_idx = SEQ_TO_SN(tid_data->seq_number);
441 txq_id = tid_data->agg.txq_id; 465 txq_id = tid_data->agg.txq_id;
442 tx_fifo = tid_data->agg.tx_fifo; 466 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
443 spin_unlock_irqrestore(&priv->sta_lock, flags);
444 467
445 ra_tid = BUILD_RAxTID(sta_id, tid); 468 ra_tid = BUILD_RAxTID(sta_id, tid);
446 469
447 spin_lock_irqsave(&priv->lock, flags); 470 spin_lock_irqsave(&trans->shrd->lock, flags);
448 471
449 /* Stop this Tx queue before configuring it */ 472 /* Stop this Tx queue before configuring it */
450 iwlagn_tx_queue_stop_scheduler(priv, txq_id); 473 iwlagn_tx_queue_stop_scheduler(trans, txq_id);
451 474
452 /* Map receiver-address / traffic-ID to this queue */ 475 /* Map receiver-address / traffic-ID to this queue */
453 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); 476 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
454 477
455 /* Set this queue as a chain-building queue */ 478 /* Set this queue as a chain-building queue */
456 iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id)); 479 iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
457 480
458 /* enable aggregations for the queue */ 481 /* enable aggregations for the queue */
459 iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id)); 482 iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
460 483
461 /* Place first TFD at index corresponding to start sequence number. 484 /* Place first TFD at index corresponding to start sequence number.
462 * Assumes that ssn_idx is valid (!= 0xFFF) */ 485 * Assumes that ssn_idx is valid (!= 0xFFF) */
463 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 486 trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
464 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 487 trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
465 iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx); 488 iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
466 489
467 /* Set up Tx window size and frame limit for this queue */ 490 /* Set up Tx window size and frame limit for this queue */
468 iwl_write_targ_mem(priv, priv->scd_base_addr + 491 iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
469 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 492 SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
470 sizeof(u32), 493 sizeof(u32),
471 ((frame_limit << 494 ((frame_limit <<
@@ -475,40 +498,159 @@ void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
475 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) & 498 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
476 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 499 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
477 500
478 iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); 501 iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
479 502
480 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 503 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
481 iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); 504 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
505 tx_fifo, 1);
506
507 trans_pcie->txq[txq_id].sta_id = sta_id;
508 trans_pcie->txq[txq_id].tid = tid;
509
510 spin_unlock_irqrestore(&trans->shrd->lock, flags);
511}
512
513/*
514 * Find first available (lowest unused) Tx Queue, mark it "active".
515 * Called only when finding queue for aggregation.
516 * Should never return anything < 7, because they should already
517 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
518 */
519static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
520{
521 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
522 int txq_id;
523
524 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
525 if (!test_and_set_bit(txq_id,
526 &trans_pcie->txq_ctx_active_msk))
527 return txq_id;
528 return -1;
529}
530
531int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
532 enum iwl_rxon_context_id ctx, int sta_id,
533 int tid, u16 *ssn)
534{
535 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
536 struct iwl_tid_data *tid_data;
537 unsigned long flags;
538 u16 txq_id;
539 struct iwl_priv *priv = priv(trans);
540
541 txq_id = iwlagn_txq_ctx_activate_free(trans);
542 if (txq_id == -1) {
543 IWL_ERR(trans, "No free aggregation queue available\n");
544 return -ENXIO;
545 }
546
547 spin_lock_irqsave(&trans->shrd->sta_lock, flags);
548 tid_data = &trans->shrd->tid_data[sta_id][tid];
549 *ssn = SEQ_TO_SN(tid_data->seq_number);
550 tid_data->agg.txq_id = txq_id;
551 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
552
553 tid_data = &trans->shrd->tid_data[sta_id][tid];
554 if (tid_data->tfds_in_queue == 0) {
555 IWL_DEBUG_HT(trans, "HW queue is empty\n");
556 tid_data->agg.state = IWL_AGG_ON;
557 iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
558 } else {
559 IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
560 "queue\n", tid_data->tfds_in_queue);
561 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
562 }
563 spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
564
565 return 0;
566}
567
568void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
569{
570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
571 iwlagn_tx_queue_stop_scheduler(trans, txq_id);
572
573 iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
574
575 trans_pcie->txq[txq_id].q.read_ptr = 0;
576 trans_pcie->txq[txq_id].q.write_ptr = 0;
577 /* supposes that ssn_idx is valid (!= 0xFFF) */
578 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
482 579
483 spin_unlock_irqrestore(&priv->lock, flags); 580 iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
581 iwl_txq_ctx_deactivate(trans_pcie, txq_id);
582 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
484} 583}
485 584
486int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, 585int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
487 u16 ssn_idx, u8 tx_fifo) 586 enum iwl_rxon_context_id ctx, int sta_id,
587 int tid)
488{ 588{
589 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
590 unsigned long flags;
591 int read_ptr, write_ptr;
592 struct iwl_tid_data *tid_data;
593 int txq_id;
594
595 spin_lock_irqsave(&trans->shrd->sta_lock, flags);
596
597 tid_data = &trans->shrd->tid_data[sta_id][tid];
598 txq_id = tid_data->agg.txq_id;
599
489 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) || 600 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
490 (IWLAGN_FIRST_AMPDU_QUEUE + 601 (IWLAGN_FIRST_AMPDU_QUEUE +
491 priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) { 602 hw_params(trans).num_ampdu_queues <= txq_id)) {
492 IWL_ERR(priv, 603 IWL_ERR(trans,
493 "queue number out of range: %d, must be %d to %d\n", 604 "queue number out of range: %d, must be %d to %d\n",
494 txq_id, IWLAGN_FIRST_AMPDU_QUEUE, 605 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
495 IWLAGN_FIRST_AMPDU_QUEUE + 606 IWLAGN_FIRST_AMPDU_QUEUE +
496 priv->cfg->base_params->num_of_ampdu_queues - 1); 607 hw_params(trans).num_ampdu_queues - 1);
608 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
497 return -EINVAL; 609 return -EINVAL;
498 } 610 }
499 611
500 iwlagn_tx_queue_stop_scheduler(priv, txq_id); 612 switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
613 case IWL_EMPTYING_HW_QUEUE_ADDBA:
614 /*
615 * This can happen if the peer stops aggregation
616 * again before we've had a chance to drain the
617 * queue we selected previously, i.e. before the
618 * session was really started completely.
619 */
620 IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
621 goto turn_off;
622 case IWL_AGG_ON:
623 break;
624 default:
625 IWL_WARN(trans, "Stopping AGG while state not ON"
626 "or starting\n");
627 }
501 628
502 iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id)); 629 write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
630 read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
503 631
504 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff); 632 /* The queue is not empty */
505 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff); 633 if (write_ptr != read_ptr) {
506 /* supposes that ssn_idx is valid (!= 0xFFF) */ 634 IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
507 iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx); 635 trans->shrd->tid_data[sta_id][tid].agg.state =
636 IWL_EMPTYING_HW_QUEUE_DELBA;
637 spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
638 return 0;
639 }
640
641 IWL_DEBUG_HT(trans, "HW queue is empty\n");
642turn_off:
643 trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
644
645 /* do not restore/save irqs */
646 spin_unlock(&trans->shrd->sta_lock);
647 spin_lock(&trans->shrd->lock);
648
649 iwl_trans_pcie_txq_agg_disable(trans, txq_id);
650
651 spin_unlock_irqrestore(&trans->shrd->lock, flags);
508 652
509 iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id)); 653 iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
510 iwl_txq_ctx_deactivate(priv, txq_id);
511 iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
512 654
513 return 0; 655 return 0;
514} 656}
@@ -524,9 +666,10 @@ int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
524 * failed. On success, it turns the index (> 0) of command in the 666 * failed. On success, it turns the index (> 0) of command in the
525 * command queue. 667 * command queue.
526 */ 668 */
527static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 669static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
528{ 670{
529 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 671 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
672 struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
530 struct iwl_queue *q = &txq->q; 673 struct iwl_queue *q = &txq->q;
531 struct iwl_device_cmd *out_cmd; 674 struct iwl_device_cmd *out_cmd;
532 struct iwl_cmd_meta *out_meta; 675 struct iwl_cmd_meta *out_meta;
@@ -544,14 +687,14 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
544 int trace_idx; 687 int trace_idx;
545#endif 688#endif
546 689
547 if (test_bit(STATUS_FW_ERROR, &priv->status)) { 690 if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
548 IWL_WARN(priv, "fw recovery, no hcmd send\n"); 691 IWL_WARN(trans, "fw recovery, no hcmd send\n");
549 return -EIO; 692 return -EIO;
550 } 693 }
551 694
552 if ((priv->ucode_owner == IWL_OWNERSHIP_TM) && 695 if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
553 !(cmd->flags & CMD_ON_DEMAND)) { 696 !(cmd->flags & CMD_ON_DEMAND)) {
554 IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n"); 697 IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
555 return -EIO; 698 return -EIO;
556 } 699 }
557 700
@@ -584,22 +727,22 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
584 if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE)) 727 if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
585 return -EINVAL; 728 return -EINVAL;
586 729
587 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { 730 if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
588 IWL_WARN(priv, "Not sending command - %s KILL\n", 731 IWL_WARN(trans, "Not sending command - %s KILL\n",
589 iwl_is_rfkill(priv) ? "RF" : "CT"); 732 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
590 return -EIO; 733 return -EIO;
591 } 734 }
592 735
593 spin_lock_irqsave(&priv->hcmd_lock, flags); 736 spin_lock_irqsave(&trans->hcmd_lock, flags);
594 737
595 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 738 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
596 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 739 spin_unlock_irqrestore(&trans->hcmd_lock, flags);
597 740
598 IWL_ERR(priv, "No space in command queue\n"); 741 IWL_ERR(trans, "No space in command queue\n");
599 is_ct_kill = iwl_check_for_ct_kill(priv); 742 is_ct_kill = iwl_check_for_ct_kill(priv(trans));
600 if (!is_ct_kill) { 743 if (!is_ct_kill) {
601 IWL_ERR(priv, "Restarting adapter due to queue full\n"); 744 IWL_ERR(trans, "Restarting adapter queue is full\n");
602 iwlagn_fw_error(priv, false); 745 iwlagn_fw_error(priv(trans), false);
603 } 746 }
604 return -ENOSPC; 747 return -ENOSPC;
605 } 748 }
@@ -618,8 +761,9 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
618 761
619 out_cmd->hdr.cmd = cmd->id; 762 out_cmd->hdr.cmd = cmd->id;
620 out_cmd->hdr.flags = 0; 763 out_cmd->hdr.flags = 0;
621 out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | 764 out_cmd->hdr.sequence =
622 INDEX_TO_SEQ(q->write_ptr)); 765 cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
766 INDEX_TO_SEQ(q->write_ptr));
623 767
624 /* and copy the data that needs to be copied */ 768 /* and copy the data that needs to be copied */
625 769
@@ -633,16 +777,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
633 cmd_dest += cmd->len[i]; 777 cmd_dest += cmd->len[i];
634 } 778 }
635 779
636 IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " 780 IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
637 "%d bytes at %d[%d]:%d\n", 781 "%d bytes at %d[%d]:%d\n",
638 get_cmd_string(out_cmd->hdr.cmd), 782 get_cmd_string(out_cmd->hdr.cmd),
639 out_cmd->hdr.cmd, 783 out_cmd->hdr.cmd,
640 le16_to_cpu(out_cmd->hdr.sequence), cmd_size, 784 le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
641 q->write_ptr, idx, priv->cmd_queue); 785 q->write_ptr, idx, trans->shrd->cmd_queue);
642 786
643 phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size, 787 phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
644 DMA_BIDIRECTIONAL); 788 DMA_BIDIRECTIONAL);
645 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) { 789 if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
646 idx = -ENOMEM; 790 idx = -ENOMEM;
647 goto out; 791 goto out;
648 } 792 }
@@ -650,7 +794,8 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
650 dma_unmap_addr_set(out_meta, mapping, phys_addr); 794 dma_unmap_addr_set(out_meta, mapping, phys_addr);
651 dma_unmap_len_set(out_meta, len, copy_size); 795 dma_unmap_len_set(out_meta, len, copy_size);
652 796
653 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1); 797 iwlagn_txq_attach_buf_to_tfd(trans, txq,
798 phys_addr, copy_size, 1);
654#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 799#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
655 trace_bufs[0] = &out_cmd->hdr; 800 trace_bufs[0] = &out_cmd->hdr;
656 trace_lens[0] = copy_size; 801 trace_lens[0] = copy_size;
@@ -662,17 +807,18 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
662 continue; 807 continue;
663 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) 808 if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
664 continue; 809 continue;
665 phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i], 810 phys_addr = dma_map_single(bus(trans)->dev,
811 (void *)cmd->data[i],
666 cmd->len[i], DMA_BIDIRECTIONAL); 812 cmd->len[i], DMA_BIDIRECTIONAL);
667 if (dma_mapping_error(priv->bus->dev, phys_addr)) { 813 if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
668 iwlagn_unmap_tfd(priv, out_meta, 814 iwlagn_unmap_tfd(trans, out_meta,
669 &txq->tfds[q->write_ptr], 815 &txq->tfds[q->write_ptr],
670 DMA_BIDIRECTIONAL); 816 DMA_BIDIRECTIONAL);
671 idx = -ENOMEM; 817 idx = -ENOMEM;
672 goto out; 818 goto out;
673 } 819 }
674 820
675 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, 821 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
676 cmd->len[i], 0); 822 cmd->len[i], 0);
677#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 823#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
678 trace_bufs[trace_idx] = cmd->data[i]; 824 trace_bufs[trace_idx] = cmd->data[i];
@@ -688,7 +834,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
688 /* check that tracing gets all possible blocks */ 834 /* check that tracing gets all possible blocks */
689 BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3); 835 BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
690#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 836#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
691 trace_iwlwifi_dev_hcmd(priv, cmd->flags, 837 trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
692 trace_bufs[0], trace_lens[0], 838 trace_bufs[0], trace_lens[0],
693 trace_bufs[1], trace_lens[1], 839 trace_bufs[1], trace_lens[1],
694 trace_bufs[2], trace_lens[2]); 840 trace_bufs[2], trace_lens[2]);
@@ -696,10 +842,10 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
696 842
697 /* Increment and update queue's write index */ 843 /* Increment and update queue's write index */
698 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 844 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
699 iwl_txq_update_write_ptr(priv, txq); 845 iwl_txq_update_write_ptr(trans, txq);
700 846
701 out: 847 out:
702 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 848 spin_unlock_irqrestore(&trans->hcmd_lock, flags);
703 return idx; 849 return idx;
704} 850}
705 851
@@ -712,7 +858,9 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
712 */ 858 */
713static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx) 859static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
714{ 860{
715 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 861 struct iwl_trans_pcie *trans_pcie =
862 IWL_TRANS_GET_PCIE_TRANS(trans(priv));
863 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
716 struct iwl_queue *q = &txq->q; 864 struct iwl_queue *q = &txq->q;
717 int nfreed = 0; 865 int nfreed = 0;
718 866
@@ -752,17 +900,19 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
752 int cmd_index; 900 int cmd_index;
753 struct iwl_device_cmd *cmd; 901 struct iwl_device_cmd *cmd;
754 struct iwl_cmd_meta *meta; 902 struct iwl_cmd_meta *meta;
755 struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; 903 struct iwl_trans *trans = trans(priv);
904 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
905 struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
756 unsigned long flags; 906 unsigned long flags;
757 907
758 /* If a Tx command is being handled and it isn't in the actual 908 /* If a Tx command is being handled and it isn't in the actual
759 * command queue then there a command routing bug has been introduced 909 * command queue then there a command routing bug has been introduced
760 * in the queue management code. */ 910 * in the queue management code. */
761 if (WARN(txq_id != priv->cmd_queue, 911 if (WARN(txq_id != trans->shrd->cmd_queue,
762 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", 912 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
763 txq_id, priv->cmd_queue, sequence, 913 txq_id, trans->shrd->cmd_queue, sequence,
764 priv->txq[priv->cmd_queue].q.read_ptr, 914 trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
765 priv->txq[priv->cmd_queue].q.write_ptr)) { 915 trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
766 iwl_print_hex_error(priv, pkt, 32); 916 iwl_print_hex_error(priv, pkt, 32);
767 return; 917 return;
768 } 918 }
@@ -771,7 +921,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
771 cmd = txq->cmd[cmd_index]; 921 cmd = txq->cmd[cmd_index];
772 meta = &txq->meta[cmd_index]; 922 meta = &txq->meta[cmd_index];
773 923
774 iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 924 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
925 DMA_BIDIRECTIONAL);
775 926
776 /* Input error checking is done when commands are added to queue. */ 927 /* Input error checking is done when commands are added to queue. */
777 if (meta->flags & CMD_WANT_SKB) { 928 if (meta->flags & CMD_WANT_SKB) {
@@ -780,20 +931,20 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
780 } else if (meta->callback) 931 } else if (meta->callback)
781 meta->callback(priv, cmd, pkt); 932 meta->callback(priv, cmd, pkt);
782 933
783 spin_lock_irqsave(&priv->hcmd_lock, flags); 934 spin_lock_irqsave(&trans->hcmd_lock, flags);
784 935
785 iwl_hcmd_queue_reclaim(priv, txq_id, index); 936 iwl_hcmd_queue_reclaim(priv, txq_id, index);
786 937
787 if (!(meta->flags & CMD_ASYNC)) { 938 if (!(meta->flags & CMD_ASYNC)) {
788 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 939 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
789 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", 940 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
790 get_cmd_string(cmd->hdr.cmd)); 941 get_cmd_string(cmd->hdr.cmd));
791 wake_up_interruptible(&priv->wait_command_queue); 942 wake_up_interruptible(&priv->wait_command_queue);
792 } 943 }
793 944
794 meta->flags = 0; 945 meta->flags = 0;
795 946
796 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 947 spin_unlock_irqrestore(&trans->hcmd_lock, flags);
797} 948}
798 949
799const char *get_cmd_string(u8 cmd) 950const char *get_cmd_string(u8 cmd)
@@ -904,7 +1055,7 @@ static void iwl_generic_cmd_callback(struct iwl_priv *priv,
904#endif 1055#endif
905} 1056}
906 1057
907static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 1058static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
908{ 1059{
909 int ret; 1060 int ret;
910 1061
@@ -916,77 +1067,78 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
916 if (!cmd->callback) 1067 if (!cmd->callback)
917 cmd->callback = iwl_generic_cmd_callback; 1068 cmd->callback = iwl_generic_cmd_callback;
918 1069
919 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1070 if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
920 return -EBUSY; 1071 return -EBUSY;
921 1072
922 ret = iwl_enqueue_hcmd(priv, cmd); 1073 ret = iwl_enqueue_hcmd(trans, cmd);
923 if (ret < 0) { 1074 if (ret < 0) {
924 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", 1075 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
925 get_cmd_string(cmd->id), ret); 1076 get_cmd_string(cmd->id), ret);
926 return ret; 1077 return ret;
927 } 1078 }
928 return 0; 1079 return 0;
929} 1080}
930 1081
931static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 1082static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
932{ 1083{
1084 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
933 int cmd_idx; 1085 int cmd_idx;
934 int ret; 1086 int ret;
935 1087
936 lockdep_assert_held(&priv->mutex); 1088 lockdep_assert_held(&trans->shrd->mutex);
937 1089
938 /* A synchronous command can not have a callback set. */ 1090 /* A synchronous command can not have a callback set. */
939 if (WARN_ON(cmd->callback)) 1091 if (WARN_ON(cmd->callback))
940 return -EINVAL; 1092 return -EINVAL;
941 1093
942 IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n", 1094 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
943 get_cmd_string(cmd->id)); 1095 get_cmd_string(cmd->id));
944 1096
945 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 1097 set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
946 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n", 1098 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
947 get_cmd_string(cmd->id)); 1099 get_cmd_string(cmd->id));
948 1100
949 cmd_idx = iwl_enqueue_hcmd(priv, cmd); 1101 cmd_idx = iwl_enqueue_hcmd(trans, cmd);
950 if (cmd_idx < 0) { 1102 if (cmd_idx < 0) {
951 ret = cmd_idx; 1103 ret = cmd_idx;
952 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1104 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
953 IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", 1105 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
954 get_cmd_string(cmd->id), ret); 1106 get_cmd_string(cmd->id), ret);
955 return ret; 1107 return ret;
956 } 1108 }
957 1109
958 ret = wait_event_interruptible_timeout(priv->wait_command_queue, 1110 ret = wait_event_interruptible_timeout(priv(trans)->wait_command_queue,
959 !test_bit(STATUS_HCMD_ACTIVE, &priv->status), 1111 !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
960 HOST_COMPLETE_TIMEOUT); 1112 HOST_COMPLETE_TIMEOUT);
961 if (!ret) { 1113 if (!ret) {
962 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { 1114 if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
963 IWL_ERR(priv, 1115 IWL_ERR(trans,
964 "Error sending %s: time out after %dms.\n", 1116 "Error sending %s: time out after %dms.\n",
965 get_cmd_string(cmd->id), 1117 get_cmd_string(cmd->id),
966 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 1118 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
967 1119
968 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1120 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
969 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command" 1121 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
970 "%s\n", get_cmd_string(cmd->id)); 1122 "%s\n", get_cmd_string(cmd->id));
971 ret = -ETIMEDOUT; 1123 ret = -ETIMEDOUT;
972 goto cancel; 1124 goto cancel;
973 } 1125 }
974 } 1126 }
975 1127
976 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { 1128 if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
977 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n", 1129 IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
978 get_cmd_string(cmd->id)); 1130 get_cmd_string(cmd->id));
979 ret = -ECANCELED; 1131 ret = -ECANCELED;
980 goto fail; 1132 goto fail;
981 } 1133 }
982 if (test_bit(STATUS_FW_ERROR, &priv->status)) { 1134 if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
983 IWL_ERR(priv, "Command %s failed: FW Error\n", 1135 IWL_ERR(trans, "Command %s failed: FW Error\n",
984 get_cmd_string(cmd->id)); 1136 get_cmd_string(cmd->id));
985 ret = -EIO; 1137 ret = -EIO;
986 goto fail; 1138 goto fail;
987 } 1139 }
988 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) { 1140 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
989 IWL_ERR(priv, "Error: Response NULL in '%s'\n", 1141 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
990 get_cmd_string(cmd->id)); 1142 get_cmd_string(cmd->id));
991 ret = -EIO; 1143 ret = -EIO;
992 goto cancel; 1144 goto cancel;
@@ -1002,28 +1154,28 @@ cancel:
1002 * in later, it will possibly set an invalid 1154 * in later, it will possibly set an invalid
1003 * address (cmd->meta.source). 1155 * address (cmd->meta.source).
1004 */ 1156 */
1005 priv->txq[priv->cmd_queue].meta[cmd_idx].flags &= 1157 trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
1006 ~CMD_WANT_SKB; 1158 ~CMD_WANT_SKB;
1007 } 1159 }
1008fail: 1160fail:
1009 if (cmd->reply_page) { 1161 if (cmd->reply_page) {
1010 iwl_free_pages(priv, cmd->reply_page); 1162 iwl_free_pages(trans->shrd, cmd->reply_page);
1011 cmd->reply_page = 0; 1163 cmd->reply_page = 0;
1012 } 1164 }
1013 1165
1014 return ret; 1166 return ret;
1015} 1167}
1016 1168
1017int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) 1169int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1018{ 1170{
1019 if (cmd->flags & CMD_ASYNC) 1171 if (cmd->flags & CMD_ASYNC)
1020 return iwl_send_cmd_async(priv, cmd); 1172 return iwl_send_cmd_async(trans, cmd);
1021 1173
1022 return iwl_send_cmd_sync(priv, cmd); 1174 return iwl_send_cmd_sync(trans, cmd);
1023} 1175}
1024 1176
1025int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len, 1177int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
1026 const void *data) 1178 u16 len, const void *data)
1027{ 1179{
1028 struct iwl_host_cmd cmd = { 1180 struct iwl_host_cmd cmd = {
1029 .id = id, 1181 .id = id,
@@ -1032,5 +1184,53 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
1032 .flags = flags, 1184 .flags = flags,
1033 }; 1185 };
1034 1186
1035 return iwl_send_cmd(priv, &cmd); 1187 return iwl_trans_pcie_send_cmd(trans, &cmd);
1188}
1189
1190/* Frees buffers until index _not_ inclusive */
1191int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1192 struct sk_buff_head *skbs)
1193{
1194 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1195 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1196 struct iwl_queue *q = &txq->q;
1197 int last_to_free;
1198 int freed = 0;
1199
1200 /*Since we free until index _not_ inclusive, the one before index is
1201 * the last we will free. This one must be used */
1202 last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
1203
1204 if ((index >= q->n_bd) ||
1205 (iwl_queue_used(q, last_to_free) == 0)) {
1206 IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
1207 "last_to_free %d is out of range [0-%d] %d %d.\n",
1208 __func__, txq_id, last_to_free, q->n_bd,
1209 q->write_ptr, q->read_ptr);
1210 return 0;
1211 }
1212
1213 IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
1214 q->read_ptr, index);
1215
1216 if (WARN_ON(!skb_queue_empty(skbs)))
1217 return 0;
1218
1219 for (;
1220 q->read_ptr != index;
1221 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1222
1223 if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
1224 continue;
1225
1226 __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
1227
1228 txq->skbs[txq->q.read_ptr] = NULL;
1229
1230 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
1231
1232 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
1233 freed++;
1234 }
1235 return freed;
1036} 1236}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.c b/drivers/net/wireless/iwlwifi/iwl-trans.c
index 3001bfb46e25..cec13adb018e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.c
@@ -60,6 +60,11 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#include <linux/interrupt.h>
64#include <linux/debugfs.h>
65#include <linux/bitops.h>
66#include <linux/gfp.h>
67
63#include "iwl-dev.h" 68#include "iwl-dev.h"
64#include "iwl-trans.h" 69#include "iwl-trans.h"
65#include "iwl-core.h" 70#include "iwl-core.h"
@@ -67,14 +72,16 @@
67#include "iwl-trans-int-pcie.h" 72#include "iwl-trans-int-pcie.h"
68/*TODO remove uneeded includes when the transport layer tx_free will be here */ 73/*TODO remove uneeded includes when the transport layer tx_free will be here */
69#include "iwl-agn.h" 74#include "iwl-agn.h"
70#include "iwl-core.h" 75#include "iwl-shared.h"
71 76
72static int iwl_trans_rx_alloc(struct iwl_priv *priv) 77static int iwl_trans_rx_alloc(struct iwl_trans *trans)
73{ 78{
74 struct iwl_rx_queue *rxq = &priv->rxq; 79 struct iwl_trans_pcie *trans_pcie =
75 struct device *dev = priv->bus->dev; 80 IWL_TRANS_GET_PCIE_TRANS(trans);
81 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
82 struct device *dev = bus(trans)->dev;
76 83
77 memset(&priv->rxq, 0, sizeof(priv->rxq)); 84 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
78 85
79 spin_lock_init(&rxq->lock); 86 spin_lock_init(&rxq->lock);
80 INIT_LIST_HEAD(&rxq->rx_free); 87 INIT_LIST_HEAD(&rxq->rx_free);
@@ -108,9 +115,11 @@ err_bd:
108 return -ENOMEM; 115 return -ENOMEM;
109} 116}
110 117
111static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv) 118static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
112{ 119{
113 struct iwl_rx_queue *rxq = &priv->rxq; 120 struct iwl_trans_pcie *trans_pcie =
121 IWL_TRANS_GET_PCIE_TRANS(trans);
122 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
114 int i; 123 int i;
115 124
116 /* Fill the rx_used queue with _all_ of the Rx buffers */ 125 /* Fill the rx_used queue with _all_ of the Rx buffers */
@@ -118,17 +127,18 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_priv *priv)
118 /* In the reset function, these buffers may have been allocated 127 /* In the reset function, these buffers may have been allocated
119 * to an SKB, so we need to unmap and free potential storage */ 128 * to an SKB, so we need to unmap and free potential storage */
120 if (rxq->pool[i].page != NULL) { 129 if (rxq->pool[i].page != NULL) {
121 dma_unmap_page(priv->bus->dev, rxq->pool[i].page_dma, 130 dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
122 PAGE_SIZE << priv->hw_params.rx_page_order, 131 PAGE_SIZE << hw_params(trans).rx_page_order,
123 DMA_FROM_DEVICE); 132 DMA_FROM_DEVICE);
124 __iwl_free_pages(priv, rxq->pool[i].page); 133 __free_pages(rxq->pool[i].page,
134 hw_params(trans).rx_page_order);
125 rxq->pool[i].page = NULL; 135 rxq->pool[i].page = NULL;
126 } 136 }
127 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 137 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
128 } 138 }
129} 139}
130 140
131static void iwl_trans_rx_hw_init(struct iwl_priv *priv, 141static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
132 struct iwl_rx_queue *rxq) 142 struct iwl_rx_queue *rxq)
133{ 143{
134 u32 rb_size; 144 u32 rb_size;
@@ -143,17 +153,17 @@ static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
143 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 153 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
144 154
145 /* Stop Rx DMA */ 155 /* Stop Rx DMA */
146 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 156 iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
147 157
148 /* Reset driver's Rx queue write index */ 158 /* Reset driver's Rx queue write index */
149 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 159 iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
150 160
151 /* Tell device where to find RBD circular buffer in DRAM */ 161 /* Tell device where to find RBD circular buffer in DRAM */
152 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 162 iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
153 (u32)(rxq->bd_dma >> 8)); 163 (u32)(rxq->bd_dma >> 8));
154 164
155 /* Tell device where in DRAM to update its Rx status */ 165 /* Tell device where in DRAM to update its Rx status */
156 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG, 166 iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
157 rxq->rb_stts_dma >> 4); 167 rxq->rb_stts_dma >> 4);
158 168
159 /* Enable Rx DMA 169 /* Enable Rx DMA
@@ -164,7 +174,7 @@ static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
164 * RB timeout 0x10 174 * RB timeout 0x10
165 * 256 RBDs 175 * 256 RBDs
166 */ 176 */
167 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 177 iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
168 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 178 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
169 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 179 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
170 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 180 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
@@ -174,17 +184,20 @@ static void iwl_trans_rx_hw_init(struct iwl_priv *priv,
174 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 184 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
175 185
176 /* Set interrupt coalescing timer to default (2048 usecs) */ 186 /* Set interrupt coalescing timer to default (2048 usecs) */
177 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 187 iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
178} 188}
179 189
180static int iwl_rx_init(struct iwl_priv *priv) 190static int iwl_rx_init(struct iwl_trans *trans)
181{ 191{
182 struct iwl_rx_queue *rxq = &priv->rxq; 192 struct iwl_trans_pcie *trans_pcie =
193 IWL_TRANS_GET_PCIE_TRANS(trans);
194 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
195
183 int i, err; 196 int i, err;
184 unsigned long flags; 197 unsigned long flags;
185 198
186 if (!rxq->bd) { 199 if (!rxq->bd) {
187 err = iwl_trans_rx_alloc(priv); 200 err = iwl_trans_rx_alloc(trans);
188 if (err) 201 if (err)
189 return err; 202 return err;
190 } 203 }
@@ -193,7 +206,7 @@ static int iwl_rx_init(struct iwl_priv *priv)
193 INIT_LIST_HEAD(&rxq->rx_free); 206 INIT_LIST_HEAD(&rxq->rx_free);
194 INIT_LIST_HEAD(&rxq->rx_used); 207 INIT_LIST_HEAD(&rxq->rx_used);
195 208
196 iwl_trans_rxq_free_rx_bufs(priv); 209 iwl_trans_rxq_free_rx_bufs(trans);
197 210
198 for (i = 0; i < RX_QUEUE_SIZE; i++) 211 for (i = 0; i < RX_QUEUE_SIZE; i++)
199 rxq->queue[i] = NULL; 212 rxq->queue[i] = NULL;
@@ -205,65 +218,68 @@ static int iwl_rx_init(struct iwl_priv *priv)
205 rxq->free_count = 0; 218 rxq->free_count = 0;
206 spin_unlock_irqrestore(&rxq->lock, flags); 219 spin_unlock_irqrestore(&rxq->lock, flags);
207 220
208 iwlagn_rx_replenish(priv); 221 iwlagn_rx_replenish(trans);
209 222
210 iwl_trans_rx_hw_init(priv, rxq); 223 iwl_trans_rx_hw_init(trans, rxq);
211 224
212 spin_lock_irqsave(&priv->lock, flags); 225 spin_lock_irqsave(&trans->shrd->lock, flags);
213 rxq->need_update = 1; 226 rxq->need_update = 1;
214 iwl_rx_queue_update_write_ptr(priv, rxq); 227 iwl_rx_queue_update_write_ptr(trans, rxq);
215 spin_unlock_irqrestore(&priv->lock, flags); 228 spin_unlock_irqrestore(&trans->shrd->lock, flags);
216 229
217 return 0; 230 return 0;
218} 231}
219 232
220static void iwl_trans_rx_free(struct iwl_priv *priv) 233static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
221{ 234{
222 struct iwl_rx_queue *rxq = &priv->rxq; 235 struct iwl_trans_pcie *trans_pcie =
236 IWL_TRANS_GET_PCIE_TRANS(trans);
237 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
238
223 unsigned long flags; 239 unsigned long flags;
224 240
225 /*if rxq->bd is NULL, it means that nothing has been allocated, 241 /*if rxq->bd is NULL, it means that nothing has been allocated,
226 * exit now */ 242 * exit now */
227 if (!rxq->bd) { 243 if (!rxq->bd) {
228 IWL_DEBUG_INFO(priv, "Free NULL rx context\n"); 244 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
229 return; 245 return;
230 } 246 }
231 247
232 spin_lock_irqsave(&rxq->lock, flags); 248 spin_lock_irqsave(&rxq->lock, flags);
233 iwl_trans_rxq_free_rx_bufs(priv); 249 iwl_trans_rxq_free_rx_bufs(trans);
234 spin_unlock_irqrestore(&rxq->lock, flags); 250 spin_unlock_irqrestore(&rxq->lock, flags);
235 251
236 dma_free_coherent(priv->bus->dev, sizeof(__le32) * RX_QUEUE_SIZE, 252 dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
237 rxq->bd, rxq->bd_dma); 253 rxq->bd, rxq->bd_dma);
238 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma)); 254 memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
239 rxq->bd = NULL; 255 rxq->bd = NULL;
240 256
241 if (rxq->rb_stts) 257 if (rxq->rb_stts)
242 dma_free_coherent(priv->bus->dev, 258 dma_free_coherent(bus(trans)->dev,
243 sizeof(struct iwl_rb_status), 259 sizeof(struct iwl_rb_status),
244 rxq->rb_stts, rxq->rb_stts_dma); 260 rxq->rb_stts, rxq->rb_stts_dma);
245 else 261 else
246 IWL_DEBUG_INFO(priv, "Free rxq->rb_stts which is NULL\n"); 262 IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
247 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma)); 263 memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
248 rxq->rb_stts = NULL; 264 rxq->rb_stts = NULL;
249} 265}
250 266
251static int iwl_trans_rx_stop(struct iwl_priv *priv) 267static int iwl_trans_rx_stop(struct iwl_trans *trans)
252{ 268{
253 269
254 /* stop Rx DMA */ 270 /* stop Rx DMA */
255 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 271 iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
256 return iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG, 272 return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
257 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 273 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
258} 274}
259 275
260static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv, 276static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
261 struct iwl_dma_ptr *ptr, size_t size) 277 struct iwl_dma_ptr *ptr, size_t size)
262{ 278{
263 if (WARN_ON(ptr->addr)) 279 if (WARN_ON(ptr->addr))
264 return -EINVAL; 280 return -EINVAL;
265 281
266 ptr->addr = dma_alloc_coherent(priv->bus->dev, size, 282 ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
267 &ptr->dma, GFP_KERNEL); 283 &ptr->dma, GFP_KERNEL);
268 if (!ptr->addr) 284 if (!ptr->addr)
269 return -ENOMEM; 285 return -ENOMEM;
@@ -271,23 +287,24 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
271 return 0; 287 return 0;
272} 288}
273 289
274static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv, 290static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
275 struct iwl_dma_ptr *ptr) 291 struct iwl_dma_ptr *ptr)
276{ 292{
277 if (unlikely(!ptr->addr)) 293 if (unlikely(!ptr->addr))
278 return; 294 return;
279 295
280 dma_free_coherent(priv->bus->dev, ptr->size, ptr->addr, ptr->dma); 296 dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
281 memset(ptr, 0, sizeof(*ptr)); 297 memset(ptr, 0, sizeof(*ptr));
282} 298}
283 299
284static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq, 300static int iwl_trans_txq_alloc(struct iwl_trans *trans,
285 int slots_num, u32 txq_id) 301 struct iwl_tx_queue *txq, int slots_num,
302 u32 txq_id)
286{ 303{
287 size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX; 304 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
288 int i; 305 int i;
289 306
290 if (WARN_ON(txq->meta || txq->cmd || txq->txb || txq->tfds)) 307 if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
291 return -EINVAL; 308 return -EINVAL;
292 309
293 txq->q.n_window = slots_num; 310 txq->q.n_window = slots_num;
@@ -300,45 +317,46 @@ static int iwl_trans_txq_alloc(struct iwl_priv *priv, struct iwl_tx_queue *txq,
300 if (!txq->meta || !txq->cmd) 317 if (!txq->meta || !txq->cmd)
301 goto error; 318 goto error;
302 319
303 for (i = 0; i < slots_num; i++) { 320 if (txq_id == trans->shrd->cmd_queue)
304 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), 321 for (i = 0; i < slots_num; i++) {
305 GFP_KERNEL); 322 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
306 if (!txq->cmd[i]) 323 GFP_KERNEL);
307 goto error; 324 if (!txq->cmd[i])
308 } 325 goto error;
326 }
309 327
310 /* Alloc driver data array and TFD circular buffer */ 328 /* Alloc driver data array and TFD circular buffer */
311 /* Driver private data, only for Tx (not command) queues, 329 /* Driver private data, only for Tx (not command) queues,
312 * not shared with device. */ 330 * not shared with device. */
313 if (txq_id != priv->cmd_queue) { 331 if (txq_id != trans->shrd->cmd_queue) {
314 txq->txb = kzalloc(sizeof(txq->txb[0]) * 332 txq->skbs = kzalloc(sizeof(txq->skbs[0]) *
315 TFD_QUEUE_SIZE_MAX, GFP_KERNEL); 333 TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
316 if (!txq->txb) { 334 if (!txq->skbs) {
317 IWL_ERR(priv, "kmalloc for auxiliary BD " 335 IWL_ERR(trans, "kmalloc for auxiliary BD "
318 "structures failed\n"); 336 "structures failed\n");
319 goto error; 337 goto error;
320 } 338 }
321 } else { 339 } else {
322 txq->txb = NULL; 340 txq->skbs = NULL;
323 } 341 }
324 342
325 /* Circular buffer of transmit frame descriptors (TFDs), 343 /* Circular buffer of transmit frame descriptors (TFDs),
326 * shared with device */ 344 * shared with device */
327 txq->tfds = dma_alloc_coherent(priv->bus->dev, tfd_sz, &txq->q.dma_addr, 345 txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
328 GFP_KERNEL); 346 &txq->q.dma_addr, GFP_KERNEL);
329 if (!txq->tfds) { 347 if (!txq->tfds) {
330 IWL_ERR(priv, "dma_alloc_coherent(%zd) failed\n", tfd_sz); 348 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
331 goto error; 349 goto error;
332 } 350 }
333 txq->q.id = txq_id; 351 txq->q.id = txq_id;
334 352
335 return 0; 353 return 0;
336error: 354error:
337 kfree(txq->txb); 355 kfree(txq->skbs);
338 txq->txb = NULL; 356 txq->skbs = NULL;
339 /* since txq->cmd has been zeroed, 357 /* since txq->cmd has been zeroed,
340 * all non allocated cmd[i] will be NULL */ 358 * all non allocated cmd[i] will be NULL */
341 if (txq->cmd) 359 if (txq->cmd && txq_id == trans->shrd->cmd_queue)
342 for (i = 0; i < slots_num; i++) 360 for (i = 0; i < slots_num; i++)
343 kfree(txq->cmd[i]); 361 kfree(txq->cmd[i]);
344 kfree(txq->meta); 362 kfree(txq->meta);
@@ -350,7 +368,7 @@ error:
350 368
351} 369}
352 370
353static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, 371static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
354 int slots_num, u32 txq_id) 372 int slots_num, u32 txq_id)
355{ 373{
356 int ret; 374 int ret;
@@ -371,7 +389,7 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
371 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 389 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
372 390
373 /* Initialize queue's high/low-water marks, and head/tail indexes */ 391 /* Initialize queue's high/low-water marks, and head/tail indexes */
374 ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, 392 ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
375 txq_id); 393 txq_id);
376 if (ret) 394 if (ret)
377 return ret; 395 return ret;
@@ -380,7 +398,7 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
380 * Tell nic where to find circular buffer of Tx Frame Descriptors for 398 * Tell nic where to find circular buffer of Tx Frame Descriptors for
381 * given Tx queue, and enable the DMA channel used for that queue. 399 * given Tx queue, and enable the DMA channel used for that queue.
382 * Circular buffer (TFD queue in DRAM) physical base address */ 400 * Circular buffer (TFD queue in DRAM) physical base address */
383 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), 401 iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
384 txq->q.dma_addr >> 8); 402 txq->q.dma_addr >> 8);
385 403
386 return 0; 404 return 0;
@@ -389,9 +407,10 @@ static int iwl_trans_txq_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
389/** 407/**
390 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's 408 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
391 */ 409 */
392static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id) 410static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
393{ 411{
394 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 412 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
413 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
395 struct iwl_queue *q = &txq->q; 414 struct iwl_queue *q = &txq->q;
396 415
397 if (!q->n_bd) 416 if (!q->n_bd)
@@ -399,7 +418,7 @@ static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
399 418
400 while (q->write_ptr != q->read_ptr) { 419 while (q->write_ptr != q->read_ptr) {
401 /* The read_ptr needs to bound by q->n_window */ 420 /* The read_ptr needs to bound by q->n_window */
402 iwlagn_txq_free_tfd(priv, txq, get_cmd_index(q, q->read_ptr)); 421 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr));
403 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 422 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
404 } 423 }
405} 424}
@@ -412,30 +431,33 @@ static void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
412 * Free all buffers. 431 * Free all buffers.
413 * 0-fill, but do not free "txq" descriptor structure. 432 * 0-fill, but do not free "txq" descriptor structure.
414 */ 433 */
415static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id) 434static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
416{ 435{
417 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 436 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
418 struct device *dev = priv->bus->dev; 437 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
438 struct device *dev = bus(trans)->dev;
419 int i; 439 int i;
420 if (WARN_ON(!txq)) 440 if (WARN_ON(!txq))
421 return; 441 return;
422 442
423 iwl_tx_queue_unmap(priv, txq_id); 443 iwl_tx_queue_unmap(trans, txq_id);
424 444
425 /* De-alloc array of command/tx buffers */ 445 /* De-alloc array of command/tx buffers */
426 for (i = 0; i < txq->q.n_window; i++) 446
427 kfree(txq->cmd[i]); 447 if (txq_id == trans->shrd->cmd_queue)
448 for (i = 0; i < txq->q.n_window; i++)
449 kfree(txq->cmd[i]);
428 450
429 /* De-alloc circular buffer of TFDs */ 451 /* De-alloc circular buffer of TFDs */
430 if (txq->q.n_bd) { 452 if (txq->q.n_bd) {
431 dma_free_coherent(dev, priv->hw_params.tfd_size * 453 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
432 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 454 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
433 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); 455 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
434 } 456 }
435 457
436 /* De-alloc array of per-TFD driver data */ 458 /* De-alloc array of per-TFD driver data */
437 kfree(txq->txb); 459 kfree(txq->skbs);
438 txq->txb = NULL; 460 txq->skbs = NULL;
439 461
440 /* deallocate arrays */ 462 /* deallocate arrays */
441 kfree(txq->cmd); 463 kfree(txq->cmd);
@@ -452,22 +474,24 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
452 * 474 *
453 * Destroy all TX DMA queues and structures 475 * Destroy all TX DMA queues and structures
454 */ 476 */
455static void iwl_trans_tx_free(struct iwl_priv *priv) 477static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
456{ 478{
457 int txq_id; 479 int txq_id;
480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
458 481
459 /* Tx queues */ 482 /* Tx queues */
460 if (priv->txq) { 483 if (trans_pcie->txq) {
461 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 484 for (txq_id = 0;
462 iwl_tx_queue_free(priv, txq_id); 485 txq_id < hw_params(trans).max_txq_num; txq_id++)
486 iwl_tx_queue_free(trans, txq_id);
463 } 487 }
464 488
465 kfree(priv->txq); 489 kfree(trans_pcie->txq);
466 priv->txq = NULL; 490 trans_pcie->txq = NULL;
467 491
468 iwlagn_free_dma_ptr(priv, &priv->kw); 492 iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
469 493
470 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls); 494 iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
471} 495}
472 496
473/** 497/**
@@ -477,48 +501,52 @@ static void iwl_trans_tx_free(struct iwl_priv *priv)
477 * @param priv 501 * @param priv
478 * @return error code 502 * @return error code
479 */ 503 */
480static int iwl_trans_tx_alloc(struct iwl_priv *priv) 504static int iwl_trans_tx_alloc(struct iwl_trans *trans)
481{ 505{
482 int ret; 506 int ret;
483 int txq_id, slots_num; 507 int txq_id, slots_num;
508 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
509
510 u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
511 sizeof(struct iwlagn_scd_bc_tbl);
484 512
485 /*It is not allowed to alloc twice, so warn when this happens. 513 /*It is not allowed to alloc twice, so warn when this happens.
486 * We cannot rely on the previous allocation, so free and fail */ 514 * We cannot rely on the previous allocation, so free and fail */
487 if (WARN_ON(priv->txq)) { 515 if (WARN_ON(trans_pcie->txq)) {
488 ret = -EINVAL; 516 ret = -EINVAL;
489 goto error; 517 goto error;
490 } 518 }
491 519
492 ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls, 520 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
493 priv->hw_params.scd_bc_tbls_size); 521 scd_bc_tbls_size);
494 if (ret) { 522 if (ret) {
495 IWL_ERR(priv, "Scheduler BC Table allocation failed\n"); 523 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
496 goto error; 524 goto error;
497 } 525 }
498 526
499 /* Alloc keep-warm buffer */ 527 /* Alloc keep-warm buffer */
500 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE); 528 ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
501 if (ret) { 529 if (ret) {
502 IWL_ERR(priv, "Keep Warm allocation failed\n"); 530 IWL_ERR(trans, "Keep Warm allocation failed\n");
503 goto error; 531 goto error;
504 } 532 }
505 533
506 priv->txq = kzalloc(sizeof(struct iwl_tx_queue) * 534 trans_pcie->txq = kzalloc(sizeof(struct iwl_tx_queue) *
507 priv->cfg->base_params->num_of_queues, GFP_KERNEL); 535 hw_params(trans).max_txq_num, GFP_KERNEL);
508 if (!priv->txq) { 536 if (!trans_pcie->txq) {
509 IWL_ERR(priv, "Not enough memory for txq\n"); 537 IWL_ERR(trans, "Not enough memory for txq\n");
510 ret = ENOMEM; 538 ret = ENOMEM;
511 goto error; 539 goto error;
512 } 540 }
513 541
514 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 542 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
515 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 543 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
516 slots_num = (txq_id == priv->cmd_queue) ? 544 slots_num = (txq_id == trans->shrd->cmd_queue) ?
517 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 545 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
518 ret = iwl_trans_txq_alloc(priv, &priv->txq[txq_id], slots_num, 546 ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
519 txq_id); 547 slots_num, txq_id);
520 if (ret) { 548 if (ret) {
521 IWL_ERR(priv, "Tx %d queue alloc failed\n", txq_id); 549 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
522 goto error; 550 goto error;
523 } 551 }
524 } 552 }
@@ -526,42 +554,44 @@ static int iwl_trans_tx_alloc(struct iwl_priv *priv)
526 return 0; 554 return 0;
527 555
528error: 556error:
529 trans_tx_free(&priv->trans); 557 iwl_trans_pcie_tx_free(trans);
530 558
531 return ret; 559 return ret;
532} 560}
533static int iwl_tx_init(struct iwl_priv *priv) 561static int iwl_tx_init(struct iwl_trans *trans)
534{ 562{
535 int ret; 563 int ret;
536 int txq_id, slots_num; 564 int txq_id, slots_num;
537 unsigned long flags; 565 unsigned long flags;
538 bool alloc = false; 566 bool alloc = false;
567 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
539 568
540 if (!priv->txq) { 569 if (!trans_pcie->txq) {
541 ret = iwl_trans_tx_alloc(priv); 570 ret = iwl_trans_tx_alloc(trans);
542 if (ret) 571 if (ret)
543 goto error; 572 goto error;
544 alloc = true; 573 alloc = true;
545 } 574 }
546 575
547 spin_lock_irqsave(&priv->lock, flags); 576 spin_lock_irqsave(&trans->shrd->lock, flags);
548 577
549 /* Turn off all Tx DMA fifos */ 578 /* Turn off all Tx DMA fifos */
550 iwl_write_prph(priv, SCD_TXFACT, 0); 579 iwl_write_prph(bus(trans), SCD_TXFACT, 0);
551 580
552 /* Tell NIC where to find the "keep warm" buffer */ 581 /* Tell NIC where to find the "keep warm" buffer */
553 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); 582 iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
583 trans_pcie->kw.dma >> 4);
554 584
555 spin_unlock_irqrestore(&priv->lock, flags); 585 spin_unlock_irqrestore(&trans->shrd->lock, flags);
556 586
557 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 587 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
558 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 588 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
559 slots_num = (txq_id == priv->cmd_queue) ? 589 slots_num = (txq_id == trans->shrd->cmd_queue) ?
560 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 590 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
561 ret = iwl_trans_txq_init(priv, &priv->txq[txq_id], slots_num, 591 ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
562 txq_id); 592 slots_num, txq_id);
563 if (ret) { 593 if (ret) {
564 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id); 594 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
565 goto error; 595 goto error;
566 } 596 }
567 } 597 }
@@ -570,58 +600,61 @@ static int iwl_tx_init(struct iwl_priv *priv)
570error: 600error:
571 /*Upon error, free only if we allocated something */ 601 /*Upon error, free only if we allocated something */
572 if (alloc) 602 if (alloc)
573 trans_tx_free(&priv->trans); 603 iwl_trans_pcie_tx_free(trans);
574 return ret; 604 return ret;
575} 605}
576 606
577static void iwl_set_pwr_vmain(struct iwl_priv *priv) 607static void iwl_set_pwr_vmain(struct iwl_priv *priv)
578{ 608{
609 struct iwl_trans *trans = trans(priv);
579/* 610/*
580 * (for documentation purposes) 611 * (for documentation purposes)
581 * to set power to V_AUX, do: 612 * to set power to V_AUX, do:
582 613
583 if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) 614 if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
584 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 615 iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
585 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 616 APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
586 ~APMG_PS_CTRL_MSK_PWR_SRC); 617 ~APMG_PS_CTRL_MSK_PWR_SRC);
587 */ 618 */
588 619
589 iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG, 620 iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
590 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 621 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
591 ~APMG_PS_CTRL_MSK_PWR_SRC); 622 ~APMG_PS_CTRL_MSK_PWR_SRC);
592} 623}
593 624
594static int iwl_nic_init(struct iwl_priv *priv) 625static int iwl_nic_init(struct iwl_trans *trans)
595{ 626{
596 unsigned long flags; 627 unsigned long flags;
628 struct iwl_priv *priv = priv(trans);
597 629
598 /* nic_init */ 630 /* nic_init */
599 spin_lock_irqsave(&priv->lock, flags); 631 spin_lock_irqsave(&trans->shrd->lock, flags);
600 iwl_apm_init(priv); 632 iwl_apm_init(priv);
601 633
602 /* Set interrupt coalescing calibration timer to default (512 usecs) */ 634 /* Set interrupt coalescing calibration timer to default (512 usecs) */
603 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF); 635 iwl_write8(bus(trans), CSR_INT_COALESCING,
636 IWL_HOST_INT_CALIB_TIMEOUT_DEF);
604 637
605 spin_unlock_irqrestore(&priv->lock, flags); 638 spin_unlock_irqrestore(&trans->shrd->lock, flags);
606 639
607 iwl_set_pwr_vmain(priv); 640 iwl_set_pwr_vmain(priv);
608 641
609 priv->cfg->lib->nic_config(priv); 642 priv->cfg->lib->nic_config(priv);
610 643
611 /* Allocate the RX queue, or reset if it is already allocated */ 644 /* Allocate the RX queue, or reset if it is already allocated */
612 iwl_rx_init(priv); 645 iwl_rx_init(trans);
613 646
614 /* Allocate or reset and init all Tx and Command queues */ 647 /* Allocate or reset and init all Tx and Command queues */
615 if (iwl_tx_init(priv)) 648 if (iwl_tx_init(trans))
616 return -ENOMEM; 649 return -ENOMEM;
617 650
618 if (priv->cfg->base_params->shadow_reg_enable) { 651 if (hw_params(trans).shadow_reg_enable) {
619 /* enable shadow regs in HW */ 652 /* enable shadow regs in HW */
620 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL, 653 iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
621 0x800FFFFF); 654 0x800FFFFF);
622 } 655 }
623 656
624 set_bit(STATUS_INIT, &priv->status); 657 set_bit(STATUS_INIT, &trans->shrd->status);
625 658
626 return 0; 659 return 0;
627} 660}
@@ -629,39 +662,39 @@ static int iwl_nic_init(struct iwl_priv *priv)
629#define HW_READY_TIMEOUT (50) 662#define HW_READY_TIMEOUT (50)
630 663
631/* Note: returns poll_bit return value, which is >= 0 if success */ 664/* Note: returns poll_bit return value, which is >= 0 if success */
632static int iwl_set_hw_ready(struct iwl_priv *priv) 665static int iwl_set_hw_ready(struct iwl_trans *trans)
633{ 666{
634 int ret; 667 int ret;
635 668
636 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 669 iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
637 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY); 670 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
638 671
639 /* See if we got it */ 672 /* See if we got it */
640 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, 673 ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
641 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 674 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
642 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, 675 CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
643 HW_READY_TIMEOUT); 676 HW_READY_TIMEOUT);
644 677
645 IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : ""); 678 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
646 return ret; 679 return ret;
647} 680}
648 681
649/* Note: returns standard 0/-ERROR code */ 682/* Note: returns standard 0/-ERROR code */
650static int iwl_trans_prepare_card_hw(struct iwl_priv *priv) 683static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
651{ 684{
652 int ret; 685 int ret;
653 686
654 IWL_DEBUG_INFO(priv, "iwl_trans_prepare_card_hw enter\n"); 687 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
655 688
656 ret = iwl_set_hw_ready(priv); 689 ret = iwl_set_hw_ready(trans);
657 if (ret >= 0) 690 if (ret >= 0)
658 return 0; 691 return 0;
659 692
660 /* If HW is not ready, prepare the conditions to check again */ 693 /* If HW is not ready, prepare the conditions to check again */
661 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 694 iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
662 CSR_HW_IF_CONFIG_REG_PREPARE); 695 CSR_HW_IF_CONFIG_REG_PREPARE);
663 696
664 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG, 697 ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
665 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 698 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
666 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000); 699 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
667 700
@@ -669,153 +702,189 @@ static int iwl_trans_prepare_card_hw(struct iwl_priv *priv)
669 return ret; 702 return ret;
670 703
671 /* HW should be ready by now, check again. */ 704 /* HW should be ready by now, check again. */
672 ret = iwl_set_hw_ready(priv); 705 ret = iwl_set_hw_ready(trans);
673 if (ret >= 0) 706 if (ret >= 0)
674 return 0; 707 return 0;
675 return ret; 708 return ret;
676} 709}
677 710
678static int iwl_trans_start_device(struct iwl_priv *priv) 711#define IWL_AC_UNSET -1
712
713struct queue_to_fifo_ac {
714 s8 fifo, ac;
715};
716
717static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
718 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
719 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
720 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
721 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
722 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
723 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
724 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
725 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
726 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
727 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
728 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
729};
730
731static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
732 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
733 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
734 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
735 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
736 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
737 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
738 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
739 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
740 { IWL_TX_FIFO_BE_IPAN, 2, },
741 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
742 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
743};
744
745static const u8 iwlagn_bss_ac_to_fifo[] = {
746 IWL_TX_FIFO_VO,
747 IWL_TX_FIFO_VI,
748 IWL_TX_FIFO_BE,
749 IWL_TX_FIFO_BK,
750};
751static const u8 iwlagn_bss_ac_to_queue[] = {
752 0, 1, 2, 3,
753};
754static const u8 iwlagn_pan_ac_to_fifo[] = {
755 IWL_TX_FIFO_VO_IPAN,
756 IWL_TX_FIFO_VI_IPAN,
757 IWL_TX_FIFO_BE_IPAN,
758 IWL_TX_FIFO_BK_IPAN,
759};
760static const u8 iwlagn_pan_ac_to_queue[] = {
761 7, 6, 5, 4,
762};
763
764static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
679{ 765{
680 int ret; 766 int ret;
767 struct iwl_priv *priv = priv(trans);
768 struct iwl_trans_pcie *trans_pcie =
769 IWL_TRANS_GET_PCIE_TRANS(trans);
681 770
682 priv->ucode_owner = IWL_OWNERSHIP_DRIVER; 771 trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;
772 trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
773 trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
683 774
684 if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) && 775 trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
685 iwl_trans_prepare_card_hw(priv)) { 776 trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
686 IWL_WARN(priv, "Exit HW not ready\n"); 777
778 trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
779 trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
780
781 if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
782 iwl_trans_pcie_prepare_card_hw(trans)) {
783 IWL_WARN(trans, "Exit HW not ready\n");
687 return -EIO; 784 return -EIO;
688 } 785 }
689 786
690 /* If platform's RF_KILL switch is NOT set to KILL */ 787 /* If platform's RF_KILL switch is NOT set to KILL */
691 if (iwl_read32(priv, CSR_GP_CNTRL) & 788 if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
692 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 789 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
693 clear_bit(STATUS_RF_KILL_HW, &priv->status); 790 clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
694 else 791 else
695 set_bit(STATUS_RF_KILL_HW, &priv->status); 792 set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
696 793
697 if (iwl_is_rfkill(priv)) { 794 if (iwl_is_rfkill(trans->shrd)) {
698 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); 795 wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
699 iwl_enable_interrupts(priv); 796 iwl_enable_interrupts(trans);
700 return -ERFKILL; 797 return -ERFKILL;
701 } 798 }
702 799
703 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 800 iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
704 801
705 ret = iwl_nic_init(priv); 802 ret = iwl_nic_init(trans);
706 if (ret) { 803 if (ret) {
707 IWL_ERR(priv, "Unable to init nic\n"); 804 IWL_ERR(trans, "Unable to init nic\n");
708 return ret; 805 return ret;
709 } 806 }
710 807
711 /* make sure rfkill handshake bits are cleared */ 808 /* make sure rfkill handshake bits are cleared */
712 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 809 iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
713 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 810 iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
714 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 811 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
715 812
716 /* clear (again), then enable host interrupts */ 813 /* clear (again), then enable host interrupts */
717 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 814 iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
718 iwl_enable_interrupts(priv); 815 iwl_enable_interrupts(trans);
719 816
720 /* really make sure rfkill handshake bits are cleared */ 817 /* really make sure rfkill handshake bits are cleared */
721 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 818 iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
722 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 819 iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
723 820
724 return 0; 821 return 0;
725} 822}
726 823
727/* 824/*
728 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask 825 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
729 * must be called under priv->lock and mac access 826 * must be called under priv->shrd->lock and mac access
730 */ 827 */
731static void iwl_trans_txq_set_sched(struct iwl_priv *priv, u32 mask) 828static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
732{ 829{
733 iwl_write_prph(priv, SCD_TXFACT, mask); 830 iwl_write_prph(bus(trans), SCD_TXFACT, mask);
734} 831}
735 832
736#define IWL_AC_UNSET -1 833static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
737
738struct queue_to_fifo_ac {
739 s8 fifo, ac;
740};
741
742static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
743 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
744 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
745 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
746 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
747 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
748 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
749 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
750 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
751 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
752 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
753 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
754};
755
756static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
757 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
758 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
759 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
760 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
761 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
762 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
763 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
764 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
765 { IWL_TX_FIFO_BE_IPAN, 2, },
766 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
767 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
768};
769static void iwl_trans_tx_start(struct iwl_priv *priv)
770{ 834{
771 const struct queue_to_fifo_ac *queue_to_fifo; 835 const struct queue_to_fifo_ac *queue_to_fifo;
772 struct iwl_rxon_context *ctx; 836 struct iwl_rxon_context *ctx;
837 struct iwl_priv *priv = priv(trans);
838 struct iwl_trans_pcie *trans_pcie =
839 IWL_TRANS_GET_PCIE_TRANS(trans);
773 u32 a; 840 u32 a;
774 unsigned long flags; 841 unsigned long flags;
775 int i, chan; 842 int i, chan;
776 u32 reg_val; 843 u32 reg_val;
777 844
778 spin_lock_irqsave(&priv->lock, flags); 845 spin_lock_irqsave(&trans->shrd->lock, flags);
779 846
780 priv->scd_base_addr = iwl_read_prph(priv, SCD_SRAM_BASE_ADDR); 847 trans_pcie->scd_base_addr =
781 a = priv->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND; 848 iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
849 a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
782 /* reset conext data memory */ 850 /* reset conext data memory */
783 for (; a < priv->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND; 851 for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
784 a += 4) 852 a += 4)
785 iwl_write_targ_mem(priv, a, 0); 853 iwl_write_targ_mem(bus(trans), a, 0);
786 /* reset tx status memory */ 854 /* reset tx status memory */
787 for (; a < priv->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND; 855 for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
788 a += 4) 856 a += 4)
789 iwl_write_targ_mem(priv, a, 0); 857 iwl_write_targ_mem(bus(trans), a, 0);
790 for (; a < priv->scd_base_addr + 858 for (; a < trans_pcie->scd_base_addr +
791 SCD_TRANS_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4) 859 SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
792 iwl_write_targ_mem(priv, a, 0); 860 a += 4)
861 iwl_write_targ_mem(bus(trans), a, 0);
793 862
794 iwl_write_prph(priv, SCD_DRAM_BASE_ADDR, 863 iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
795 priv->scd_bc_tbls.dma >> 10); 864 trans_pcie->scd_bc_tbls.dma >> 10);
796 865
797 /* Enable DMA channel */ 866 /* Enable DMA channel */
798 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 867 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
799 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 868 iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
800 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 869 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
801 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE); 870 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
802 871
803 /* Update FH chicken bits */ 872 /* Update FH chicken bits */
804 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG); 873 reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
805 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG, 874 iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
806 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); 875 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
807 876
808 iwl_write_prph(priv, SCD_QUEUECHAIN_SEL, 877 iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
809 SCD_QUEUECHAIN_SEL_ALL(priv)); 878 SCD_QUEUECHAIN_SEL_ALL(trans));
810 iwl_write_prph(priv, SCD_AGGR_SEL, 0); 879 iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);
811 880
812 /* initiate the queues */ 881 /* initiate the queues */
813 for (i = 0; i < priv->hw_params.max_txq_num; i++) { 882 for (i = 0; i < hw_params(trans).max_txq_num; i++) {
814 iwl_write_prph(priv, SCD_QUEUE_RDPTR(i), 0); 883 iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
815 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); 884 iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
816 iwl_write_targ_mem(priv, priv->scd_base_addr + 885 iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
817 SCD_CONTEXT_QUEUE_OFFSET(i), 0); 886 SCD_CONTEXT_QUEUE_OFFSET(i), 0);
818 iwl_write_targ_mem(priv, priv->scd_base_addr + 887 iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
819 SCD_CONTEXT_QUEUE_OFFSET(i) + 888 SCD_CONTEXT_QUEUE_OFFSET(i) +
820 sizeof(u32), 889 sizeof(u32),
821 ((SCD_WIN_SIZE << 890 ((SCD_WIN_SIZE <<
@@ -826,11 +895,11 @@ static void iwl_trans_tx_start(struct iwl_priv *priv)
826 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); 895 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
827 } 896 }
828 897
829 iwl_write_prph(priv, SCD_INTERRUPT_MASK, 898 iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
830 IWL_MASK(0, priv->hw_params.max_txq_num)); 899 IWL_MASK(0, hw_params(trans).max_txq_num));
831 900
832 /* Activate all Tx DMA/FIFO channels */ 901 /* Activate all Tx DMA/FIFO channels */
833 iwl_trans_txq_set_sched(priv, IWL_MASK(0, 7)); 902 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
834 903
835 /* map queues to FIFOs */ 904 /* map queues to FIFOs */
836 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) 905 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
@@ -838,96 +907,111 @@ static void iwl_trans_tx_start(struct iwl_priv *priv)
838 else 907 else
839 queue_to_fifo = iwlagn_default_queue_to_tx_fifo; 908 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
840 909
841 iwl_trans_set_wr_ptrs(priv, priv->cmd_queue, 0); 910 iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);
842 911
843 /* make sure all queue are not stopped */ 912 /* make sure all queue are not stopped */
844 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped)); 913 memset(&trans_pcie->queue_stopped[0], 0,
914 sizeof(trans_pcie->queue_stopped));
845 for (i = 0; i < 4; i++) 915 for (i = 0; i < 4; i++)
846 atomic_set(&priv->queue_stop_count[i], 0); 916 atomic_set(&trans_pcie->queue_stop_count[i], 0);
847 for_each_context(priv, ctx) 917 for_each_context(priv, ctx)
848 ctx->last_tx_rejected = false; 918 ctx->last_tx_rejected = false;
849 919
850 /* reset to 0 to enable all the queue first */ 920 /* reset to 0 to enable all the queue first */
851 priv->txq_ctx_active_msk = 0; 921 trans_pcie->txq_ctx_active_msk = 0;
852 922
853 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 923 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
854 IWLAGN_FIRST_AMPDU_QUEUE); 924 IWLAGN_FIRST_AMPDU_QUEUE);
855 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 925 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
856 IWLAGN_FIRST_AMPDU_QUEUE); 926 IWLAGN_FIRST_AMPDU_QUEUE);
857 927
858 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) { 928 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
859 int fifo = queue_to_fifo[i].fifo; 929 int fifo = queue_to_fifo[i].fifo;
860 int ac = queue_to_fifo[i].ac; 930 int ac = queue_to_fifo[i].ac;
861 931
862 iwl_txq_ctx_activate(priv, i); 932 iwl_txq_ctx_activate(trans_pcie, i);
863 933
864 if (fifo == IWL_TX_FIFO_UNUSED) 934 if (fifo == IWL_TX_FIFO_UNUSED)
865 continue; 935 continue;
866 936
867 if (ac != IWL_AC_UNSET) 937 if (ac != IWL_AC_UNSET)
868 iwl_set_swq_id(&priv->txq[i], ac, i); 938 iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
869 iwl_trans_tx_queue_set_status(priv, &priv->txq[i], fifo, 0); 939 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
940 fifo, 0);
870 } 941 }
871 942
872 spin_unlock_irqrestore(&priv->lock, flags); 943 spin_unlock_irqrestore(&trans->shrd->lock, flags);
873 944
874 /* Enable L1-Active */ 945 /* Enable L1-Active */
875 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG, 946 iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
876 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 947 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
877} 948}
878 949
879/** 950/**
880 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels 951 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
881 */ 952 */
882static int iwl_trans_tx_stop(struct iwl_priv *priv) 953static int iwl_trans_tx_stop(struct iwl_trans *trans)
883{ 954{
884 int ch, txq_id; 955 int ch, txq_id;
885 unsigned long flags; 956 unsigned long flags;
957 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
886 958
887 /* Turn off all Tx DMA fifos */ 959 /* Turn off all Tx DMA fifos */
888 spin_lock_irqsave(&priv->lock, flags); 960 spin_lock_irqsave(&trans->shrd->lock, flags);
889 961
890 iwl_trans_txq_set_sched(priv, 0); 962 iwl_trans_txq_set_sched(trans, 0);
891 963
892 /* Stop each Tx DMA channel, and wait for it to be idle */ 964 /* Stop each Tx DMA channel, and wait for it to be idle */
893 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 965 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
894 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 966 iwl_write_direct32(bus(trans),
895 if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG, 967 FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
968 if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
896 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 969 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
897 1000)) 970 1000))
898 IWL_ERR(priv, "Failing on timeout while stopping" 971 IWL_ERR(trans, "Failing on timeout while stopping"
899 " DMA channel %d [0x%08x]", ch, 972 " DMA channel %d [0x%08x]", ch,
900 iwl_read_direct32(priv, FH_TSSR_TX_STATUS_REG)); 973 iwl_read_direct32(bus(trans),
974 FH_TSSR_TX_STATUS_REG));
901 } 975 }
902 spin_unlock_irqrestore(&priv->lock, flags); 976 spin_unlock_irqrestore(&trans->shrd->lock, flags);
903 977
904 if (!priv->txq) { 978 if (!trans_pcie->txq) {
905 IWL_WARN(priv, "Stopping tx queues that aren't allocated..."); 979 IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
906 return 0; 980 return 0;
907 } 981 }
908 982
909 /* Unmap DMA from host system and free skb's */ 983 /* Unmap DMA from host system and free skb's */
910 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 984 for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
911 iwl_tx_queue_unmap(priv, txq_id); 985 iwl_tx_queue_unmap(trans, txq_id);
912 986
913 return 0; 987 return 0;
914} 988}
915 989
916static void iwl_trans_stop_device(struct iwl_priv *priv) 990static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
917{ 991{
918 unsigned long flags; 992 unsigned long flags;
993 struct iwl_trans_pcie *trans_pcie =
994 IWL_TRANS_GET_PCIE_TRANS(trans);
995
996 spin_lock_irqsave(&trans->shrd->lock, flags);
997 iwl_disable_interrupts(trans);
998 spin_unlock_irqrestore(&trans->shrd->lock, flags);
999
1000 /* wait to make sure we flush pending tasklet*/
1001 synchronize_irq(bus(trans)->irq);
1002 tasklet_kill(&trans_pcie->irq_tasklet);
1003}
919 1004
1005static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1006{
920 /* stop and reset the on-board processor */ 1007 /* stop and reset the on-board processor */
921 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 1008 iwl_write32(bus(trans), CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
922 1009
923 /* tell the device to stop sending interrupts */ 1010 /* tell the device to stop sending interrupts */
924 spin_lock_irqsave(&priv->lock, flags); 1011 iwl_trans_pcie_disable_sync_irq(trans);
925 iwl_disable_interrupts(priv);
926 spin_unlock_irqrestore(&priv->lock, flags);
927 trans_sync_irq(&priv->trans);
928 1012
929 /* device going down, Stop using ICT table */ 1013 /* device going down, Stop using ICT table */
930 iwl_disable_ict(priv); 1014 iwl_disable_ict(trans);
931 1015
932 /* 1016 /*
933 * If a HW restart happens during firmware loading, 1017 * If a HW restart happens during firmware loading,
@@ -936,67 +1020,100 @@ static void iwl_trans_stop_device(struct iwl_priv *priv)
936 * restart. So don't process again if the device is 1020 * restart. So don't process again if the device is
937 * already dead. 1021 * already dead.
938 */ 1022 */
939 if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) { 1023 if (test_bit(STATUS_DEVICE_ENABLED, &trans->shrd->status)) {
940 iwl_trans_tx_stop(priv); 1024 iwl_trans_tx_stop(trans);
941 iwl_trans_rx_stop(priv); 1025 iwl_trans_rx_stop(trans);
942 1026
943 /* Power-down device's busmaster DMA clocks */ 1027 /* Power-down device's busmaster DMA clocks */
944 iwl_write_prph(priv, APMG_CLK_DIS_REG, 1028 iwl_write_prph(bus(trans), APMG_CLK_DIS_REG,
945 APMG_CLK_VAL_DMA_CLK_RQT); 1029 APMG_CLK_VAL_DMA_CLK_RQT);
946 udelay(5); 1030 udelay(5);
947 } 1031 }
948 1032
949 /* Make sure (redundant) we've released our request to stay awake */ 1033 /* Make sure (redundant) we've released our request to stay awake */
950 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1034 iwl_clear_bit(bus(trans), CSR_GP_CNTRL,
1035 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
951 1036
952 /* Stop the device, and put it in low power state */ 1037 /* Stop the device, and put it in low power state */
953 iwl_apm_stop(priv); 1038 iwl_apm_stop(priv(trans));
954}
955
956static struct iwl_tx_cmd *iwl_trans_get_tx_cmd(struct iwl_priv *priv,
957 int txq_id)
958{
959 struct iwl_tx_queue *txq = &priv->txq[txq_id];
960 struct iwl_queue *q = &txq->q;
961 struct iwl_device_cmd *dev_cmd;
962
963 if (unlikely(iwl_queue_space(q) < q->high_mark))
964 return NULL;
965
966 /*
967 * Set up the Tx-command (not MAC!) header.
968 * Store the chosen Tx queue and TFD index within the sequence field;
969 * after Tx, uCode's Tx response will return this value so driver can
970 * locate the frame within the tx queue and do post-tx processing.
971 */
972 dev_cmd = txq->cmd[q->write_ptr];
973 memset(dev_cmd, 0, sizeof(*dev_cmd));
974 dev_cmd->hdr.cmd = REPLY_TX;
975 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
976 INDEX_TO_SEQ(q->write_ptr)));
977 return &dev_cmd->cmd.tx;
978} 1039}
979 1040
980static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb, 1041static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
981 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, 1042 struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id)
982 struct iwl_rxon_context *ctx)
983{ 1043{
984 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 1044 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
985 struct iwl_queue *q = &txq->q; 1045 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
986 struct iwl_device_cmd *dev_cmd = txq->cmd[q->write_ptr]; 1046 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1047 struct iwl_tx_cmd *tx_cmd = &dev_cmd->cmd.tx;
987 struct iwl_cmd_meta *out_meta; 1048 struct iwl_cmd_meta *out_meta;
1049 struct iwl_tx_queue *txq;
1050 struct iwl_queue *q;
988 1051
989 dma_addr_t phys_addr = 0; 1052 dma_addr_t phys_addr = 0;
990 dma_addr_t txcmd_phys; 1053 dma_addr_t txcmd_phys;
991 dma_addr_t scratch_phys; 1054 dma_addr_t scratch_phys;
992 u16 len, firstlen, secondlen; 1055 u16 len, firstlen, secondlen;
1056 u16 seq_number = 0;
993 u8 wait_write_ptr = 0; 1057 u8 wait_write_ptr = 0;
1058 u8 txq_id;
1059 u8 tid = 0;
1060 bool is_agg = false;
1061 __le16 fc = hdr->frame_control;
994 u8 hdr_len = ieee80211_hdrlen(fc); 1062 u8 hdr_len = ieee80211_hdrlen(fc);
995 1063
1064 /*
1065 * Send this frame after DTIM -- there's a special queue
1066 * reserved for this for contexts that support AP mode.
1067 */
1068 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1069 txq_id = trans_pcie->mcast_queue[ctx];
1070
1071 /*
1072 * The microcode will clear the more data
1073 * bit in the last frame it transmits.
1074 */
1075 hdr->frame_control |=
1076 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1077 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
1078 txq_id = IWL_AUX_QUEUE;
1079 else
1080 txq_id =
1081 trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
1082
1083 if (ieee80211_is_data_qos(fc)) {
1084 u8 *qc = NULL;
1085 struct iwl_tid_data *tid_data;
1086 qc = ieee80211_get_qos_ctl(hdr);
1087 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1088 tid_data = &trans->shrd->tid_data[sta_id][tid];
1089
1090 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
1091 return -1;
1092
1093 seq_number = tid_data->seq_number;
1094 seq_number &= IEEE80211_SCTL_SEQ;
1095 hdr->seq_ctrl = hdr->seq_ctrl &
1096 cpu_to_le16(IEEE80211_SCTL_FRAG);
1097 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1098 seq_number += 0x10;
1099 /* aggregation is on for this <sta,tid> */
1100 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1101 tid_data->agg.state == IWL_AGG_ON) {
1102 txq_id = tid_data->agg.txq_id;
1103 is_agg = true;
1104 }
1105 }
1106
1107 txq = &trans_pcie->txq[txq_id];
1108 q = &txq->q;
1109
996 /* Set up driver data for this TFD */ 1110 /* Set up driver data for this TFD */
997 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); 1111 txq->skbs[q->write_ptr] = skb;
998 txq->txb[q->write_ptr].skb = skb; 1112 txq->cmd[q->write_ptr] = dev_cmd;
999 txq->txb[q->write_ptr].ctx = ctx; 1113
1114 dev_cmd->hdr.cmd = REPLY_TX;
1115 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1116 INDEX_TO_SEQ(q->write_ptr)));
1000 1117
1001 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1118 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1002 out_meta = &txq->meta[q->write_ptr]; 1119 out_meta = &txq->meta[q->write_ptr];
@@ -1020,10 +1137,10 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
1020 1137
1021 /* Physical address of this Tx command's header (not MAC header!), 1138 /* Physical address of this Tx command's header (not MAC header!),
1022 * within command buffer array. */ 1139 * within command buffer array. */
1023 txcmd_phys = dma_map_single(priv->bus->dev, 1140 txcmd_phys = dma_map_single(bus(trans)->dev,
1024 &dev_cmd->hdr, firstlen, 1141 &dev_cmd->hdr, firstlen,
1025 DMA_BIDIRECTIONAL); 1142 DMA_BIDIRECTIONAL);
1026 if (unlikely(dma_mapping_error(priv->bus->dev, txcmd_phys))) 1143 if (unlikely(dma_mapping_error(bus(trans)->dev, txcmd_phys)))
1027 return -1; 1144 return -1;
1028 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 1145 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1029 dma_unmap_len_set(out_meta, len, firstlen); 1146 dma_unmap_len_set(out_meta, len, firstlen);
@@ -1039,10 +1156,10 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
1039 * if any (802.11 null frames have no payload). */ 1156 * if any (802.11 null frames have no payload). */
1040 secondlen = skb->len - hdr_len; 1157 secondlen = skb->len - hdr_len;
1041 if (secondlen > 0) { 1158 if (secondlen > 0) {
1042 phys_addr = dma_map_single(priv->bus->dev, skb->data + hdr_len, 1159 phys_addr = dma_map_single(bus(trans)->dev, skb->data + hdr_len,
1043 secondlen, DMA_TO_DEVICE); 1160 secondlen, DMA_TO_DEVICE);
1044 if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) { 1161 if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
1045 dma_unmap_single(priv->bus->dev, 1162 dma_unmap_single(bus(trans)->dev,
1046 dma_unmap_addr(out_meta, mapping), 1163 dma_unmap_addr(out_meta, mapping),
1047 dma_unmap_len(out_meta, len), 1164 dma_unmap_len(out_meta, len),
1048 DMA_BIDIRECTIONAL); 1165 DMA_BIDIRECTIONAL);
@@ -1051,35 +1168,35 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
1051 } 1168 }
1052 1169
1053 /* Attach buffers to TFD */ 1170 /* Attach buffers to TFD */
1054 iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1); 1171 iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
1055 if (secondlen > 0) 1172 if (secondlen > 0)
1056 iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, 1173 iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
1057 secondlen, 0); 1174 secondlen, 0);
1058 1175
1059 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 1176 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1060 offsetof(struct iwl_tx_cmd, scratch); 1177 offsetof(struct iwl_tx_cmd, scratch);
1061 1178
1062 /* take back ownership of DMA buffer to enable update */ 1179 /* take back ownership of DMA buffer to enable update */
1063 dma_sync_single_for_cpu(priv->bus->dev, txcmd_phys, firstlen, 1180 dma_sync_single_for_cpu(bus(trans)->dev, txcmd_phys, firstlen,
1064 DMA_BIDIRECTIONAL); 1181 DMA_BIDIRECTIONAL);
1065 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 1182 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1066 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 1183 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1067 1184
1068 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", 1185 IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1069 le16_to_cpu(dev_cmd->hdr.sequence)); 1186 le16_to_cpu(dev_cmd->hdr.sequence));
1070 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); 1187 IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1071 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); 1188 iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1072 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); 1189 iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1073 1190
1074 /* Set up entry for this TFD in Tx byte-count array */ 1191 /* Set up entry for this TFD in Tx byte-count array */
1075 if (ampdu) 1192 if (is_agg)
1076 iwl_trans_txq_update_byte_cnt_tbl(priv, txq, 1193 iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
1077 le16_to_cpu(tx_cmd->len)); 1194 le16_to_cpu(tx_cmd->len));
1078 1195
1079 dma_sync_single_for_device(priv->bus->dev, txcmd_phys, firstlen, 1196 dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
1080 DMA_BIDIRECTIONAL); 1197 DMA_BIDIRECTIONAL);
1081 1198
1082 trace_iwlwifi_dev_tx(priv, 1199 trace_iwlwifi_dev_tx(priv(trans),
1083 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], 1200 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1084 sizeof(struct iwl_tfd), 1201 sizeof(struct iwl_tfd),
1085 &dev_cmd->hdr, firstlen, 1202 &dev_cmd->hdr, firstlen,
@@ -1087,7 +1204,14 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
1087 1204
1088 /* Tell device the write index *just past* this latest filled TFD */ 1205 /* Tell device the write index *just past* this latest filled TFD */
1089 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1206 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1090 iwl_txq_update_write_ptr(priv, txq); 1207 iwl_txq_update_write_ptr(trans, txq);
1208
1209 if (ieee80211_is_data_qos(fc)) {
1210 trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
1211 if (!ieee80211_has_morefrags(fc))
1212 trans->shrd->tid_data[sta_id][tid].seq_number =
1213 seq_number;
1214 }
1091 1215
1092 /* 1216 /*
1093 * At this point the frame is "transmitted" successfully 1217 * At this point the frame is "transmitted" successfully
@@ -1095,82 +1219,883 @@ static int iwl_trans_tx(struct iwl_priv *priv, struct sk_buff *skb,
1095 * regardless of the value of ret. "ret" only indicates 1219 * regardless of the value of ret. "ret" only indicates
1096 * whether or not we should update the write pointer. 1220 * whether or not we should update the write pointer.
1097 */ 1221 */
1098 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { 1222 if (iwl_queue_space(q) < q->high_mark) {
1099 if (wait_write_ptr) { 1223 if (wait_write_ptr) {
1100 txq->need_update = 1; 1224 txq->need_update = 1;
1101 iwl_txq_update_write_ptr(priv, txq); 1225 iwl_txq_update_write_ptr(trans, txq);
1102 } else { 1226 } else {
1103 iwl_stop_queue(priv, txq); 1227 iwl_stop_queue(trans, txq);
1104 } 1228 }
1105 } 1229 }
1106 return 0; 1230 return 0;
1107} 1231}
1108 1232
1109static void iwl_trans_kick_nic(struct iwl_priv *priv) 1233static void iwl_trans_pcie_kick_nic(struct iwl_trans *trans)
1110{ 1234{
1111 /* Remove all resets to allow NIC to operate */ 1235 /* Remove all resets to allow NIC to operate */
1112 iwl_write32(priv, CSR_RESET, 0); 1236 iwl_write32(bus(trans), CSR_RESET, 0);
1113} 1237}
1114 1238
1115static void iwl_trans_sync_irq(struct iwl_priv *priv) 1239static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
1116{ 1240{
1117 /* wait to make sure we flush pending tasklet*/ 1241 struct iwl_trans_pcie *trans_pcie =
1118 synchronize_irq(priv->bus->irq); 1242 IWL_TRANS_GET_PCIE_TRANS(trans);
1119 tasklet_kill(&priv->irq_tasklet); 1243 int err;
1244
1245 trans_pcie->inta_mask = CSR_INI_SET_MASK;
1246
1247 tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1248 iwl_irq_tasklet, (unsigned long)trans);
1249
1250 iwl_alloc_isr_ict(trans);
1251
1252 err = request_irq(bus(trans)->irq, iwl_isr_ict, IRQF_SHARED,
1253 DRV_NAME, trans);
1254 if (err) {
1255 IWL_ERR(trans, "Error allocating IRQ %d\n", bus(trans)->irq);
1256 iwl_free_isr_ict(trans);
1257 return err;
1258 }
1259
1260 INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1261 return 0;
1262}
1263
1264static int iwlagn_txq_check_empty(struct iwl_trans *trans,
1265 int sta_id, u8 tid, int txq_id)
1266{
1267 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1268 struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
1269 struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
1270
1271 lockdep_assert_held(&trans->shrd->sta_lock);
1272
1273 switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
1274 case IWL_EMPTYING_HW_QUEUE_DELBA:
1275 /* We are reclaiming the last packet of the */
1276 /* aggregated HW queue */
1277 if ((txq_id == tid_data->agg.txq_id) &&
1278 (q->read_ptr == q->write_ptr)) {
1279 IWL_DEBUG_HT(trans,
1280 "HW queue empty: continue DELBA flow\n");
1281 iwl_trans_pcie_txq_agg_disable(trans, txq_id);
1282 tid_data->agg.state = IWL_AGG_OFF;
1283 iwl_stop_tx_ba_trans_ready(priv(trans),
1284 NUM_IWL_RXON_CTX,
1285 sta_id, tid);
1286 iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
1287 }
1288 break;
1289 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1290 /* We are reclaiming the last packet of the queue */
1291 if (tid_data->tfds_in_queue == 0) {
1292 IWL_DEBUG_HT(trans,
1293 "HW queue empty: continue ADDBA flow\n");
1294 tid_data->agg.state = IWL_AGG_ON;
1295 iwl_start_tx_ba_trans_ready(priv(trans),
1296 NUM_IWL_RXON_CTX,
1297 sta_id, tid);
1298 }
1299 break;
1300 }
1301
1302 return 0;
1303}
1304
1305static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
1306 int sta_id, int tid, int freed)
1307{
1308 lockdep_assert_held(&trans->shrd->sta_lock);
1309
1310 if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
1311 trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
1312 else {
1313 IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
1314 trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
1315 freed);
1316 trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
1317 }
1318}
1319
1320static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1321 int txq_id, int ssn, u32 status,
1322 struct sk_buff_head *skbs)
1323{
1324 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1325 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1326 /* n_bd is usually 256 => n_bd - 1 = 0xff */
1327 int tfd_num = ssn & (txq->q.n_bd - 1);
1328 int freed = 0;
1329 u8 agg_state;
1330 bool cond;
1331
1332 txq->time_stamp = jiffies;
1333
1334 if (txq->sched_retry) {
1335 agg_state =
1336 trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
1337 cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
1338 } else {
1339 cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
1340 }
1341
1342 if (txq->q.read_ptr != tfd_num) {
1343 IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
1344 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1345 ssn , tfd_num, txq_id, txq->swq_id);
1346 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1347 if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
1348 iwl_wake_queue(trans, txq);
1349 }
1350
1351 iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
1352 iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
1353}
1354
1355static void iwl_trans_pcie_free(struct iwl_trans *trans)
1356{
1357 iwl_trans_pcie_tx_free(trans);
1358 iwl_trans_pcie_rx_free(trans);
1359 free_irq(bus(trans)->irq, trans);
1360 iwl_free_isr_ict(trans);
1361 trans->shrd->trans = NULL;
1362 kfree(trans);
1363}
1364
1365#ifdef CONFIG_PM
1366
1367static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1368{
1369 /*
1370 * This function is called when system goes into suspend state
1371 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
1372 * first but since iwl_mac_stop() has no knowledge of who the caller is,
1373 * it will not call apm_ops.stop() to stop the DMA operation.
1374 * Calling apm_ops.stop here to make sure we stop the DMA.
1375 *
1376 * But of course ... if we have configured WoWLAN then we did other
1377 * things already :-)
1378 */
1379 if (!trans->shrd->wowlan)
1380 iwl_apm_stop(priv(trans));
1381
1382 return 0;
1383}
1384
1385static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1386{
1387 bool hw_rfkill = false;
1388
1389 iwl_enable_interrupts(trans);
1390
1391 if (!(iwl_read32(bus(trans), CSR_GP_CNTRL) &
1392 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
1393 hw_rfkill = true;
1394
1395 if (hw_rfkill)
1396 set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1397 else
1398 clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
1399
1400 wiphy_rfkill_set_hw_state(priv(trans)->hw->wiphy, hw_rfkill);
1401
1402 return 0;
1403}
1404#else /* CONFIG_PM */
1405static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1406{ return 0; }
1407
1408static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1409{ return 0; }
1410
1411#endif /* CONFIG_PM */
1412
1413static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
1414 u8 ctx)
1415{
1416 u8 ac, txq_id;
1417 struct iwl_trans_pcie *trans_pcie =
1418 IWL_TRANS_GET_PCIE_TRANS(trans);
1419
1420 for (ac = 0; ac < AC_NUM; ac++) {
1421 txq_id = trans_pcie->ac_to_queue[ctx][ac];
1422 IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
1423 ac,
1424 (atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
1425 ? "stopped" : "awake");
1426 iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
1427 }
1120} 1428}
1121 1429
1122static void iwl_trans_free(struct iwl_priv *priv) 1430const struct iwl_trans_ops trans_ops_pcie;
1431
1432static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
1123{ 1433{
1124 free_irq(priv->bus->irq, priv); 1434 struct iwl_trans *iwl_trans = kzalloc(sizeof(struct iwl_trans) +
1125 iwl_free_isr_ict(priv); 1435 sizeof(struct iwl_trans_pcie),
1436 GFP_KERNEL);
1437 if (iwl_trans) {
1438 struct iwl_trans_pcie *trans_pcie =
1439 IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
1440 iwl_trans->ops = &trans_ops_pcie;
1441 iwl_trans->shrd = shrd;
1442 trans_pcie->trans = iwl_trans;
1443 spin_lock_init(&iwl_trans->hcmd_lock);
1444 }
1445
1446 return iwl_trans;
1126} 1447}
1127 1448
1128static const struct iwl_trans_ops trans_ops = { 1449static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
1129 .start_device = iwl_trans_start_device, 1450{
1130 .prepare_card_hw = iwl_trans_prepare_card_hw, 1451 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1131 .stop_device = iwl_trans_stop_device, 1452
1453 iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
1454}
1132 1455
1133 .tx_start = iwl_trans_tx_start, 1456#define IWL_FLUSH_WAIT_MS 2000
1134 1457
1135 .rx_free = iwl_trans_rx_free, 1458static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1136 .tx_free = iwl_trans_tx_free, 1459{
1460 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1461 struct iwl_tx_queue *txq;
1462 struct iwl_queue *q;
1463 int cnt;
1464 unsigned long now = jiffies;
1465 int ret = 0;
1466
1467 /* waiting for all the tx frames complete might take a while */
1468 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1469 if (cnt == trans->shrd->cmd_queue)
1470 continue;
1471 txq = &trans_pcie->txq[cnt];
1472 q = &txq->q;
1473 while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1474 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1475 msleep(1);
1476
1477 if (q->read_ptr != q->write_ptr) {
1478 IWL_ERR(trans, "fail to flush all tx fifo queues\n");
1479 ret = -ETIMEDOUT;
1480 break;
1481 }
1482 }
1483 return ret;
1484}
1137 1485
1138 .send_cmd = iwl_send_cmd, 1486/*
1139 .send_cmd_pdu = iwl_send_cmd_pdu, 1487 * On every watchdog tick we check (latest) time stamp. If it does not
1488 * change during timeout period and queue is not empty we reset firmware.
1489 */
1490static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1491{
1492 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1493 struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
1494 struct iwl_queue *q = &txq->q;
1495 unsigned long timeout;
1140 1496
1141 .get_tx_cmd = iwl_trans_get_tx_cmd, 1497 if (q->read_ptr == q->write_ptr) {
1142 .tx = iwl_trans_tx, 1498 txq->time_stamp = jiffies;
1499 return 0;
1500 }
1143 1501
1144 .txq_agg_disable = iwl_trans_txq_agg_disable, 1502 timeout = txq->time_stamp +
1145 .txq_agg_setup = iwl_trans_txq_agg_setup, 1503 msecs_to_jiffies(hw_params(trans).wd_timeout);
1146 1504
1147 .kick_nic = iwl_trans_kick_nic, 1505 if (time_after(jiffies, timeout)) {
1506 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
1507 hw_params(trans).wd_timeout);
1508 return 1;
1509 }
1148 1510
1149 .sync_irq = iwl_trans_sync_irq, 1511 return 0;
1150 .free = iwl_trans_free, 1512}
1513
1514#ifdef CONFIG_IWLWIFI_DEBUGFS
1515/* create and remove of files */
1516#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
1517 if (!debugfs_create_file(#name, mode, parent, trans, \
1518 &iwl_dbgfs_##name##_ops)) \
1519 return -ENOMEM; \
1520} while (0)
1521
1522/* file operation */
1523#define DEBUGFS_READ_FUNC(name) \
1524static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
1525 char __user *user_buf, \
1526 size_t count, loff_t *ppos);
1527
1528#define DEBUGFS_WRITE_FUNC(name) \
1529static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
1530 const char __user *user_buf, \
1531 size_t count, loff_t *ppos);
1532
1533
1534static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
1535{
1536 file->private_data = inode->i_private;
1537 return 0;
1538}
1539
1540#define DEBUGFS_READ_FILE_OPS(name) \
1541 DEBUGFS_READ_FUNC(name); \
1542static const struct file_operations iwl_dbgfs_##name##_ops = { \
1543 .read = iwl_dbgfs_##name##_read, \
1544 .open = iwl_dbgfs_open_file_generic, \
1545 .llseek = generic_file_llseek, \
1546};
1547
1548#define DEBUGFS_WRITE_FILE_OPS(name) \
1549 DEBUGFS_WRITE_FUNC(name); \
1550static const struct file_operations iwl_dbgfs_##name##_ops = { \
1551 .write = iwl_dbgfs_##name##_write, \
1552 .open = iwl_dbgfs_open_file_generic, \
1553 .llseek = generic_file_llseek, \
1554};
1555
1556#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
1557 DEBUGFS_READ_FUNC(name); \
1558 DEBUGFS_WRITE_FUNC(name); \
1559static const struct file_operations iwl_dbgfs_##name##_ops = { \
1560 .write = iwl_dbgfs_##name##_write, \
1561 .read = iwl_dbgfs_##name##_read, \
1562 .open = iwl_dbgfs_open_file_generic, \
1563 .llseek = generic_file_llseek, \
1151}; 1564};
1152 1565
1153int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv) 1566static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
1567 char __user *user_buf,
1568 size_t count, loff_t *ppos)
1154{ 1569{
1155 int err; 1570 struct iwl_trans *trans = file->private_data;
1571 struct iwl_priv *priv = priv(trans);
1572 int pos = 0, ofs = 0;
1573 int cnt = 0, entry;
1574 struct iwl_trans_pcie *trans_pcie =
1575 IWL_TRANS_GET_PCIE_TRANS(trans);
1576 struct iwl_tx_queue *txq;
1577 struct iwl_queue *q;
1578 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1579 char *buf;
1580 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
1581 (hw_params(trans).max_txq_num * 32 * 8) + 400;
1582 const u8 *ptr;
1583 ssize_t ret;
1584
1585 if (!trans_pcie->txq) {
1586 IWL_ERR(trans, "txq not ready\n");
1587 return -EAGAIN;
1588 }
1589 buf = kzalloc(bufsz, GFP_KERNEL);
1590 if (!buf) {
1591 IWL_ERR(trans, "Can not allocate buffer\n");
1592 return -ENOMEM;
1593 }
1594 pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
1595 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1596 txq = &trans_pcie->txq[cnt];
1597 q = &txq->q;
1598 pos += scnprintf(buf + pos, bufsz - pos,
1599 "q[%d]: read_ptr: %u, write_ptr: %u\n",
1600 cnt, q->read_ptr, q->write_ptr);
1601 }
1602 if (priv->tx_traffic &&
1603 (iwl_get_debug_level(trans->shrd) & IWL_DL_TX)) {
1604 ptr = priv->tx_traffic;
1605 pos += scnprintf(buf + pos, bufsz - pos,
1606 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
1607 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1608 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1609 entry++, ofs += 16) {
1610 pos += scnprintf(buf + pos, bufsz - pos,
1611 "0x%.4x ", ofs);
1612 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1613 buf + pos, bufsz - pos, 0);
1614 pos += strlen(buf + pos);
1615 if (bufsz - pos > 0)
1616 buf[pos++] = '\n';
1617 }
1618 }
1619 }
1156 1620
1157 priv->trans.ops = &trans_ops; 1621 pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
1158 priv->trans.priv = priv; 1622 pos += scnprintf(buf + pos, bufsz - pos,
1623 "read: %u, write: %u\n",
1624 rxq->read, rxq->write);
1625
1626 if (priv->rx_traffic &&
1627 (iwl_get_debug_level(trans->shrd) & IWL_DL_RX)) {
1628 ptr = priv->rx_traffic;
1629 pos += scnprintf(buf + pos, bufsz - pos,
1630 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
1631 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
1632 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
1633 entry++, ofs += 16) {
1634 pos += scnprintf(buf + pos, bufsz - pos,
1635 "0x%.4x ", ofs);
1636 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
1637 buf + pos, bufsz - pos, 0);
1638 pos += strlen(buf + pos);
1639 if (bufsz - pos > 0)
1640 buf[pos++] = '\n';
1641 }
1642 }
1643 }
1159 1644
1160 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 1645 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1161 iwl_irq_tasklet, (unsigned long)priv); 1646 kfree(buf);
1647 return ret;
1648}
1162 1649
1163 iwl_alloc_isr_ict(priv); 1650static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
1651 const char __user *user_buf,
1652 size_t count, loff_t *ppos)
1653{
1654 struct iwl_trans *trans = file->private_data;
1655 char buf[8];
1656 int buf_size;
1657 int traffic_log;
1658
1659 memset(buf, 0, sizeof(buf));
1660 buf_size = min(count, sizeof(buf) - 1);
1661 if (copy_from_user(buf, user_buf, buf_size))
1662 return -EFAULT;
1663 if (sscanf(buf, "%d", &traffic_log) != 1)
1664 return -EFAULT;
1665 if (traffic_log == 0)
1666 iwl_reset_traffic_log(priv(trans));
1667
1668 return count;
1669}
1164 1670
1165 err = request_irq(priv->bus->irq, iwl_isr_ict, IRQF_SHARED, 1671static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1166 DRV_NAME, priv); 1672 char __user *user_buf,
1167 if (err) { 1673 size_t count, loff_t *ppos)
1168 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->bus->irq); 1674{
1169 iwl_free_isr_ict(priv); 1675 struct iwl_trans *trans = file->private_data;
1170 return err; 1676 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1677 struct iwl_priv *priv = priv(trans);
1678 struct iwl_tx_queue *txq;
1679 struct iwl_queue *q;
1680 char *buf;
1681 int pos = 0;
1682 int cnt;
1683 int ret;
1684 const size_t bufsz = sizeof(char) * 64 * hw_params(trans).max_txq_num;
1685
1686 if (!trans_pcie->txq) {
1687 IWL_ERR(priv, "txq not ready\n");
1688 return -EAGAIN;
1689 }
1690 buf = kzalloc(bufsz, GFP_KERNEL);
1691 if (!buf)
1692 return -ENOMEM;
1693
1694 for (cnt = 0; cnt < hw_params(trans).max_txq_num; cnt++) {
1695 txq = &trans_pcie->txq[cnt];
1696 q = &txq->q;
1697 pos += scnprintf(buf + pos, bufsz - pos,
1698 "hwq %.2d: read=%u write=%u stop=%d"
1699 " swq_id=%#.2x (ac %d/hwq %d)\n",
1700 cnt, q->read_ptr, q->write_ptr,
1701 !!test_bit(cnt, trans_pcie->queue_stopped),
1702 txq->swq_id, txq->swq_id & 3,
1703 (txq->swq_id >> 2) & 0x1f);
1704 if (cnt >= 4)
1705 continue;
1706 /* for the ACs, display the stop count too */
1707 pos += scnprintf(buf + pos, bufsz - pos,
1708 " stop-count: %d\n",
1709 atomic_read(&trans_pcie->queue_stop_count[cnt]));
1710 }
1711 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1712 kfree(buf);
1713 return ret;
1714}
1715
1716static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1717 char __user *user_buf,
1718 size_t count, loff_t *ppos) {
1719 struct iwl_trans *trans = file->private_data;
1720 struct iwl_trans_pcie *trans_pcie =
1721 IWL_TRANS_GET_PCIE_TRANS(trans);
1722 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1723 char buf[256];
1724 int pos = 0;
1725 const size_t bufsz = sizeof(buf);
1726
1727 pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1728 rxq->read);
1729 pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1730 rxq->write);
1731 pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1732 rxq->free_count);
1733 if (rxq->rb_stts) {
1734 pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1735 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1736 } else {
1737 pos += scnprintf(buf + pos, bufsz - pos,
1738 "closed_rb_num: Not Allocated\n");
1739 }
1740 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1741}
1742
1743static ssize_t iwl_dbgfs_log_event_read(struct file *file,
1744 char __user *user_buf,
1745 size_t count, loff_t *ppos)
1746{
1747 struct iwl_trans *trans = file->private_data;
1748 char *buf;
1749 int pos = 0;
1750 ssize_t ret = -ENOMEM;
1751
1752 ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
1753 if (buf) {
1754 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1755 kfree(buf);
1756 }
1757 return ret;
1758}
1759
1760static ssize_t iwl_dbgfs_log_event_write(struct file *file,
1761 const char __user *user_buf,
1762 size_t count, loff_t *ppos)
1763{
1764 struct iwl_trans *trans = file->private_data;
1765 u32 event_log_flag;
1766 char buf[8];
1767 int buf_size;
1768
1769 memset(buf, 0, sizeof(buf));
1770 buf_size = min(count, sizeof(buf) - 1);
1771 if (copy_from_user(buf, user_buf, buf_size))
1772 return -EFAULT;
1773 if (sscanf(buf, "%d", &event_log_flag) != 1)
1774 return -EFAULT;
1775 if (event_log_flag == 1)
1776 iwl_dump_nic_event_log(trans, true, NULL, false);
1777
1778 return count;
1779}
1780
1781static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1782 char __user *user_buf,
1783 size_t count, loff_t *ppos) {
1784
1785 struct iwl_trans *trans = file->private_data;
1786 struct iwl_trans_pcie *trans_pcie =
1787 IWL_TRANS_GET_PCIE_TRANS(trans);
1788 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1789
1790 int pos = 0;
1791 char *buf;
1792 int bufsz = 24 * 64; /* 24 items * 64 char per item */
1793 ssize_t ret;
1794
1795 buf = kzalloc(bufsz, GFP_KERNEL);
1796 if (!buf) {
1797 IWL_ERR(trans, "Can not allocate Buffer\n");
1798 return -ENOMEM;
1799 }
1800
1801 pos += scnprintf(buf + pos, bufsz - pos,
1802 "Interrupt Statistics Report:\n");
1803
1804 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1805 isr_stats->hw);
1806 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1807 isr_stats->sw);
1808 if (isr_stats->sw || isr_stats->hw) {
1809 pos += scnprintf(buf + pos, bufsz - pos,
1810 "\tLast Restarting Code: 0x%X\n",
1811 isr_stats->err_code);
1812 }
1813#ifdef CONFIG_IWLWIFI_DEBUG
1814 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1815 isr_stats->sch);
1816 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1817 isr_stats->alive);
1818#endif
1819 pos += scnprintf(buf + pos, bufsz - pos,
1820 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1821
1822 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1823 isr_stats->ctkill);
1824
1825 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1826 isr_stats->wakeup);
1827
1828 pos += scnprintf(buf + pos, bufsz - pos,
1829 "Rx command responses:\t\t %u\n", isr_stats->rx);
1830
1831 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1832 isr_stats->tx);
1833
1834 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1835 isr_stats->unhandled);
1836
1837 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1838 kfree(buf);
1839 return ret;
1840}
1841
1842static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1843 const char __user *user_buf,
1844 size_t count, loff_t *ppos)
1845{
1846 struct iwl_trans *trans = file->private_data;
1847 struct iwl_trans_pcie *trans_pcie =
1848 IWL_TRANS_GET_PCIE_TRANS(trans);
1849 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1850
1851 char buf[8];
1852 int buf_size;
1853 u32 reset_flag;
1854
1855 memset(buf, 0, sizeof(buf));
1856 buf_size = min(count, sizeof(buf) - 1);
1857 if (copy_from_user(buf, user_buf, buf_size))
1858 return -EFAULT;
1859 if (sscanf(buf, "%x", &reset_flag) != 1)
1860 return -EFAULT;
1861 if (reset_flag == 0)
1862 memset(isr_stats, 0, sizeof(*isr_stats));
1863
1864 return count;
1865}
1866
1867static const char *get_csr_string(int cmd)
1868{
1869 switch (cmd) {
1870 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1871 IWL_CMD(CSR_INT_COALESCING);
1872 IWL_CMD(CSR_INT);
1873 IWL_CMD(CSR_INT_MASK);
1874 IWL_CMD(CSR_FH_INT_STATUS);
1875 IWL_CMD(CSR_GPIO_IN);
1876 IWL_CMD(CSR_RESET);
1877 IWL_CMD(CSR_GP_CNTRL);
1878 IWL_CMD(CSR_HW_REV);
1879 IWL_CMD(CSR_EEPROM_REG);
1880 IWL_CMD(CSR_EEPROM_GP);
1881 IWL_CMD(CSR_OTP_GP_REG);
1882 IWL_CMD(CSR_GIO_REG);
1883 IWL_CMD(CSR_GP_UCODE_REG);
1884 IWL_CMD(CSR_GP_DRIVER_REG);
1885 IWL_CMD(CSR_UCODE_DRV_GP1);
1886 IWL_CMD(CSR_UCODE_DRV_GP2);
1887 IWL_CMD(CSR_LED_REG);
1888 IWL_CMD(CSR_DRAM_INT_TBL_REG);
1889 IWL_CMD(CSR_GIO_CHICKEN_BITS);
1890 IWL_CMD(CSR_ANA_PLL_CFG);
1891 IWL_CMD(CSR_HW_REV_WA_REG);
1892 IWL_CMD(CSR_DBG_HPET_MEM_REG);
1893 default:
1894 return "UNKNOWN";
1895 }
1896}
1897
1898void iwl_dump_csr(struct iwl_trans *trans)
1899{
1900 int i;
1901 static const u32 csr_tbl[] = {
1902 CSR_HW_IF_CONFIG_REG,
1903 CSR_INT_COALESCING,
1904 CSR_INT,
1905 CSR_INT_MASK,
1906 CSR_FH_INT_STATUS,
1907 CSR_GPIO_IN,
1908 CSR_RESET,
1909 CSR_GP_CNTRL,
1910 CSR_HW_REV,
1911 CSR_EEPROM_REG,
1912 CSR_EEPROM_GP,
1913 CSR_OTP_GP_REG,
1914 CSR_GIO_REG,
1915 CSR_GP_UCODE_REG,
1916 CSR_GP_DRIVER_REG,
1917 CSR_UCODE_DRV_GP1,
1918 CSR_UCODE_DRV_GP2,
1919 CSR_LED_REG,
1920 CSR_DRAM_INT_TBL_REG,
1921 CSR_GIO_CHICKEN_BITS,
1922 CSR_ANA_PLL_CFG,
1923 CSR_HW_REV_WA_REG,
1924 CSR_DBG_HPET_MEM_REG
1925 };
1926 IWL_ERR(trans, "CSR values:\n");
1927 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1928 "CSR_INT_PERIODIC_REG)\n");
1929 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1930 IWL_ERR(trans, " %25s: 0X%08x\n",
1931 get_csr_string(csr_tbl[i]),
1932 iwl_read32(bus(trans), csr_tbl[i]));
1171 } 1933 }
1934}
1935
1936static ssize_t iwl_dbgfs_csr_write(struct file *file,
1937 const char __user *user_buf,
1938 size_t count, loff_t *ppos)
1939{
1940 struct iwl_trans *trans = file->private_data;
1941 char buf[8];
1942 int buf_size;
1943 int csr;
1172 1944
1173 INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); 1945 memset(buf, 0, sizeof(buf));
1946 buf_size = min(count, sizeof(buf) - 1);
1947 if (copy_from_user(buf, user_buf, buf_size))
1948 return -EFAULT;
1949 if (sscanf(buf, "%d", &csr) != 1)
1950 return -EFAULT;
1174 1951
1952 iwl_dump_csr(trans);
1953
1954 return count;
1955}
1956
1957static const char *get_fh_string(int cmd)
1958{
1959 switch (cmd) {
1960 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1961 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1962 IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1963 IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1964 IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1965 IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1966 IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1967 IWL_CMD(FH_TSSR_TX_STATUS_REG);
1968 IWL_CMD(FH_TSSR_TX_ERROR_REG);
1969 default:
1970 return "UNKNOWN";
1971 }
1972}
1973
1974int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
1975{
1976 int i;
1977#ifdef CONFIG_IWLWIFI_DEBUG
1978 int pos = 0;
1979 size_t bufsz = 0;
1980#endif
1981 static const u32 fh_tbl[] = {
1982 FH_RSCSR_CHNL0_STTS_WPTR_REG,
1983 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1984 FH_RSCSR_CHNL0_WPTR,
1985 FH_MEM_RCSR_CHNL0_CONFIG_REG,
1986 FH_MEM_RSSR_SHARED_CTRL_REG,
1987 FH_MEM_RSSR_RX_STATUS_REG,
1988 FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1989 FH_TSSR_TX_STATUS_REG,
1990 FH_TSSR_TX_ERROR_REG
1991 };
1992#ifdef CONFIG_IWLWIFI_DEBUG
1993 if (display) {
1994 bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1995 *buf = kmalloc(bufsz, GFP_KERNEL);
1996 if (!*buf)
1997 return -ENOMEM;
1998 pos += scnprintf(*buf + pos, bufsz - pos,
1999 "FH register values:\n");
2000 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2001 pos += scnprintf(*buf + pos, bufsz - pos,
2002 " %34s: 0X%08x\n",
2003 get_fh_string(fh_tbl[i]),
2004 iwl_read_direct32(bus(trans), fh_tbl[i]));
2005 }
2006 return pos;
2007 }
2008#endif
2009 IWL_ERR(trans, "FH register values:\n");
2010 for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
2011 IWL_ERR(trans, " %34s: 0X%08x\n",
2012 get_fh_string(fh_tbl[i]),
2013 iwl_read_direct32(bus(trans), fh_tbl[i]));
2014 }
1175 return 0; 2015 return 0;
1176} 2016}
2017
2018static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2019 char __user *user_buf,
2020 size_t count, loff_t *ppos)
2021{
2022 struct iwl_trans *trans = file->private_data;
2023 char *buf;
2024 int pos = 0;
2025 ssize_t ret = -EFAULT;
2026
2027 ret = pos = iwl_dump_fh(trans, &buf, true);
2028 if (buf) {
2029 ret = simple_read_from_buffer(user_buf,
2030 count, ppos, buf, pos);
2031 kfree(buf);
2032 }
2033
2034 return ret;
2035}
2036
2037DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
2038DEBUGFS_READ_WRITE_FILE_OPS(log_event);
2039DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2040DEBUGFS_READ_FILE_OPS(fh_reg);
2041DEBUGFS_READ_FILE_OPS(rx_queue);
2042DEBUGFS_READ_FILE_OPS(tx_queue);
2043DEBUGFS_WRITE_FILE_OPS(csr);
2044
2045/*
2046 * Create the debugfs files and directories
2047 *
2048 */
2049static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2050 struct dentry *dir)
2051{
2052 DEBUGFS_ADD_FILE(traffic_log, dir, S_IWUSR | S_IRUSR);
2053 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2054 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
2055 DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
2056 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
2057 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
2058 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2059 return 0;
2060}
2061#else
2062static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2063 struct dentry *dir)
2064{ return 0; }
2065
2066#endif /*CONFIG_IWLWIFI_DEBUGFS */
2067
2068const struct iwl_trans_ops trans_ops_pcie = {
2069 .alloc = iwl_trans_pcie_alloc,
2070 .request_irq = iwl_trans_pcie_request_irq,
2071 .start_device = iwl_trans_pcie_start_device,
2072 .prepare_card_hw = iwl_trans_pcie_prepare_card_hw,
2073 .stop_device = iwl_trans_pcie_stop_device,
2074
2075 .tx_start = iwl_trans_pcie_tx_start,
2076 .wake_any_queue = iwl_trans_pcie_wake_any_queue,
2077
2078 .send_cmd = iwl_trans_pcie_send_cmd,
2079 .send_cmd_pdu = iwl_trans_pcie_send_cmd_pdu,
2080
2081 .tx = iwl_trans_pcie_tx,
2082 .reclaim = iwl_trans_pcie_reclaim,
2083
2084 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
2085 .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
2086 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
2087
2088 .kick_nic = iwl_trans_pcie_kick_nic,
2089
2090 .free = iwl_trans_pcie_free,
2091 .stop_queue = iwl_trans_pcie_stop_queue,
2092
2093 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2094
2095 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
2096 .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
2097
2098 .suspend = iwl_trans_pcie_suspend,
2099 .resume = iwl_trans_pcie_resume,
2100};
2101
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 7993aa7ae668..7a2daa886dfd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -63,163 +63,235 @@
63#ifndef __iwl_trans_h__ 63#ifndef __iwl_trans_h__
64#define __iwl_trans_h__ 64#define __iwl_trans_h__
65 65
66#include <linux/debugfs.h>
67#include <linux/skbuff.h>
68
69#include "iwl-shared.h"
70#include "iwl-commands.h"
71
66 /*This file includes the declaration that are exported from the transport 72 /*This file includes the declaration that are exported from the transport
67 * layer */ 73 * layer */
68 74
69struct iwl_priv; 75struct iwl_priv;
70struct iwl_rxon_context; 76struct iwl_rxon_context;
71struct iwl_host_cmd; 77struct iwl_host_cmd;
78struct iwl_shared;
79struct iwl_device_cmd;
72 80
73/** 81/**
74 * struct iwl_trans_ops - transport specific operations 82 * struct iwl_trans_ops - transport specific operations
83 * @alloc: allocates the meta data (not the queues themselves)
84 * @request_irq: requests IRQ - will be called before the FW load in probe flow
75 * @start_device: allocates and inits all the resources for the transport 85 * @start_device: allocates and inits all the resources for the transport
76 * layer. 86 * layer.
77 * @prepare_card_hw: claim the ownership on the HW. Will be called during 87 * @prepare_card_hw: claim the ownership on the HW. Will be called during
78 * probe. 88 * probe.
79 * @tx_start: starts and configures all the Tx fifo - usually done once the fw 89 * @tx_start: starts and configures all the Tx fifo - usually done once the fw
80 * is alive. 90 * is alive.
91 * @wake_any_queue: wake all the queues of a specfic context IWL_RXON_CTX_*
81 * @stop_device:stops the whole device (embedded CPU put to reset) 92 * @stop_device:stops the whole device (embedded CPU put to reset)
82 * @rx_free: frees the rx memory
83 * @tx_free: frees the tx memory
84 * @send_cmd:send a host command 93 * @send_cmd:send a host command
85 * @send_cmd_pdu:send a host command: flags can be CMD_* 94 * @send_cmd_pdu:send a host command: flags can be CMD_*
86 * @get_tx_cmd: returns a pointer to a new Tx cmd for the upper layer use
87 * @tx: send an skb 95 * @tx: send an skb
88 * @txq_agg_setup: setup a tx queue for AMPDU - will be called once the HW is 96 * @reclaim: free packet until ssn. Returns a list of freed packets.
97 * @tx_agg_alloc: allocate resources for a TX BA session
98 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
89 * ready and a successful ADDBA response has been received. 99 * ready and a successful ADDBA response has been received.
90 * @txq_agg_disable: de-configure a Tx queue to send AMPDUs 100 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
91 * @kick_nic: remove the RESET from the embedded CPU and let it run 101 * @kick_nic: remove the RESET from the embedded CPU and let it run
92 * @sync_irq: the upper layer will typically disable interrupt and call this
93 * handler. After this handler returns, it is guaranteed that all
94 * the ISR / tasklet etc... have finished running and the transport
95 * layer shall not pass any Rx.
96 * @free: release all the ressource for the transport layer itself such as 102 * @free: release all the ressource for the transport layer itself such as
97 * irq, tasklet etc... 103 * irq, tasklet etc...
104 * @stop_queue: stop a specific queue
105 * @check_stuck_queue: check if a specific queue is stuck
106 * @wait_tx_queue_empty: wait until all tx queues are empty
107 * @dbgfs_register: add the dbgfs files under this directory. Files will be
108 * automatically deleted.
109 * @suspend: stop the device unless WoWLAN is configured
110 * @resume: resume activity of the device
98 */ 111 */
99struct iwl_trans_ops { 112struct iwl_trans_ops {
100 113
101 int (*start_device)(struct iwl_priv *priv); 114 struct iwl_trans *(*alloc)(struct iwl_shared *shrd);
102 int (*prepare_card_hw)(struct iwl_priv *priv); 115 int (*request_irq)(struct iwl_trans *iwl_trans);
103 void (*stop_device)(struct iwl_priv *priv); 116 int (*start_device)(struct iwl_trans *trans);
104 void (*tx_start)(struct iwl_priv *priv); 117 int (*prepare_card_hw)(struct iwl_trans *trans);
105 void (*tx_free)(struct iwl_priv *priv); 118 void (*stop_device)(struct iwl_trans *trans);
106 void (*rx_free)(struct iwl_priv *priv); 119 void (*tx_start)(struct iwl_trans *trans);
107 120
108 int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 121 void (*wake_any_queue)(struct iwl_trans *trans, u8 ctx);
109 122
110 int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len, 123 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
124
125 int (*send_cmd_pdu)(struct iwl_trans *trans, u8 id, u32 flags, u16 len,
111 const void *data); 126 const void *data);
112 struct iwl_tx_cmd * (*get_tx_cmd)(struct iwl_priv *priv, int txq_id); 127 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
113 int (*tx)(struct iwl_priv *priv, struct sk_buff *skb, 128 struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id);
114 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, 129 void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
115 struct iwl_rxon_context *ctx); 130 int txq_id, int ssn, u32 status,
131 struct sk_buff_head *skbs);
132
133 int (*tx_agg_disable)(struct iwl_trans *trans,
134 enum iwl_rxon_context_id ctx, int sta_id,
135 int tid);
136 int (*tx_agg_alloc)(struct iwl_trans *trans,
137 enum iwl_rxon_context_id ctx, int sta_id, int tid,
138 u16 *ssn);
139 void (*tx_agg_setup)(struct iwl_trans *trans,
140 enum iwl_rxon_context_id ctx, int sta_id, int tid,
141 int frame_limit);
116 142
117 int (*txq_agg_disable)(struct iwl_priv *priv, u16 txq_id, 143 void (*kick_nic)(struct iwl_trans *trans);
118 u16 ssn_idx, u8 tx_fifo);
119 void (*txq_agg_setup)(struct iwl_priv *priv, int sta_id, int tid,
120 int frame_limit);
121 144
122 void (*kick_nic)(struct iwl_priv *priv); 145 void (*free)(struct iwl_trans *trans);
123 146
124 void (*sync_irq)(struct iwl_priv *priv); 147 void (*stop_queue)(struct iwl_trans *trans, int q);
125 void (*free)(struct iwl_priv *priv); 148
149 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
150 int (*check_stuck_queue)(struct iwl_trans *trans, int q);
151 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
152
153 int (*suspend)(struct iwl_trans *trans);
154 int (*resume)(struct iwl_trans *trans);
126}; 155};
127 156
157/**
158 * struct iwl_trans - transport common data
159 * @ops - pointer to iwl_trans_ops
160 * @shrd - pointer to iwl_shared which holds shared data from the upper layer
161 * @hcmd_lock: protects HCMD
162 */
128struct iwl_trans { 163struct iwl_trans {
129 const struct iwl_trans_ops *ops; 164 const struct iwl_trans_ops *ops;
130 struct iwl_priv *priv; 165 struct iwl_shared *shrd;
166 spinlock_t hcmd_lock;
167
168 /* pointer to trans specific struct */
169 /*Ensure that this pointer will always be aligned to sizeof pointer */
170 char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
131}; 171};
132 172
133static inline int trans_start_device(struct iwl_trans *trans) 173static inline int iwl_trans_request_irq(struct iwl_trans *trans)
134{ 174{
135 return trans->ops->start_device(trans->priv); 175 return trans->ops->request_irq(trans);
136} 176}
137 177
138static inline int trans_prepare_card_hw(struct iwl_trans *trans) 178static inline int iwl_trans_start_device(struct iwl_trans *trans)
139{ 179{
140 return trans->ops->prepare_card_hw(trans->priv); 180 return trans->ops->start_device(trans);
141} 181}
142 182
143static inline void trans_stop_device(struct iwl_trans *trans) 183static inline int iwl_trans_prepare_card_hw(struct iwl_trans *trans)
144{ 184{
145 trans->ops->stop_device(trans->priv); 185 return trans->ops->prepare_card_hw(trans);
146} 186}
147 187
148static inline void trans_tx_start(struct iwl_trans *trans) 188static inline void iwl_trans_stop_device(struct iwl_trans *trans)
149{ 189{
150 trans->ops->tx_start(trans->priv); 190 trans->ops->stop_device(trans);
151} 191}
152 192
153static inline void trans_rx_free(struct iwl_trans *trans) 193static inline void iwl_trans_tx_start(struct iwl_trans *trans)
154{ 194{
155 trans->ops->rx_free(trans->priv); 195 trans->ops->tx_start(trans);
156} 196}
157 197
158static inline void trans_tx_free(struct iwl_trans *trans) 198static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans, u8 ctx)
159{ 199{
160 trans->ops->tx_free(trans->priv); 200 trans->ops->wake_any_queue(trans, ctx);
161} 201}
162 202
163static inline int trans_send_cmd(struct iwl_trans *trans, 203
204static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
164 struct iwl_host_cmd *cmd) 205 struct iwl_host_cmd *cmd)
165{ 206{
166 return trans->ops->send_cmd(trans->priv, cmd); 207 return trans->ops->send_cmd(trans, cmd);
208}
209
210static inline int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
211 u32 flags, u16 len, const void *data)
212{
213 return trans->ops->send_cmd_pdu(trans, id, flags, len, data);
167} 214}
168 215
169static inline int trans_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags, 216static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
170 u16 len, const void *data) 217 struct iwl_device_cmd *dev_cmd, u8 ctx, u8 sta_id)
171{ 218{
172 return trans->ops->send_cmd_pdu(trans->priv, id, flags, len, data); 219 return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
173} 220}
174 221
175static inline struct iwl_tx_cmd *trans_get_tx_cmd(struct iwl_trans *trans, 222static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
176 int txq_id) 223 int tid, int txq_id, int ssn, u32 status,
224 struct sk_buff_head *skbs)
177{ 225{
178 return trans->ops->get_tx_cmd(trans->priv, txq_id); 226 trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
179} 227}
180 228
181static inline int trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 229static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
182 struct iwl_tx_cmd *tx_cmd, int txq_id, __le16 fc, bool ampdu, 230 enum iwl_rxon_context_id ctx,
183 struct iwl_rxon_context *ctx) 231 int sta_id, int tid)
184{ 232{
185 return trans->ops->tx(trans->priv, skb, tx_cmd, txq_id, fc, ampdu, ctx); 233 return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
186} 234}
187 235
188static inline int trans_txq_agg_disable(struct iwl_trans *trans, u16 txq_id, 236static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
189 u16 ssn_idx, u8 tx_fifo) 237 enum iwl_rxon_context_id ctx,
238 int sta_id, int tid, u16 *ssn)
190{ 239{
191 return trans->ops->txq_agg_disable(trans->priv, txq_id, 240 return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
192 ssn_idx, tx_fifo);
193} 241}
194 242
195static inline void trans_txq_agg_setup(struct iwl_trans *trans, int sta_id, 243
196 int tid, int frame_limit) 244static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
245 enum iwl_rxon_context_id ctx,
246 int sta_id, int tid,
247 int frame_limit)
197{ 248{
198 trans->ops->txq_agg_setup(trans->priv, sta_id, tid, frame_limit); 249 trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
199} 250}
200 251
201static inline void trans_kick_nic(struct iwl_trans *trans) 252static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
202{ 253{
203 trans->ops->kick_nic(trans->priv); 254 trans->ops->kick_nic(trans);
204} 255}
205 256
206static inline void trans_sync_irq(struct iwl_trans *trans) 257static inline void iwl_trans_free(struct iwl_trans *trans)
207{ 258{
208 trans->ops->sync_irq(trans->priv); 259 trans->ops->free(trans);
209} 260}
210 261
211static inline void trans_free(struct iwl_trans *trans) 262static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
212{ 263{
213 trans->ops->free(trans->priv); 264 trans->ops->stop_queue(trans, q);
214} 265}
215 266
216int iwl_trans_register(struct iwl_trans *trans, struct iwl_priv *priv); 267static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
268{
269 return trans->ops->wait_tx_queue_empty(trans);
270}
217 271
218/*TODO: this functions should NOT be exported from trans module - export it 272static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
219 * until the reclaim flow will be brought to the transport module too */ 273{
274 return trans->ops->check_stuck_queue(trans, q);
275}
276static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
277 struct dentry *dir)
278{
279 return trans->ops->dbgfs_register(trans, dir);
280}
281
282static inline int iwl_trans_suspend(struct iwl_trans *trans)
283{
284 return trans->ops->suspend(trans);
285}
286
287static inline int iwl_trans_resume(struct iwl_trans *trans)
288{
289 return trans->ops->resume(trans);
290}
220 291
221struct iwl_tx_queue; 292/*****************************************************
222void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv, 293* Transport layers implementations
223 struct iwl_tx_queue *txq); 294******************************************************/
295extern const struct iwl_trans_ops trans_ops_pcie;
224 296
225#endif /* __iwl_trans_h__ */ 297#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 6d9204fef90b..f18df82eeb92 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -41,7 +41,6 @@
41#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */ 41#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
42 42
43MODULE_FIRMWARE("3826.arm"); 43MODULE_FIRMWARE("3826.arm");
44MODULE_ALIAS("stlc45xx");
45 44
46/* 45/*
47 * gpios should be handled in board files and provided via platform data, 46 * gpios should be handled in board files and provided via platform data,
@@ -738,3 +737,4 @@ MODULE_LICENSE("GPL");
738MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); 737MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
739MODULE_ALIAS("spi:cx3110x"); 738MODULE_ALIAS("spi:cx3110x");
740MODULE_ALIAS("spi:p54spi"); 739MODULE_ALIAS("spi:p54spi");
740MODULE_ALIAS("spi:stlc45xx");
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 44a3bd4b0f43..2b97a89e7ff8 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/firmware.h> 20#include <linux/firmware.h>
21#include <linux/etherdevice.h> 21#include <linux/etherdevice.h>
22#include <asm/div64.h>
22 23
23#include <net/mac80211.h> 24#include <net/mac80211.h>
24 25
@@ -582,10 +583,13 @@ static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb)
582 if (chan) { 583 if (chan) {
583 struct survey_info *survey = &priv->survey[chan->hw_value]; 584 struct survey_info *survey = &priv->survey[chan->hw_value];
584 survey->noise = clamp_t(s8, priv->noise, -128, 127); 585 survey->noise = clamp_t(s8, priv->noise, -128, 127);
585 survey->channel_time = priv->survey_raw.active / 1024; 586 survey->channel_time = priv->survey_raw.active;
586 survey->channel_time_tx = priv->survey_raw.tx / 1024; 587 survey->channel_time_tx = priv->survey_raw.tx;
587 survey->channel_time_busy = priv->survey_raw.cca / 1024 + 588 survey->channel_time_busy = priv->survey_raw.tx +
588 survey->channel_time_tx; 589 priv->survey_raw.cca;
590 do_div(survey->channel_time, 1024);
591 do_div(survey->channel_time_tx, 1024);
592 do_div(survey->channel_time_busy, 1024);
589 } 593 }
590 594
591 tmp = p54_find_and_unlink_skb(priv, hdr->req_id); 595 tmp = p54_find_and_unlink_skb(priv, hdr->req_id);
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index 7e33f1f4f3d4..e047594794aa 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -46,6 +46,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
46 goto out; 46 goto out;
47 } 47 }
48 48
49 wake_up->role_id = wl->role_id;
49 wake_up->wake_up_event = wl->conf.conn.wake_up_event; 50 wake_up->wake_up_event = wl->conf.conn.wake_up_event;
50 wake_up->listen_interval = wl->conf.conn.listen_interval; 51 wake_up->listen_interval = wl->conf.conn.listen_interval;
51 52
@@ -101,6 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
101 goto out; 102 goto out;
102 } 103 }
103 104
105 acx->role_id = wl->role_id;
104 acx->current_tx_power = power * 10; 106 acx->current_tx_power = power * 10;
105 107
106 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 108 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -128,6 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
128 } 130 }
129 131
130 /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ 132 /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
133 feature->role_id = wl->role_id;
131 feature->data_flow_options = 0; 134 feature->data_flow_options = 0;
132 feature->options = 0; 135 feature->options = 0;
133 136
@@ -183,34 +186,6 @@ out:
183 return ret; 186 return ret;
184} 187}
185 188
186int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
187{
188 struct acx_rx_config *rx_config;
189 int ret;
190
191 wl1271_debug(DEBUG_ACX, "acx rx config");
192
193 rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
194 if (!rx_config) {
195 ret = -ENOMEM;
196 goto out;
197 }
198
199 rx_config->config_options = cpu_to_le32(config);
200 rx_config->filter_options = cpu_to_le32(filter);
201
202 ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
203 rx_config, sizeof(*rx_config));
204 if (ret < 0) {
205 wl1271_warning("failed to set rx config: %d", ret);
206 goto out;
207 }
208
209out:
210 kfree(rx_config);
211 return ret;
212}
213
214int wl1271_acx_pd_threshold(struct wl1271 *wl) 189int wl1271_acx_pd_threshold(struct wl1271 *wl)
215{ 190{
216 struct acx_packet_detection *pd; 191 struct acx_packet_detection *pd;
@@ -250,6 +225,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
250 goto out; 225 goto out;
251 } 226 }
252 227
228 slot->role_id = wl->role_id;
253 slot->wone_index = STATION_WONE_INDEX; 229 slot->wone_index = STATION_WONE_INDEX;
254 slot->slot_time = slot_time; 230 slot->slot_time = slot_time;
255 231
@@ -279,6 +255,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
279 } 255 }
280 256
281 /* MAC filtering */ 257 /* MAC filtering */
258 acx->role_id = wl->role_id;
282 acx->enabled = enable; 259 acx->enabled = enable;
283 acx->num_groups = mc_list_len; 260 acx->num_groups = mc_list_len;
284 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); 261 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -308,6 +285,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
308 285
309 wl1271_debug(DEBUG_ACX, "acx service period timeout"); 286 wl1271_debug(DEBUG_ACX, "acx service period timeout");
310 287
288 rx_timeout->role_id = wl->role_id;
311 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); 289 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
312 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); 290 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
313 291
@@ -344,6 +322,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
344 goto out; 322 goto out;
345 } 323 }
346 324
325 rts->role_id = wl->role_id;
347 rts->threshold = cpu_to_le16((u16)rts_threshold); 326 rts->threshold = cpu_to_le16((u16)rts_threshold);
348 327
349 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); 328 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -403,6 +382,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
403 goto out; 382 goto out;
404 } 383 }
405 384
385 beacon_filter->role_id = wl->role_id;
406 beacon_filter->enable = enable_filter; 386 beacon_filter->enable = enable_filter;
407 387
408 /* 388 /*
@@ -439,6 +419,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
439 } 419 }
440 420
441 /* configure default beacon pass-through rules */ 421 /* configure default beacon pass-through rules */
422 ie_table->role_id = wl->role_id;
442 ie_table->num_ie = 0; 423 ie_table->num_ie = 0;
443 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { 424 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
444 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); 425 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -500,6 +481,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
500 timeout = wl->conf.conn.bss_lose_timeout; 481 timeout = wl->conf.conn.bss_lose_timeout;
501 } 482 }
502 483
484 acx->role_id = wl->role_id;
503 acx->synch_fail_thold = cpu_to_le32(threshold); 485 acx->synch_fail_thold = cpu_to_le32(threshold);
504 acx->bss_lose_timeout = cpu_to_le32(timeout); 486 acx->bss_lose_timeout = cpu_to_le32(timeout);
505 487
@@ -546,43 +528,13 @@ out:
546 return ret; 528 return ret;
547} 529}
548 530
549int wl1271_acx_sta_sg_cfg(struct wl1271 *wl) 531int wl12xx_acx_sg_cfg(struct wl1271 *wl)
550{
551 struct acx_sta_bt_wlan_coex_param *param;
552 struct conf_sg_settings *c = &wl->conf.sg;
553 int i, ret;
554
555 wl1271_debug(DEBUG_ACX, "acx sg sta cfg");
556
557 param = kzalloc(sizeof(*param), GFP_KERNEL);
558 if (!param) {
559 ret = -ENOMEM;
560 goto out;
561 }
562
563 /* BT-WLAN coext parameters */
564 for (i = 0; i < CONF_SG_STA_PARAMS_MAX; i++)
565 param->params[i] = cpu_to_le32(c->sta_params[i]);
566 param->param_idx = CONF_SG_PARAMS_ALL;
567
568 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
569 if (ret < 0) {
570 wl1271_warning("failed to set sg config: %d", ret);
571 goto out;
572 }
573
574out:
575 kfree(param);
576 return ret;
577}
578
579int wl1271_acx_ap_sg_cfg(struct wl1271 *wl)
580{ 532{
581 struct acx_ap_bt_wlan_coex_param *param; 533 struct acx_bt_wlan_coex_param *param;
582 struct conf_sg_settings *c = &wl->conf.sg; 534 struct conf_sg_settings *c = &wl->conf.sg;
583 int i, ret; 535 int i, ret;
584 536
585 wl1271_debug(DEBUG_ACX, "acx sg ap cfg"); 537 wl1271_debug(DEBUG_ACX, "acx sg cfg");
586 538
587 param = kzalloc(sizeof(*param), GFP_KERNEL); 539 param = kzalloc(sizeof(*param), GFP_KERNEL);
588 if (!param) { 540 if (!param) {
@@ -591,8 +543,8 @@ int wl1271_acx_ap_sg_cfg(struct wl1271 *wl)
591 } 543 }
592 544
593 /* BT-WLAN coext parameters */ 545 /* BT-WLAN coext parameters */
594 for (i = 0; i < CONF_SG_AP_PARAMS_MAX; i++) 546 for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
595 param->params[i] = cpu_to_le32(c->ap_params[i]); 547 param->params[i] = cpu_to_le32(c->params[i]);
596 param->param_idx = CONF_SG_PARAMS_ALL; 548 param->param_idx = CONF_SG_PARAMS_ALL;
597 549
598 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 550 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
@@ -647,6 +599,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
647 goto out; 599 goto out;
648 } 600 }
649 601
602 bb->role_id = wl->role_id;
650 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); 603 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
651 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); 604 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
652 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; 605 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -676,6 +629,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
676 goto out; 629 goto out;
677 } 630 }
678 631
632 acx_aid->role_id = wl->role_id;
679 acx_aid->aid = cpu_to_le16(aid); 633 acx_aid->aid = cpu_to_le16(aid);
680 634
681 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); 635 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -731,6 +685,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
731 goto out; 685 goto out;
732 } 686 }
733 687
688 acx->role_id = wl->role_id;
734 acx->preamble = preamble; 689 acx->preamble = preamble;
735 690
736 ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); 691 ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -758,6 +713,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
758 goto out; 713 goto out;
759 } 714 }
760 715
716 acx->role_id = wl->role_id;
761 acx->ctsprotect = ctsprotect; 717 acx->ctsprotect = ctsprotect;
762 718
763 ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); 719 ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -789,9 +745,8 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
789 745
790int wl1271_acx_sta_rate_policies(struct wl1271 *wl) 746int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
791{ 747{
792 struct acx_sta_rate_policy *acx; 748 struct acx_rate_policy *acx;
793 struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; 749 struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
794 int idx = 0;
795 int ret = 0; 750 int ret = 0;
796 751
797 wl1271_debug(DEBUG_ACX, "acx rate policies"); 752 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -803,25 +758,30 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
803 goto out; 758 goto out;
804 } 759 }
805 760
761 wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
762 wl->basic_rate, wl->rate_set);
763
806 /* configure one basic rate class */ 764 /* configure one basic rate class */
807 idx = ACX_TX_BASIC_RATE; 765 acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
808 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate); 766 acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
809 acx->rate_class[idx].short_retry_limit = c->short_retry_limit; 767 acx->rate_policy.short_retry_limit = c->short_retry_limit;
810 acx->rate_class[idx].long_retry_limit = c->long_retry_limit; 768 acx->rate_policy.long_retry_limit = c->long_retry_limit;
811 acx->rate_class[idx].aflags = c->aflags; 769 acx->rate_policy.aflags = c->aflags;
770
771 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
772 if (ret < 0) {
773 wl1271_warning("Setting of rate policies failed: %d", ret);
774 goto out;
775 }
812 776
813 /* configure one AP supported rate class */ 777 /* configure one AP supported rate class */
814 idx = ACX_TX_AP_FULL_RATE; 778 acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
815 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set); 779 acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
816 acx->rate_class[idx].short_retry_limit = c->short_retry_limit; 780 acx->rate_policy.short_retry_limit = c->short_retry_limit;
817 acx->rate_class[idx].long_retry_limit = c->long_retry_limit; 781 acx->rate_policy.long_retry_limit = c->long_retry_limit;
818 acx->rate_class[idx].aflags = c->aflags; 782 acx->rate_policy.aflags = c->aflags;
819 783
820 acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
821 784
822 wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
823 acx->rate_class[ACX_TX_BASIC_RATE].enabled_rates,
824 acx->rate_class[ACX_TX_AP_FULL_RATE].enabled_rates);
825 785
826 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 786 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
827 if (ret < 0) { 787 if (ret < 0) {
@@ -837,7 +797,7 @@ out:
837int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, 797int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
838 u8 idx) 798 u8 idx)
839{ 799{
840 struct acx_ap_rate_policy *acx; 800 struct acx_rate_policy *acx;
841 int ret = 0; 801 int ret = 0;
842 802
843 wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x", 803 wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x",
@@ -883,6 +843,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
883 goto out; 843 goto out;
884 } 844 }
885 845
846 acx->role_id = wl->role_id;
886 acx->ac = ac; 847 acx->ac = ac;
887 acx->cw_min = cw_min; 848 acx->cw_min = cw_min;
888 acx->cw_max = cpu_to_le16(cw_max); 849 acx->cw_max = cpu_to_le16(cw_max);
@@ -916,6 +877,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
916 goto out; 877 goto out;
917 } 878 }
918 879
880 acx->role_id = wl->role_id;
919 acx->queue_id = queue_id; 881 acx->queue_id = queue_id;
920 acx->channel_type = channel_type; 882 acx->channel_type = channel_type;
921 acx->tsid = tsid; 883 acx->tsid = tsid;
@@ -995,52 +957,9 @@ out:
995 return ret; 957 return ret;
996} 958}
997 959
998int wl1271_acx_ap_mem_cfg(struct wl1271 *wl) 960int wl12xx_acx_mem_cfg(struct wl1271 *wl)
999{
1000 struct wl1271_acx_ap_config_memory *mem_conf;
1001 struct conf_memory_settings *mem;
1002 int ret;
1003
1004 wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
1005
1006 mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
1007 if (!mem_conf) {
1008 ret = -ENOMEM;
1009 goto out;
1010 }
1011
1012 if (wl->chip.id == CHIP_ID_1283_PG20)
1013 /*
1014 * FIXME: The 128x AP FW does not yet support dynamic memory.
1015 * Use the base memory configuration for 128x for now. This
1016 * should be fine tuned in the future.
1017 */
1018 mem = &wl->conf.mem_wl128x;
1019 else
1020 mem = &wl->conf.mem_wl127x;
1021
1022 /* memory config */
1023 mem_conf->num_stations = mem->num_stations;
1024 mem_conf->rx_mem_block_num = mem->rx_block_num;
1025 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
1026 mem_conf->num_ssid_profiles = mem->ssid_profiles;
1027 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
1028
1029 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
1030 sizeof(*mem_conf));
1031 if (ret < 0) {
1032 wl1271_warning("wl1271 mem config failed: %d", ret);
1033 goto out;
1034 }
1035
1036out:
1037 kfree(mem_conf);
1038 return ret;
1039}
1040
1041int wl1271_acx_sta_mem_cfg(struct wl1271 *wl)
1042{ 961{
1043 struct wl1271_acx_sta_config_memory *mem_conf; 962 struct wl12xx_acx_config_memory *mem_conf;
1044 struct conf_memory_settings *mem; 963 struct conf_memory_settings *mem;
1045 int ret; 964 int ret;
1046 965
@@ -1183,6 +1102,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1183 goto out; 1102 goto out;
1184 } 1103 }
1185 1104
1105 acx->role_id = wl->role_id;
1186 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; 1106 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
1187 acx->max_consecutive = wl->conf.conn.bet_max_consecutive; 1107 acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
1188 1108
@@ -1210,6 +1130,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
1210 goto out; 1130 goto out;
1211 } 1131 }
1212 1132
1133 acx->role_id = wl->role_id;
1213 acx->version = ACX_IPV4_VERSION; 1134 acx->version = ACX_IPV4_VERSION;
1214 acx->enable = enable; 1135 acx->enable = enable;
1215 1136
@@ -1269,6 +1190,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
1269 goto out; 1190 goto out;
1270 } 1191 }
1271 1192
1193 acx->role_id = wl->role_id;
1272 acx->enabled = enable; 1194 acx->enabled = enable;
1273 1195
1274 ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx)); 1196 ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1295,6 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
1295 goto out; 1217 goto out;
1296 } 1218 }
1297 1219
1220 acx->role_id = wl->role_id;
1298 acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval); 1221 acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
1299 acx->index = index; 1222 acx->index = index;
1300 acx->tpl_validation = tpl_valid; 1223 acx->tpl_validation = tpl_valid;
@@ -1328,6 +1251,7 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1328 1251
1329 wl->last_rssi_event = -1; 1252 wl->last_rssi_event = -1;
1330 1253
1254 acx->role_id = wl->role_id;
1331 acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing); 1255 acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
1332 acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON; 1256 acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
1333 acx->type = WL1271_ACX_TRIG_TYPE_EDGE; 1257 acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1366,6 +1290,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
1366 goto out; 1290 goto out;
1367 } 1291 }
1368 1292
1293 acx->role_id = wl->role_id;
1369 acx->rssi_beacon = c->avg_weight_rssi_beacon; 1294 acx->rssi_beacon = c->avg_weight_rssi_beacon;
1370 acx->rssi_data = c->avg_weight_rssi_data; 1295 acx->rssi_data = c->avg_weight_rssi_data;
1371 acx->snr_beacon = c->avg_weight_snr_beacon; 1296 acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1384,14 +1309,15 @@ out:
1384 1309
1385int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, 1310int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1386 struct ieee80211_sta_ht_cap *ht_cap, 1311 struct ieee80211_sta_ht_cap *ht_cap,
1387 bool allow_ht_operation) 1312 bool allow_ht_operation, u8 hlid)
1388{ 1313{
1389 struct wl1271_acx_ht_capabilities *acx; 1314 struct wl1271_acx_ht_capabilities *acx;
1390 u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1391 int ret = 0; 1315 int ret = 0;
1392 u32 ht_capabilites = 0; 1316 u32 ht_capabilites = 0;
1393 1317
1394 wl1271_debug(DEBUG_ACX, "acx ht capabilities setting"); 1318 wl1271_debug(DEBUG_ACX, "acx ht capabilities setting "
1319 "sta supp: %d sta cap: %d", ht_cap->ht_supported,
1320 ht_cap->cap);
1395 1321
1396 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 1322 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1397 if (!acx) { 1323 if (!acx) {
@@ -1399,26 +1325,22 @@ int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1399 goto out; 1325 goto out;
1400 } 1326 }
1401 1327
1402 /* Allow HT Operation ? */ 1328 if (allow_ht_operation && ht_cap->ht_supported) {
1403 if (allow_ht_operation) { 1329 /* no need to translate capabilities - use the spec values */
1404 ht_capabilites = 1330 ht_capabilites = ht_cap->cap;
1405 WL1271_ACX_FW_CAP_HT_OPERATION; 1331
1406 if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD) 1332 /*
1407 ht_capabilites |= 1333 * this bit is not employed by the spec but only by FW to
1408 WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT; 1334 * indicate peer HT support
1409 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 1335 */
1410 ht_capabilites |= 1336 ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
1411 WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
1412 if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
1413 ht_capabilites |=
1414 WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
1415 1337
1416 /* get data from A-MPDU parameters field */ 1338 /* get data from A-MPDU parameters field */
1417 acx->ampdu_max_length = ht_cap->ampdu_factor; 1339 acx->ampdu_max_length = ht_cap->ampdu_factor;
1418 acx->ampdu_min_spacing = ht_cap->ampdu_density; 1340 acx->ampdu_min_spacing = ht_cap->ampdu_density;
1419 } 1341 }
1420 1342
1421 memcpy(acx->mac_address, mac_address, ETH_ALEN); 1343 acx->hlid = hlid;
1422 acx->ht_capabilites = cpu_to_le32(ht_capabilites); 1344 acx->ht_capabilites = cpu_to_le32(ht_capabilites);
1423 1345
1424 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx)); 1346 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
@@ -1446,6 +1368,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
1446 goto out; 1368 goto out;
1447 } 1369 }
1448 1370
1371 acx->role_id = wl->role_id;
1449 acx->ht_protection = 1372 acx->ht_protection =
1450 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); 1373 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
1451 acx->rifs_mode = 0; 1374 acx->rifs_mode = 0;
@@ -1467,14 +1390,12 @@ out:
1467} 1390}
1468 1391
1469/* Configure BA session initiator/receiver parameters setting in the FW. */ 1392/* Configure BA session initiator/receiver parameters setting in the FW. */
1470int wl1271_acx_set_ba_session(struct wl1271 *wl, 1393int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
1471 enum ieee80211_back_parties direction,
1472 u8 tid_index, u8 policy)
1473{ 1394{
1474 struct wl1271_acx_ba_session_policy *acx; 1395 struct wl1271_acx_ba_initiator_policy *acx;
1475 int ret; 1396 int ret;
1476 1397
1477 wl1271_debug(DEBUG_ACX, "acx ba session setting"); 1398 wl1271_debug(DEBUG_ACX, "acx ba initiator policy");
1478 1399
1479 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 1400 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1480 if (!acx) { 1401 if (!acx) {
@@ -1482,33 +1403,18 @@ int wl1271_acx_set_ba_session(struct wl1271 *wl,
1482 goto out; 1403 goto out;
1483 } 1404 }
1484 1405
1485 /* ANY role */ 1406 /* set for the current role */
1486 acx->role_id = 0xff; 1407 acx->role_id = wl->role_id;
1487 acx->tid = tid_index; 1408 acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
1488 acx->enable = policy; 1409 acx->win_size = wl->conf.ht.tx_ba_win_size;
1489 acx->ba_direction = direction; 1410 acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
1490
1491 switch (direction) {
1492 case WLAN_BACK_INITIATOR:
1493 acx->win_size = wl->conf.ht.tx_ba_win_size;
1494 acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
1495 break;
1496 case WLAN_BACK_RECIPIENT:
1497 acx->win_size = RX_BA_WIN_SIZE;
1498 acx->inactivity_timeout = 0;
1499 break;
1500 default:
1501 wl1271_error("Incorrect acx command id=%x\n", direction);
1502 ret = -EINVAL;
1503 goto out;
1504 }
1505 1411
1506 ret = wl1271_cmd_configure(wl, 1412 ret = wl1271_cmd_configure(wl,
1507 ACX_BA_SESSION_POLICY_CFG, 1413 ACX_BA_SESSION_INIT_POLICY,
1508 acx, 1414 acx,
1509 sizeof(*acx)); 1415 sizeof(*acx));
1510 if (ret < 0) { 1416 if (ret < 0) {
1511 wl1271_warning("acx ba session setting failed: %d", ret); 1417 wl1271_warning("acx ba initiator policy failed: %d", ret);
1512 goto out; 1418 goto out;
1513 } 1419 }
1514 1420
@@ -1518,8 +1424,8 @@ out:
1518} 1424}
1519 1425
1520/* setup BA session receiver setting in the FW. */ 1426/* setup BA session receiver setting in the FW. */
1521int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, 1427int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
1522 bool enable) 1428 u16 ssn, bool enable, u8 peer_hlid)
1523{ 1429{
1524 struct wl1271_acx_ba_receiver_setup *acx; 1430 struct wl1271_acx_ba_receiver_setup *acx;
1525 int ret; 1431 int ret;
@@ -1532,11 +1438,10 @@ int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1532 goto out; 1438 goto out;
1533 } 1439 }
1534 1440
1535 /* Single link for now */ 1441 acx->hlid = peer_hlid;
1536 acx->link_id = 1;
1537 acx->tid = tid_index; 1442 acx->tid = tid_index;
1538 acx->enable = enable; 1443 acx->enable = enable;
1539 acx->win_size = 0; 1444 acx->win_size = wl->conf.ht.rx_ba_win_size;
1540 acx->ssn = ssn; 1445 acx->ssn = ssn;
1541 1446
1542 ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx, 1447 ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx,
@@ -1606,6 +1511,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
1606 if (!(conf_queues & BIT(i))) 1511 if (!(conf_queues & BIT(i)))
1607 continue; 1512 continue;
1608 1513
1514 rx_streaming->role_id = wl->role_id;
1609 rx_streaming->tid = i; 1515 rx_streaming->tid = i;
1610 rx_streaming->enable = enable_queues & BIT(i); 1516 rx_streaming->enable = enable_queues & BIT(i);
1611 rx_streaming->period = wl->conf.rx_streaming.interval; 1517 rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1635,6 +1541,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
1635 if (!acx) 1541 if (!acx)
1636 return -ENOMEM; 1542 return -ENOMEM;
1637 1543
1544 acx->role_id = wl->role_id;
1638 acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries); 1545 acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
1639 1546
1640 ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); 1547 ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1703,31 +1610,6 @@ out:
1703 return ret; 1610 return ret;
1704} 1611}
1705 1612
1706int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable)
1707{
1708 struct acx_ap_beacon_filter *acx = NULL;
1709 int ret;
1710
1711 wl1271_debug(DEBUG_ACX, "acx set ap beacon filter: %d", enable);
1712
1713 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1714 if (!acx)
1715 return -ENOMEM;
1716
1717 acx->enable = enable ? 1 : 0;
1718
1719 ret = wl1271_cmd_configure(wl, ACX_AP_BEACON_FILTER_OPT,
1720 acx, sizeof(*acx));
1721 if (ret < 0) {
1722 wl1271_warning("acx set ap beacon filter failed: %d", ret);
1723 goto out;
1724 }
1725
1726out:
1727 kfree(acx);
1728 return ret;
1729}
1730
1731int wl1271_acx_fm_coex(struct wl1271 *wl) 1613int wl1271_acx_fm_coex(struct wl1271 *wl)
1732{ 1614{
1733 struct wl1271_acx_fm_coex *acx; 1615 struct wl1271_acx_fm_coex *acx;
@@ -1767,3 +1649,45 @@ out:
1767 kfree(acx); 1649 kfree(acx);
1768 return ret; 1650 return ret;
1769} 1651}
1652
1653int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl)
1654{
1655 struct wl12xx_acx_set_rate_mgmt_params *acx = NULL;
1656 struct conf_rate_policy_settings *conf = &wl->conf.rate;
1657 int ret;
1658
1659 wl1271_debug(DEBUG_ACX, "acx set rate mgmt params");
1660
1661 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1662 if (!acx)
1663 return -ENOMEM;
1664
1665 acx->index = ACX_RATE_MGMT_ALL_PARAMS;
1666 acx->rate_retry_score = cpu_to_le16(conf->rate_retry_score);
1667 acx->per_add = cpu_to_le16(conf->per_add);
1668 acx->per_th1 = cpu_to_le16(conf->per_th1);
1669 acx->per_th2 = cpu_to_le16(conf->per_th2);
1670 acx->max_per = cpu_to_le16(conf->max_per);
1671 acx->inverse_curiosity_factor = conf->inverse_curiosity_factor;
1672 acx->tx_fail_low_th = conf->tx_fail_low_th;
1673 acx->tx_fail_high_th = conf->tx_fail_high_th;
1674 acx->per_alpha_shift = conf->per_alpha_shift;
1675 acx->per_add_shift = conf->per_add_shift;
1676 acx->per_beta1_shift = conf->per_beta1_shift;
1677 acx->per_beta2_shift = conf->per_beta2_shift;
1678 acx->rate_check_up = conf->rate_check_up;
1679 acx->rate_check_down = conf->rate_check_down;
1680 memcpy(acx->rate_retry_policy, conf->rate_retry_policy,
1681 sizeof(acx->rate_retry_policy));
1682
1683 ret = wl1271_cmd_configure(wl, ACX_SET_RATE_MGMT_PARAMS,
1684 acx, sizeof(*acx));
1685 if (ret < 0) {
1686 wl1271_warning("acx set rate mgmt params failed: %d", ret);
1687 goto out;
1688 }
1689
1690out:
1691 kfree(acx);
1692 return ret;
1693}
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index d2eb86eccc04..758c596f62f6 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -101,6 +101,17 @@ struct acx_error_counter {
101 __le32 seq_num_miss; 101 __le32 seq_num_miss;
102} __packed; 102} __packed;
103 103
104enum wl12xx_role {
105 WL1271_ROLE_STA = 0,
106 WL1271_ROLE_IBSS,
107 WL1271_ROLE_AP,
108 WL1271_ROLE_DEVICE,
109 WL1271_ROLE_P2P_CL,
110 WL1271_ROLE_P2P_GO,
111
112 WL12XX_INVALID_ROLE_TYPE = 0xff
113};
114
104enum wl1271_psm_mode { 115enum wl1271_psm_mode {
105 /* Active mode */ 116 /* Active mode */
106 WL1271_PSM_CAM = 0, 117 WL1271_PSM_CAM = 0,
@@ -160,94 +171,6 @@ struct acx_rx_msdu_lifetime {
160 __le32 lifetime; 171 __le32 lifetime;
161} __packed; 172} __packed;
162 173
163/*
164 * RX Config Options Table
165 * Bit Definition
166 * === ==========
167 * 31:14 Reserved
168 * 13 Copy RX Status - when set, write three receive status words
169 * to top of rx'd MPDUs.
170 * When cleared, do not write three status words (added rev 1.5)
171 * 12 Reserved
172 * 11 RX Complete upon FCS error - when set, give rx complete
173 * interrupt for FCS errors, after the rx filtering, e.g. unicast
174 * frames not to us with FCS error will not generate an interrupt.
175 * 10 SSID Filter Enable - When set, the WiLink discards all beacon,
176 * probe request, and probe response frames with an SSID that does
177 * not match the SSID specified by the host in the START/JOIN
178 * command.
179 * When clear, the WiLink receives frames with any SSID.
180 * 9 Broadcast Filter Enable - When set, the WiLink discards all
181 * broadcast frames. When clear, the WiLink receives all received
182 * broadcast frames.
183 * 8:6 Reserved
184 * 5 BSSID Filter Enable - When set, the WiLink discards any frames
185 * with a BSSID that does not match the BSSID specified by the
186 * host.
187 * When clear, the WiLink receives frames from any BSSID.
188 * 4 MAC Addr Filter - When set, the WiLink discards any frames
189 * with a destination address that does not match the MAC address
190 * of the adaptor.
191 * When clear, the WiLink receives frames destined to any MAC
192 * address.
193 * 3 Promiscuous - When set, the WiLink receives all valid frames
194 * (i.e., all frames that pass the FCS check).
195 * When clear, only frames that pass the other filters specified
196 * are received.
197 * 2 FCS - When set, the WiLink includes the FCS with the received
198 * frame.
199 * When cleared, the FCS is discarded.
200 * 1 PLCP header - When set, write all data from baseband to frame
201 * buffer including PHY header.
202 * 0 Reserved - Always equal to 0.
203 *
204 * RX Filter Options Table
205 * Bit Definition
206 * === ==========
207 * 31:12 Reserved - Always equal to 0.
208 * 11 Association - When set, the WiLink receives all association
209 * related frames (association request/response, reassocation
210 * request/response, and disassociation). When clear, these frames
211 * are discarded.
212 * 10 Auth/De auth - When set, the WiLink receives all authentication
213 * and de-authentication frames. When clear, these frames are
214 * discarded.
215 * 9 Beacon - When set, the WiLink receives all beacon frames.
216 * When clear, these frames are discarded.
217 * 8 Contention Free - When set, the WiLink receives all contention
218 * free frames.
219 * When clear, these frames are discarded.
220 * 7 Control - When set, the WiLink receives all control frames.
221 * When clear, these frames are discarded.
222 * 6 Data - When set, the WiLink receives all data frames.
223 * When clear, these frames are discarded.
224 * 5 FCS Error - When set, the WiLink receives frames that have FCS
225 * errors.
226 * When clear, these frames are discarded.
227 * 4 Management - When set, the WiLink receives all management
228 * frames.
229 * When clear, these frames are discarded.
230 * 3 Probe Request - When set, the WiLink receives all probe request
231 * frames.
232 * When clear, these frames are discarded.
233 * 2 Probe Response - When set, the WiLink receives all probe
234 * response frames.
235 * When clear, these frames are discarded.
236 * 1 RTS/CTS/ACK - When set, the WiLink receives all RTS, CTS and ACK
237 * frames.
238 * When clear, these frames are discarded.
239 * 0 Rsvd Type/Sub Type - When set, the WiLink receives all frames
240 * that have reserved frame types and sub types as defined by the
241 * 802.11 specification.
242 * When clear, these frames are discarded.
243 */
244struct acx_rx_config {
245 struct acx_header header;
246
247 __le32 config_options;
248 __le32 filter_options;
249} __packed;
250
251struct acx_packet_detection { 174struct acx_packet_detection {
252 struct acx_header header; 175 struct acx_header header;
253 176
@@ -267,9 +190,10 @@ enum acx_slot_type {
267struct acx_slot { 190struct acx_slot {
268 struct acx_header header; 191 struct acx_header header;
269 192
193 u8 role_id;
270 u8 wone_index; /* Reserved */ 194 u8 wone_index; /* Reserved */
271 u8 slot_time; 195 u8 slot_time;
272 u8 reserved[6]; 196 u8 reserved[5];
273} __packed; 197} __packed;
274 198
275 199
@@ -279,29 +203,35 @@ struct acx_slot {
279struct acx_dot11_grp_addr_tbl { 203struct acx_dot11_grp_addr_tbl {
280 struct acx_header header; 204 struct acx_header header;
281 205
206 u8 role_id;
282 u8 enabled; 207 u8 enabled;
283 u8 num_groups; 208 u8 num_groups;
284 u8 pad[2]; 209 u8 pad[1];
285 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 210 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
286} __packed; 211} __packed;
287 212
288struct acx_rx_timeout { 213struct acx_rx_timeout {
289 struct acx_header header; 214 struct acx_header header;
290 215
216 u8 role_id;
217 u8 reserved;
291 __le16 ps_poll_timeout; 218 __le16 ps_poll_timeout;
292 __le16 upsd_timeout; 219 __le16 upsd_timeout;
220 u8 padding[2];
293} __packed; 221} __packed;
294 222
295struct acx_rts_threshold { 223struct acx_rts_threshold {
296 struct acx_header header; 224 struct acx_header header;
297 225
226 u8 role_id;
227 u8 reserved;
298 __le16 threshold; 228 __le16 threshold;
299 u8 pad[2];
300} __packed; 229} __packed;
301 230
302struct acx_beacon_filter_option { 231struct acx_beacon_filter_option {
303 struct acx_header header; 232 struct acx_header header;
304 233
234 u8 role_id;
305 u8 enable; 235 u8 enable;
306 /* 236 /*
307 * The number of beacons without the unicast TIM 237 * The number of beacons without the unicast TIM
@@ -311,7 +241,7 @@ struct acx_beacon_filter_option {
311 * without the unicast TIM bit set are dropped. 241 * without the unicast TIM bit set are dropped.
312 */ 242 */
313 u8 max_num_beacons; 243 u8 max_num_beacons;
314 u8 pad[2]; 244 u8 pad[1];
315} __packed; 245} __packed;
316 246
317/* 247/*
@@ -350,14 +280,17 @@ struct acx_beacon_filter_option {
350struct acx_beacon_filter_ie_table { 280struct acx_beacon_filter_ie_table {
351 struct acx_header header; 281 struct acx_header header;
352 282
283 u8 role_id;
353 u8 num_ie; 284 u8 num_ie;
354 u8 pad[3]; 285 u8 pad[2];
355 u8 table[BEACON_FILTER_TABLE_MAX_SIZE]; 286 u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
356} __packed; 287} __packed;
357 288
358struct acx_conn_monit_params { 289struct acx_conn_monit_params {
359 struct acx_header header; 290 struct acx_header header;
360 291
292 u8 role_id;
293 u8 padding[3];
361 __le32 synch_fail_thold; /* number of beacons missed */ 294 __le32 synch_fail_thold; /* number of beacons missed */
362 __le32 bss_lose_timeout; /* number of TU's from synch fail */ 295 __le32 bss_lose_timeout; /* number of TU's from synch fail */
363} __packed; 296} __packed;
@@ -369,23 +302,14 @@ struct acx_bt_wlan_coex {
369 u8 pad[3]; 302 u8 pad[3];
370} __packed; 303} __packed;
371 304
372struct acx_sta_bt_wlan_coex_param { 305struct acx_bt_wlan_coex_param {
373 struct acx_header header;
374
375 __le32 params[CONF_SG_STA_PARAMS_MAX];
376 u8 param_idx;
377 u8 padding[3];
378} __packed;
379
380struct acx_ap_bt_wlan_coex_param {
381 struct acx_header header; 306 struct acx_header header;
382 307
383 __le32 params[CONF_SG_AP_PARAMS_MAX]; 308 __le32 params[CONF_SG_PARAMS_MAX];
384 u8 param_idx; 309 u8 param_idx;
385 u8 padding[3]; 310 u8 padding[3];
386} __packed; 311} __packed;
387 312
388
389struct acx_dco_itrim_params { 313struct acx_dco_itrim_params {
390 struct acx_header header; 314 struct acx_header header;
391 315
@@ -406,15 +330,16 @@ struct acx_energy_detection {
406struct acx_beacon_broadcast { 330struct acx_beacon_broadcast {
407 struct acx_header header; 331 struct acx_header header;
408 332
409 __le16 beacon_rx_timeout; 333 u8 role_id;
410 __le16 broadcast_timeout;
411
412 /* Enables receiving of broadcast packets in PS mode */ 334 /* Enables receiving of broadcast packets in PS mode */
413 u8 rx_broadcast_in_ps; 335 u8 rx_broadcast_in_ps;
414 336
337 __le16 beacon_rx_timeout;
338 __le16 broadcast_timeout;
339
415 /* Consecutive PS Poll failures before updating the host */ 340 /* Consecutive PS Poll failures before updating the host */
416 u8 ps_poll_threshold; 341 u8 ps_poll_threshold;
417 u8 pad[2]; 342 u8 pad[1];
418} __packed; 343} __packed;
419 344
420struct acx_event_mask { 345struct acx_event_mask {
@@ -424,35 +349,6 @@ struct acx_event_mask {
424 __le32 high_event_mask; /* Unused */ 349 __le32 high_event_mask; /* Unused */
425} __packed; 350} __packed;
426 351
427#define CFG_RX_FCS BIT(2)
428#define CFG_RX_ALL_GOOD BIT(3)
429#define CFG_UNI_FILTER_EN BIT(4)
430#define CFG_BSSID_FILTER_EN BIT(5)
431#define CFG_MC_FILTER_EN BIT(6)
432#define CFG_MC_ADDR0_EN BIT(7)
433#define CFG_MC_ADDR1_EN BIT(8)
434#define CFG_BC_REJECT_EN BIT(9)
435#define CFG_SSID_FILTER_EN BIT(10)
436#define CFG_RX_INT_FCS_ERROR BIT(11)
437#define CFG_RX_INT_ENCRYPTED BIT(12)
438#define CFG_RX_WR_RX_STATUS BIT(13)
439#define CFG_RX_FILTER_NULTI BIT(14)
440#define CFG_RX_RESERVE BIT(15)
441#define CFG_RX_TIMESTAMP_TSF BIT(16)
442
443#define CFG_RX_RSV_EN BIT(0)
444#define CFG_RX_RCTS_ACK BIT(1)
445#define CFG_RX_PRSP_EN BIT(2)
446#define CFG_RX_PREQ_EN BIT(3)
447#define CFG_RX_MGMT_EN BIT(4)
448#define CFG_RX_FCS_ERROR BIT(5)
449#define CFG_RX_DATA_EN BIT(6)
450#define CFG_RX_CTL_EN BIT(7)
451#define CFG_RX_CF_EN BIT(8)
452#define CFG_RX_BCN_EN BIT(9)
453#define CFG_RX_AUTH_EN BIT(10)
454#define CFG_RX_ASSOC_EN BIT(11)
455
456#define SCAN_PASSIVE BIT(0) 352#define SCAN_PASSIVE BIT(0)
457#define SCAN_5GHZ_BAND BIT(1) 353#define SCAN_5GHZ_BAND BIT(1)
458#define SCAN_TRIGGERED BIT(2) 354#define SCAN_TRIGGERED BIT(2)
@@ -465,6 +361,8 @@ struct acx_event_mask {
465struct acx_feature_config { 361struct acx_feature_config {
466 struct acx_header header; 362 struct acx_header header;
467 363
364 u8 role_id;
365 u8 padding[3];
468 __le32 options; 366 __le32 options;
469 __le32 data_flow_options; 367 __le32 data_flow_options;
470} __packed; 368} __packed;
@@ -472,16 +370,18 @@ struct acx_feature_config {
472struct acx_current_tx_power { 370struct acx_current_tx_power {
473 struct acx_header header; 371 struct acx_header header;
474 372
373 u8 role_id;
475 u8 current_tx_power; 374 u8 current_tx_power;
476 u8 padding[3]; 375 u8 padding[2];
477} __packed; 376} __packed;
478 377
479struct acx_wake_up_condition { 378struct acx_wake_up_condition {
480 struct acx_header header; 379 struct acx_header header;
481 380
381 u8 role_id;
482 u8 wake_up_event; /* Only one bit can be set */ 382 u8 wake_up_event; /* Only one bit can be set */
483 u8 listen_interval; 383 u8 listen_interval;
484 u8 pad[2]; 384 u8 pad[1];
485} __packed; 385} __packed;
486 386
487struct acx_aid { 387struct acx_aid {
@@ -490,8 +390,9 @@ struct acx_aid {
490 /* 390 /*
491 * To be set when associated with an AP. 391 * To be set when associated with an AP.
492 */ 392 */
393 u8 role_id;
394 u8 reserved;
493 __le16 aid; 395 __le16 aid;
494 u8 pad[2];
495} __packed; 396} __packed;
496 397
497enum acx_preamble_type { 398enum acx_preamble_type {
@@ -506,8 +407,9 @@ struct acx_preamble {
506 * When set, the WiLink transmits the frames with a short preamble and 407 * When set, the WiLink transmits the frames with a short preamble and
507 * when cleared, the WiLink transmits the frames with a long preamble. 408 * when cleared, the WiLink transmits the frames with a long preamble.
508 */ 409 */
410 u8 role_id;
509 u8 preamble; 411 u8 preamble;
510 u8 padding[3]; 412 u8 padding[2];
511} __packed; 413} __packed;
512 414
513enum acx_ctsprotect_type { 415enum acx_ctsprotect_type {
@@ -517,8 +419,9 @@ enum acx_ctsprotect_type {
517 419
518struct acx_ctsprotect { 420struct acx_ctsprotect {
519 struct acx_header header; 421 struct acx_header header;
422 u8 role_id;
520 u8 ctsprotect; 423 u8 ctsprotect;
521 u8 padding[3]; 424 u8 padding[2];
522} __packed; 425} __packed;
523 426
524struct acx_tx_statistics { 427struct acx_tx_statistics {
@@ -753,18 +656,9 @@ struct acx_rate_class {
753 656
754#define ACX_TX_BASIC_RATE 0 657#define ACX_TX_BASIC_RATE 0
755#define ACX_TX_AP_FULL_RATE 1 658#define ACX_TX_AP_FULL_RATE 1
756#define ACX_TX_RATE_POLICY_CNT 2
757struct acx_sta_rate_policy {
758 struct acx_header header;
759
760 __le32 rate_class_cnt;
761 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
762} __packed;
763
764
765#define ACX_TX_AP_MODE_MGMT_RATE 4 659#define ACX_TX_AP_MODE_MGMT_RATE 4
766#define ACX_TX_AP_MODE_BCST_RATE 5 660#define ACX_TX_AP_MODE_BCST_RATE 5
767struct acx_ap_rate_policy { 661struct acx_rate_policy {
768 struct acx_header header; 662 struct acx_header header;
769 663
770 __le32 rate_policy_idx; 664 __le32 rate_policy_idx;
@@ -773,22 +667,23 @@ struct acx_ap_rate_policy {
773 667
774struct acx_ac_cfg { 668struct acx_ac_cfg {
775 struct acx_header header; 669 struct acx_header header;
670 u8 role_id;
776 u8 ac; 671 u8 ac;
672 u8 aifsn;
777 u8 cw_min; 673 u8 cw_min;
778 __le16 cw_max; 674 __le16 cw_max;
779 u8 aifsn;
780 u8 reserved;
781 __le16 tx_op_limit; 675 __le16 tx_op_limit;
782} __packed; 676} __packed;
783 677
784struct acx_tid_config { 678struct acx_tid_config {
785 struct acx_header header; 679 struct acx_header header;
680 u8 role_id;
786 u8 queue_id; 681 u8 queue_id;
787 u8 channel_type; 682 u8 channel_type;
788 u8 tsid; 683 u8 tsid;
789 u8 ps_scheme; 684 u8 ps_scheme;
790 u8 ack_policy; 685 u8 ack_policy;
791 u8 padding[3]; 686 u8 padding[2];
792 __le32 apsd_conf[2]; 687 __le32 apsd_conf[2];
793} __packed; 688} __packed;
794 689
@@ -804,19 +699,7 @@ struct acx_tx_config_options {
804 __le16 tx_compl_threshold; /* number of packets */ 699 __le16 tx_compl_threshold; /* number of packets */
805} __packed; 700} __packed;
806 701
807#define ACX_TX_DESCRIPTORS 32 702struct wl12xx_acx_config_memory {
808
809struct wl1271_acx_ap_config_memory {
810 struct acx_header header;
811
812 u8 rx_mem_block_num;
813 u8 tx_min_mem_block_num;
814 u8 num_stations;
815 u8 num_ssid_profiles;
816 __le32 total_tx_descriptors;
817} __packed;
818
819struct wl1271_acx_sta_config_memory {
820 struct acx_header header; 703 struct acx_header header;
821 704
822 u8 rx_mem_block_num; 705 u8 rx_mem_block_num;
@@ -890,9 +773,10 @@ struct wl1271_acx_rx_config_opt {
890struct wl1271_acx_bet_enable { 773struct wl1271_acx_bet_enable {
891 struct acx_header header; 774 struct acx_header header;
892 775
776 u8 role_id;
893 u8 enable; 777 u8 enable;
894 u8 max_consecutive; 778 u8 max_consecutive;
895 u8 padding[2]; 779 u8 padding[1];
896} __packed; 780} __packed;
897 781
898#define ACX_IPV4_VERSION 4 782#define ACX_IPV4_VERSION 4
@@ -905,9 +789,10 @@ struct wl1271_acx_bet_enable {
905 789
906struct wl1271_acx_arp_filter { 790struct wl1271_acx_arp_filter {
907 struct acx_header header; 791 struct acx_header header;
792 u8 role_id;
908 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */ 793 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
909 u8 enable; /* bitmap of enabled ARP filtering features */ 794 u8 enable; /* bitmap of enabled ARP filtering features */
910 u8 padding[2]; 795 u8 padding[1];
911 u8 address[16]; /* The configured device IP address - all ARP 796 u8 address[16]; /* The configured device IP address - all ARP
912 requests directed to this IP address will pass 797 requests directed to this IP address will pass
913 through. For IPv4, the first four bytes are 798 through. For IPv4, the first four bytes are
@@ -925,8 +810,9 @@ struct wl1271_acx_pm_config {
925struct wl1271_acx_keep_alive_mode { 810struct wl1271_acx_keep_alive_mode {
926 struct acx_header header; 811 struct acx_header header;
927 812
813 u8 role_id;
928 u8 enabled; 814 u8 enabled;
929 u8 padding[3]; 815 u8 padding[2];
930} __packed; 816} __packed;
931 817
932enum { 818enum {
@@ -942,11 +828,11 @@ enum {
942struct wl1271_acx_keep_alive_config { 828struct wl1271_acx_keep_alive_config {
943 struct acx_header header; 829 struct acx_header header;
944 830
945 __le32 period; 831 u8 role_id;
946 u8 index; 832 u8 index;
947 u8 tpl_validation; 833 u8 tpl_validation;
948 u8 trigger; 834 u8 trigger;
949 u8 padding; 835 __le32 period;
950} __packed; 836} __packed;
951 837
952#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) 838#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
@@ -990,26 +876,33 @@ enum {
990struct wl1271_acx_rssi_snr_trigger { 876struct wl1271_acx_rssi_snr_trigger {
991 struct acx_header header; 877 struct acx_header header;
992 878
993 __le16 threshold; 879 u8 role_id;
994 __le16 pacing; /* 0 - 60000 ms */
995 u8 metric; 880 u8 metric;
996 u8 type; 881 u8 type;
997 u8 dir; 882 u8 dir;
883 __le16 threshold;
884 __le16 pacing; /* 0 - 60000 ms */
998 u8 hysteresis; 885 u8 hysteresis;
999 u8 index; 886 u8 index;
1000 u8 enable; 887 u8 enable;
1001 u8 padding[2]; 888 u8 padding[1];
1002}; 889};
1003 890
1004struct wl1271_acx_rssi_snr_avg_weights { 891struct wl1271_acx_rssi_snr_avg_weights {
1005 struct acx_header header; 892 struct acx_header header;
1006 893
894 u8 role_id;
895 u8 padding[3];
1007 u8 rssi_beacon; 896 u8 rssi_beacon;
1008 u8 rssi_data; 897 u8 rssi_data;
1009 u8 snr_beacon; 898 u8 snr_beacon;
1010 u8 snr_data; 899 u8 snr_data;
1011}; 900};
1012 901
902
903/* special capability bit (not employed by the 802.11n spec) */
904#define WL12XX_HT_CAP_HT_OPERATION BIT(16)
905
1013/* 906/*
1014 * ACX_PEER_HT_CAP 907 * ACX_PEER_HT_CAP
1015 * Configure HT capabilities - declare the capabilities of the peer 908 * Configure HT capabilities - declare the capabilities of the peer
@@ -1018,28 +911,11 @@ struct wl1271_acx_rssi_snr_avg_weights {
1018struct wl1271_acx_ht_capabilities { 911struct wl1271_acx_ht_capabilities {
1019 struct acx_header header; 912 struct acx_header header;
1020 913
1021 /* 914 /* bitmask of capability bits supported by the peer */
1022 * bit 0 - Allow HT Operation
1023 * bit 1 - Allow Greenfield format in TX
1024 * bit 2 - Allow Short GI in TX
1025 * bit 3 - Allow L-SIG TXOP Protection in TX
1026 * bit 4 - Allow HT Control fields in TX.
1027 * Note, driver will still leave space for HT control in packets
1028 * regardless of the value of this field. FW will be responsible
1029 * to drop the HT field from any frame when this Bit set to 0.
1030 * bit 5 - Allow RD initiation in TXOP. FW is allowed to initate RD.
1031 * Exact policy setting for this feature is TBD.
1032 * Note, this bit can only be set to 1 if bit 3 is set to 1.
1033 */
1034 __le32 ht_capabilites; 915 __le32 ht_capabilites;
1035 916
1036 /* 917 /* Indicates to which link these capabilities apply. */
1037 * Indicates to which peer these capabilities apply. 918 u8 hlid;
1038 * For infrastructure use ff:ff:ff:ff:ff:ff that indicates relevance
1039 * for all peers.
1040 * Only valid for IBSS/DLS operation.
1041 */
1042 u8 mac_address[ETH_ALEN];
1043 919
1044 /* 920 /*
1045 * This the maximum A-MPDU length supported by the AP. The FW may not 921 * This the maximum A-MPDU length supported by the AP. The FW may not
@@ -1049,16 +925,9 @@ struct wl1271_acx_ht_capabilities {
1049 925
1050 /* This is the minimal spacing required when sending A-MPDUs to the AP*/ 926 /* This is the minimal spacing required when sending A-MPDUs to the AP*/
1051 u8 ampdu_min_spacing; 927 u8 ampdu_min_spacing;
1052} __packed;
1053
1054/* HT Capabilites Fw Bit Mask Mapping */
1055#define WL1271_ACX_FW_CAP_HT_OPERATION BIT(0)
1056#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
1057#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS BIT(2)
1058#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION BIT(3)
1059#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS BIT(4)
1060#define WL1271_ACX_FW_CAP_RD_INITIATION BIT(5)
1061 928
929 u8 padding;
930} __packed;
1062 931
1063/* 932/*
1064 * ACX_HT_BSS_OPERATION 933 * ACX_HT_BSS_OPERATION
@@ -1067,6 +936,8 @@ struct wl1271_acx_ht_capabilities {
1067struct wl1271_acx_ht_information { 936struct wl1271_acx_ht_information {
1068 struct acx_header header; 937 struct acx_header header;
1069 938
939 u8 role_id;
940
1070 /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */ 941 /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
1071 u8 rifs_mode; 942 u8 rifs_mode;
1072 943
@@ -1088,60 +959,51 @@ struct wl1271_acx_ht_information {
1088 */ 959 */
1089 u8 dual_cts_protection; 960 u8 dual_cts_protection;
1090 961
1091 u8 padding[3]; 962 u8 padding[2];
1092} __packed; 963} __packed;
1093 964
1094#define RX_BA_WIN_SIZE 8 965#define RX_BA_MAX_SESSIONS 2
1095 966
1096struct wl1271_acx_ba_session_policy { 967struct wl1271_acx_ba_initiator_policy {
1097 struct acx_header header; 968 struct acx_header header;
1098 /* 969
1099 * Specifies role Id, Range 0-7, 0xFF means ANY role. 970 /* Specifies role Id, Range 0-7, 0xFF means ANY role. */
1100 * Future use. For now this field is irrelevant
1101 */
1102 u8 role_id; 971 u8 role_id;
972
1103 /* 973 /*
1104 * Specifies Link Id, Range 0-31, 0xFF means ANY Link Id. 974 * Per TID setting for allowing TX BA. Set a bit to 1 to allow
1105 * Not applicable if Role Id is set to ANY. 975 * TX BA sessions for the corresponding TID.
1106 */ 976 */
1107 u8 link_id; 977 u8 tid_bitmap;
1108
1109 u8 tid;
1110
1111 u8 enable;
1112 978
1113 /* Windows size in number of packets */ 979 /* Windows size in number of packets */
1114 u16 win_size; 980 u8 win_size;
1115 981
1116 /* 982 u8 padding1[1];
1117 * As initiator inactivity timeout in time units(TU) of 1024us.
1118 * As receiver reserved
1119 */
1120 u16 inactivity_timeout;
1121 983
1122 /* Initiator = 1/Receiver = 0 */ 984 /* As initiator inactivity timeout in time units(TU) of 1024us */
1123 u8 ba_direction; 985 u16 inactivity_timeout;
1124 986
1125 u8 padding[3]; 987 u8 padding[2];
1126} __packed; 988} __packed;
1127 989
1128struct wl1271_acx_ba_receiver_setup { 990struct wl1271_acx_ba_receiver_setup {
1129 struct acx_header header; 991 struct acx_header header;
1130 992
1131 /* Specifies Link Id, Range 0-31, 0xFF means ANY Link Id */ 993 /* Specifies link id, range 0-31 */
1132 u8 link_id; 994 u8 hlid;
1133 995
1134 u8 tid; 996 u8 tid;
1135 997
1136 u8 enable; 998 u8 enable;
1137 999
1138 u8 padding[1];
1139
1140 /* Windows size in number of packets */ 1000 /* Windows size in number of packets */
1141 u16 win_size; 1001 u8 win_size;
1142 1002
1143 /* BA session starting sequence number. RANGE 0-FFF */ 1003 /* BA session starting sequence number. RANGE 0-FFF */
1144 u16 ssn; 1004 u16 ssn;
1005
1006 u8 padding[2];
1145} __packed; 1007} __packed;
1146 1008
1147struct wl1271_acx_fw_tsf_information { 1009struct wl1271_acx_fw_tsf_information {
@@ -1158,6 +1020,7 @@ struct wl1271_acx_fw_tsf_information {
1158struct wl1271_acx_ps_rx_streaming { 1020struct wl1271_acx_ps_rx_streaming {
1159 struct acx_header header; 1021 struct acx_header header;
1160 1022
1023 u8 role_id;
1161 u8 tid; 1024 u8 tid;
1162 u8 enable; 1025 u8 enable;
1163 1026
@@ -1166,17 +1029,20 @@ struct wl1271_acx_ps_rx_streaming {
1166 1029
1167 /* timeout before first trigger (0-200 msec) */ 1030 /* timeout before first trigger (0-200 msec) */
1168 u8 timeout; 1031 u8 timeout;
1032 u8 padding[3];
1169} __packed; 1033} __packed;
1170 1034
1171struct wl1271_acx_ap_max_tx_retry { 1035struct wl1271_acx_ap_max_tx_retry {
1172 struct acx_header header; 1036 struct acx_header header;
1173 1037
1038 u8 role_id;
1039 u8 padding_1;
1040
1174 /* 1041 /*
1175 * the number of frames transmission failures before 1042 * the number of frames transmission failures before
1176 * issuing the aging event. 1043 * issuing the aging event.
1177 */ 1044 */
1178 __le16 max_tx_retry; 1045 __le16 max_tx_retry;
1179 u8 padding_1[2];
1180} __packed; 1046} __packed;
1181 1047
1182struct wl1271_acx_config_ps { 1048struct wl1271_acx_config_ps {
@@ -1195,13 +1061,6 @@ struct wl1271_acx_inconnection_sta {
1195 u8 padding1[2]; 1061 u8 padding1[2];
1196} __packed; 1062} __packed;
1197 1063
1198struct acx_ap_beacon_filter {
1199 struct acx_header header;
1200
1201 u8 enable;
1202 u8 pad[3];
1203} __packed;
1204
1205/* 1064/*
1206 * ACX_FM_COEX_CFG 1065 * ACX_FM_COEX_CFG
1207 * set the FM co-existence parameters. 1066 * set the FM co-existence parameters.
@@ -1261,6 +1120,30 @@ struct wl1271_acx_fm_coex {
1261 u8 swallow_clk_diff; 1120 u8 swallow_clk_diff;
1262} __packed; 1121} __packed;
1263 1122
1123#define ACX_RATE_MGMT_ALL_PARAMS 0xff
1124struct wl12xx_acx_set_rate_mgmt_params {
1125 struct acx_header header;
1126
1127 u8 index; /* 0xff to configure all params */
1128 u8 padding1;
1129 __le16 rate_retry_score;
1130 __le16 per_add;
1131 __le16 per_th1;
1132 __le16 per_th2;
1133 __le16 max_per;
1134 u8 inverse_curiosity_factor;
1135 u8 tx_fail_low_th;
1136 u8 tx_fail_high_th;
1137 u8 per_alpha_shift;
1138 u8 per_add_shift;
1139 u8 per_beta1_shift;
1140 u8 per_beta2_shift;
1141 u8 rate_check_up;
1142 u8 rate_check_down;
1143 u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
1144 u8 padding2[2];
1145} __packed;
1146
1264enum { 1147enum {
1265 ACX_WAKE_UP_CONDITIONS = 0x0002, 1148 ACX_WAKE_UP_CONDITIONS = 0x0002,
1266 ACX_MEM_CFG = 0x0003, 1149 ACX_MEM_CFG = 0x0003,
@@ -1268,10 +1151,7 @@ enum {
1268 ACX_AC_CFG = 0x0007, 1151 ACX_AC_CFG = 0x0007,
1269 ACX_MEM_MAP = 0x0008, 1152 ACX_MEM_MAP = 0x0008,
1270 ACX_AID = 0x000A, 1153 ACX_AID = 0x000A,
1271 /* ACX_FW_REV is missing in the ref driver, but seems to work */
1272 ACX_FW_REV = 0x000D,
1273 ACX_MEDIUM_USAGE = 0x000F, 1154 ACX_MEDIUM_USAGE = 0x000F,
1274 ACX_RX_CFG = 0x0010,
1275 ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */ 1155 ACX_TX_QUEUE_CFG = 0x0011, /* FIXME: only used by wl1251 */
1276 ACX_STATISTICS = 0x0013, /* Debug API */ 1156 ACX_STATISTICS = 0x0013, /* Debug API */
1277 ACX_PWR_CONSUMPTION_STATISTICS = 0x0014, 1157 ACX_PWR_CONSUMPTION_STATISTICS = 0x0014,
@@ -1279,7 +1159,6 @@ enum {
1279 ACX_TID_CFG = 0x001A, 1159 ACX_TID_CFG = 0x001A,
1280 ACX_PS_RX_STREAMING = 0x001B, 1160 ACX_PS_RX_STREAMING = 0x001B,
1281 ACX_BEACON_FILTER_OPT = 0x001F, 1161 ACX_BEACON_FILTER_OPT = 0x001F,
1282 ACX_AP_BEACON_FILTER_OPT = 0x0020,
1283 ACX_NOISE_HIST = 0x0021, 1162 ACX_NOISE_HIST = 0x0021,
1284 ACX_HDK_VERSION = 0x0022, /* ??? */ 1163 ACX_HDK_VERSION = 0x0022, /* ??? */
1285 ACX_PD_THRESHOLD = 0x0023, 1164 ACX_PD_THRESHOLD = 0x0023,
@@ -1287,7 +1166,6 @@ enum {
1287 ACX_CCA_THRESHOLD = 0x0025, 1166 ACX_CCA_THRESHOLD = 0x0025,
1288 ACX_EVENT_MBOX_MASK = 0x0026, 1167 ACX_EVENT_MBOX_MASK = 0x0026,
1289 ACX_CONN_MONIT_PARAMS = 0x002D, 1168 ACX_CONN_MONIT_PARAMS = 0x002D,
1290 ACX_CONS_TX_FAILURE = 0x002F,
1291 ACX_BCN_DTIM_OPTIONS = 0x0031, 1169 ACX_BCN_DTIM_OPTIONS = 0x0031,
1292 ACX_SG_ENABLE = 0x0032, 1170 ACX_SG_ENABLE = 0x0032,
1293 ACX_SG_CFG = 0x0033, 1171 ACX_SG_CFG = 0x0033,
@@ -1314,11 +1192,14 @@ enum {
1314 ACX_RSSI_SNR_WEIGHTS = 0x0052, 1192 ACX_RSSI_SNR_WEIGHTS = 0x0052,
1315 ACX_KEEP_ALIVE_MODE = 0x0053, 1193 ACX_KEEP_ALIVE_MODE = 0x0053,
1316 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, 1194 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
1317 ACX_BA_SESSION_POLICY_CFG = 0x0055, 1195 ACX_BA_SESSION_INIT_POLICY = 0x0055,
1318 ACX_BA_SESSION_RX_SETUP = 0x0056, 1196 ACX_BA_SESSION_RX_SETUP = 0x0056,
1319 ACX_PEER_HT_CAP = 0x0057, 1197 ACX_PEER_HT_CAP = 0x0057,
1320 ACX_HT_BSS_OPERATION = 0x0058, 1198 ACX_HT_BSS_OPERATION = 0x0058,
1321 ACX_COEX_ACTIVITY = 0x0059, 1199 ACX_COEX_ACTIVITY = 0x0059,
1200 ACX_BURST_MODE = 0x005C,
1201 ACX_SET_RATE_MGMT_PARAMS = 0x005D,
1202 ACX_SET_RATE_ADAPT_PARAMS = 0x0060,
1322 ACX_SET_DCO_ITRIM_PARAMS = 0x0061, 1203 ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
1323 ACX_GEN_FW_CMD = 0x0070, 1204 ACX_GEN_FW_CMD = 0x0070,
1324 ACX_HOST_IF_CFG_BITMAP = 0x0071, 1205 ACX_HOST_IF_CFG_BITMAP = 0x0071,
@@ -1342,7 +1223,6 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl);
1342int wl1271_acx_mem_map(struct wl1271 *wl, 1223int wl1271_acx_mem_map(struct wl1271 *wl,
1343 struct acx_header *mem_map, size_t len); 1224 struct acx_header *mem_map, size_t len);
1344int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl); 1225int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
1345int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
1346int wl1271_acx_pd_threshold(struct wl1271 *wl); 1226int wl1271_acx_pd_threshold(struct wl1271 *wl);
1347int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); 1227int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
1348int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable, 1228int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
@@ -1354,8 +1234,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1354int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1234int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1355int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable); 1235int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
1356int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable); 1236int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
1357int wl1271_acx_sta_sg_cfg(struct wl1271 *wl); 1237int wl12xx_acx_sg_cfg(struct wl1271 *wl);
1358int wl1271_acx_ap_sg_cfg(struct wl1271 *wl);
1359int wl1271_acx_cca_threshold(struct wl1271 *wl); 1238int wl1271_acx_cca_threshold(struct wl1271 *wl);
1360int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); 1239int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
1361int wl1271_acx_aid(struct wl1271 *wl, u16 aid); 1240int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
@@ -1374,8 +1253,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1374 u32 apsd_conf0, u32 apsd_conf1); 1253 u32 apsd_conf0, u32 apsd_conf1);
1375int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold); 1254int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
1376int wl1271_acx_tx_config_options(struct wl1271 *wl); 1255int wl1271_acx_tx_config_options(struct wl1271 *wl);
1377int wl1271_acx_ap_mem_cfg(struct wl1271 *wl); 1256int wl12xx_acx_mem_cfg(struct wl1271 *wl);
1378int wl1271_acx_sta_mem_cfg(struct wl1271 *wl);
1379int wl1271_acx_init_mem_config(struct wl1271 *wl); 1257int wl1271_acx_init_mem_config(struct wl1271 *wl);
1380int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap); 1258int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
1381int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1259int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
@@ -1390,20 +1268,18 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1390int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); 1268int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
1391int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, 1269int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1392 struct ieee80211_sta_ht_cap *ht_cap, 1270 struct ieee80211_sta_ht_cap *ht_cap,
1393 bool allow_ht_operation); 1271 bool allow_ht_operation, u8 hlid);
1394int wl1271_acx_set_ht_information(struct wl1271 *wl, 1272int wl1271_acx_set_ht_information(struct wl1271 *wl,
1395 u16 ht_operation_mode); 1273 u16 ht_operation_mode);
1396int wl1271_acx_set_ba_session(struct wl1271 *wl, 1274int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
1397 enum ieee80211_back_parties direction, 1275int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
1398 u8 tid_index, u8 policy); 1276 u16 ssn, bool enable, u8 peer_hlid);
1399int wl1271_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn,
1400 bool enable);
1401int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1277int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1402int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable); 1278int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
1403int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl); 1279int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
1404int wl1271_acx_config_ps(struct wl1271 *wl); 1280int wl1271_acx_config_ps(struct wl1271 *wl);
1405int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr); 1281int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1406int wl1271_acx_set_ap_beacon_filter(struct wl1271 *wl, bool enable);
1407int wl1271_acx_fm_coex(struct wl1271 *wl); 1282int wl1271_acx_fm_coex(struct wl1271 *wl);
1283int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
1408 1284
1409#endif /* __WL1271_ACX_H__ */ 1285#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 5ebc64d89407..cc70422c0575 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -107,16 +107,6 @@ static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
107 unsigned int quirks = 0; 107 unsigned int quirks = 0;
108 unsigned int *fw_ver = wl->chip.fw_ver; 108 unsigned int *fw_ver = wl->chip.fw_ver;
109 109
110 /* Only for wl127x */
111 if ((fw_ver[FW_VER_CHIP] == FW_VER_CHIP_WL127X) &&
112 /* Check STA version */
113 (((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
114 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_STA_MIN)) ||
115 /* Check AP version */
116 ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP) &&
117 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_1_SPARE_AP_MIN))))
118 quirks |= WL12XX_QUIRK_USE_2_SPARE_BLOCKS;
119
120 /* Only new station firmwares support routing fw logs to the host */ 110 /* Only new station firmwares support routing fw logs to the host */
121 if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) && 111 if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
122 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN)) 112 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
@@ -504,21 +494,18 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
504 wl->event_mask = BSS_LOSE_EVENT_ID | 494 wl->event_mask = BSS_LOSE_EVENT_ID |
505 SCAN_COMPLETE_EVENT_ID | 495 SCAN_COMPLETE_EVENT_ID |
506 PS_REPORT_EVENT_ID | 496 PS_REPORT_EVENT_ID |
507 JOIN_EVENT_COMPLETE_ID |
508 DISCONNECT_EVENT_COMPLETE_ID | 497 DISCONNECT_EVENT_COMPLETE_ID |
509 RSSI_SNR_TRIGGER_0_EVENT_ID | 498 RSSI_SNR_TRIGGER_0_EVENT_ID |
510 PSPOLL_DELIVERY_FAILURE_EVENT_ID | 499 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
511 SOFT_GEMINI_SENSE_EVENT_ID | 500 SOFT_GEMINI_SENSE_EVENT_ID |
512 PERIODIC_SCAN_REPORT_EVENT_ID | 501 PERIODIC_SCAN_REPORT_EVENT_ID |
513 PERIODIC_SCAN_COMPLETE_EVENT_ID; 502 PERIODIC_SCAN_COMPLETE_EVENT_ID |
514 503 DUMMY_PACKET_EVENT_ID |
515 if (wl->bss_type == BSS_TYPE_AP_BSS) 504 PEER_REMOVE_COMPLETE_EVENT_ID |
516 wl->event_mask |= STA_REMOVE_COMPLETE_EVENT_ID | 505 BA_SESSION_RX_CONSTRAINT_EVENT_ID |
517 INACTIVE_STA_EVENT_ID | 506 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
518 MAX_TX_RETRY_EVENT_ID; 507 INACTIVE_STA_EVENT_ID |
519 else 508 MAX_TX_RETRY_EVENT_ID;
520 wl->event_mask |= DUMMY_PACKET_EVENT_ID |
521 BA_SESSION_RX_CONSTRAINT_EVENT_ID;
522 509
523 ret = wl1271_event_unmask(wl); 510 ret = wl1271_event_unmask(wl);
524 if (ret < 0) { 511 if (ret < 0) {
@@ -549,13 +536,13 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
549{ 536{
550 u32 fuse; 537 u32 fuse;
551 538
552 fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1); 539 if (wl->chip.id == CHIP_ID_1283_PG20)
540 fuse = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
541 else
542 fuse = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
553 fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET; 543 fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
554 544
555 wl->hw_pg_ver = (s8)fuse; 545 wl->hw_pg_ver = (s8)fuse;
556
557 if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
558 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
559} 546}
560 547
561static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl) 548static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
@@ -696,7 +683,8 @@ static int wl127x_boot_clk(struct wl1271 *wl)
696 u32 pause; 683 u32 pause;
697 u32 clk; 684 u32 clk;
698 685
699 wl1271_boot_hw_version(wl); 686 if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3)
687 wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
700 688
701 if (wl->ref_clock == CONF_REF_CLK_19_2_E || 689 if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
702 wl->ref_clock == CONF_REF_CLK_38_4_E || 690 wl->ref_clock == CONF_REF_CLK_38_4_E ||
@@ -750,6 +738,8 @@ int wl1271_load_firmware(struct wl1271 *wl)
750 u32 tmp, clk; 738 u32 tmp, clk;
751 int selected_clock = -1; 739 int selected_clock = -1;
752 740
741 wl1271_boot_hw_version(wl);
742
753 if (wl->chip.id == CHIP_ID_1283_PG20) { 743 if (wl->chip.id == CHIP_ID_1283_PG20) {
754 ret = wl128x_boot_clk(wl, &selected_clock); 744 ret = wl128x_boot_clk(wl, &selected_clock);
755 if (ret < 0) 745 if (ret < 0)
@@ -852,9 +842,6 @@ int wl1271_boot(struct wl1271 *wl)
852 /* Enable firmware interrupts now */ 842 /* Enable firmware interrupts now */
853 wl1271_boot_enable_interrupts(wl); 843 wl1271_boot_enable_interrupts(wl);
854 844
855 /* set the wl1271 default filters */
856 wl1271_set_default_filters(wl);
857
858 wl1271_event_mbox_config(wl); 845 wl1271_event_mbox_config(wl);
859 846
860out: 847out:
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
index e8f8255bbabe..06dad9380fa7 100644
--- a/drivers/net/wireless/wl12xx/boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -55,7 +55,8 @@ struct wl1271_static_data {
55#define OCP_REG_CLK_POLARITY 0x0cb2 55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_REG_CLK_PULL 0x0cb4 56#define OCP_REG_CLK_PULL 0x0cb4
57 57
58#define REG_FUSE_DATA_2_1 0x050a 58#define WL127X_REG_FUSE_DATA_2_1 0x050a
59#define WL128X_REG_FUSE_DATA_2_1 0x2152
59#define PG_VER_MASK 0x3c 60#define PG_VER_MASK 0x3c
60#define PG_VER_OFFSET 2 61#define PG_VER_OFFSET 2
61 62
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 97dd237a9580..817bc183bc83 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -363,63 +363,470 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
363 return 0; 363 return 0;
364} 364}
365 365
366int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type) 366int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
367{ 367{
368 struct wl1271_cmd_join *join; 368 struct wl12xx_cmd_role_enable *cmd;
369 int ret, i; 369 int ret;
370 u8 *bssid; 370
371 wl1271_debug(DEBUG_CMD, "cmd role enable");
371 372
372 join = kzalloc(sizeof(*join), GFP_KERNEL); 373 if (WARN_ON(*role_id != WL12XX_INVALID_ROLE_ID))
373 if (!join) { 374 return -EBUSY;
375
376 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
377 if (!cmd) {
374 ret = -ENOMEM; 378 ret = -ENOMEM;
375 goto out; 379 goto out;
376 } 380 }
377 381
378 wl1271_debug(DEBUG_CMD, "cmd join"); 382 /* get role id */
383 cmd->role_id = find_first_zero_bit(wl->roles_map, WL12XX_MAX_ROLES);
384 if (cmd->role_id >= WL12XX_MAX_ROLES) {
385 ret = -EBUSY;
386 goto out_free;
387 }
388
389 memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
390 cmd->role_type = role_type;
391
392 ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
393 if (ret < 0) {
394 wl1271_error("failed to initiate cmd role enable");
395 goto out_free;
396 }
397
398 __set_bit(cmd->role_id, wl->roles_map);
399 *role_id = cmd->role_id;
400
401out_free:
402 kfree(cmd);
403
404out:
405 return ret;
406}
407
408int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id)
409{
410 struct wl12xx_cmd_role_disable *cmd;
411 int ret;
412
413 wl1271_debug(DEBUG_CMD, "cmd role disable");
414
415 if (WARN_ON(*role_id == WL12XX_INVALID_ROLE_ID))
416 return -ENOENT;
417
418 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
419 if (!cmd) {
420 ret = -ENOMEM;
421 goto out;
422 }
423 cmd->role_id = *role_id;
424
425 ret = wl1271_cmd_send(wl, CMD_ROLE_DISABLE, cmd, sizeof(*cmd), 0);
426 if (ret < 0) {
427 wl1271_error("failed to initiate cmd role disable");
428 goto out_free;
429 }
430
431 __clear_bit(*role_id, wl->roles_map);
432 *role_id = WL12XX_INVALID_ROLE_ID;
433
434out_free:
435 kfree(cmd);
436
437out:
438 return ret;
439}
440
441static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
442{
443 u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
444 if (link >= WL12XX_MAX_LINKS)
445 return -EBUSY;
446
447 __set_bit(link, wl->links_map);
448 *hlid = link;
449 return 0;
450}
451
452static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
453{
454 if (*hlid == WL12XX_INVALID_LINK_ID)
455 return;
456
457 __clear_bit(*hlid, wl->links_map);
458 *hlid = WL12XX_INVALID_LINK_ID;
459}
460
461static int wl12xx_get_new_session_id(struct wl1271 *wl)
462{
463 if (wl->session_counter >= SESSION_COUNTER_MAX)
464 wl->session_counter = 0;
465
466 wl->session_counter++;
379 467
380 /* Reverse order BSSID */ 468 return wl->session_counter;
381 bssid = (u8 *) &join->bssid_lsb; 469}
382 for (i = 0; i < ETH_ALEN; i++) 470
383 bssid[i] = wl->bssid[ETH_ALEN - i - 1]; 471int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
472{
473 struct wl12xx_cmd_role_start *cmd;
474 int ret;
475
476 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
477 if (!cmd) {
478 ret = -ENOMEM;
479 goto out;
480 }
384 481
385 join->rx_config_options = cpu_to_le32(wl->rx_config); 482 wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
386 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
387 join->bss_type = bss_type;
388 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
389 join->supported_rate_set = cpu_to_le32(wl->rate_set);
390 483
484 cmd->role_id = wl->dev_role_id;
391 if (wl->band == IEEE80211_BAND_5GHZ) 485 if (wl->band == IEEE80211_BAND_5GHZ)
392 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; 486 cmd->band = WL12XX_BAND_5GHZ;
487 cmd->channel = wl->channel;
488
489 if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
490 ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
491 if (ret)
492 goto out_free;
493 }
494 cmd->device.hlid = wl->dev_hlid;
495 cmd->device.session = wl->session_counter;
496
497 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
498 cmd->role_id, cmd->device.hlid, cmd->device.session);
393 499
394 join->beacon_interval = cpu_to_le16(wl->beacon_int); 500 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
395 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD; 501 if (ret < 0) {
502 wl1271_error("failed to initiate cmd role enable");
503 goto err_hlid;
504 }
505
506 goto out_free;
507
508err_hlid:
509 /* clear links on error */
510 __clear_bit(wl->dev_hlid, wl->links_map);
511 wl->dev_hlid = WL12XX_INVALID_LINK_ID;
512
513
514out_free:
515 kfree(cmd);
396 516
397 join->channel = wl->channel; 517out:
398 join->ssid_len = wl->ssid_len; 518 return ret;
399 memcpy(join->ssid, wl->ssid, wl->ssid_len); 519}
400 520
401 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; 521int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
522{
523 struct wl12xx_cmd_role_stop *cmd;
524 int ret;
402 525
403 wl1271_debug(DEBUG_CMD, "cmd join: basic_rate_set=0x%x, rate_set=0x%x", 526 if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
404 join->basic_rate_set, join->supported_rate_set); 527 return -EINVAL;
405 528
406 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); 529 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
530 if (!cmd) {
531 ret = -ENOMEM;
532 goto out;
533 }
534
535 wl1271_debug(DEBUG_CMD, "cmd role stop dev");
536
537 cmd->role_id = wl->dev_role_id;
538 cmd->disc_type = DISCONNECT_IMMEDIATE;
539 cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
540
541 ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
407 if (ret < 0) { 542 if (ret < 0) {
408 wl1271_error("failed to initiate cmd join"); 543 wl1271_error("failed to initiate cmd role stop");
409 goto out_free; 544 goto out_free;
410 } 545 }
411 546
412 ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID); 547 ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
548 if (ret < 0) {
549 wl1271_error("cmd role stop dev event completion error");
550 goto out_free;
551 }
552
553 wl12xx_free_link(wl, &wl->dev_hlid);
554
555out_free:
556 kfree(cmd);
557
558out:
559 return ret;
560}
561
562int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
563{
564 struct wl12xx_cmd_role_start *cmd;
565 int ret;
566
567 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
568 if (!cmd) {
569 ret = -ENOMEM;
570 goto out;
571 }
572
573 wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
574
575 cmd->role_id = wl->role_id;
576 if (wl->band == IEEE80211_BAND_5GHZ)
577 cmd->band = WL12XX_BAND_5GHZ;
578 cmd->channel = wl->channel;
579 cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
580 cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
581 cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
582 cmd->sta.ssid_len = wl->ssid_len;
583 memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
584 memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
585 cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
586
587 if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
588 ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
589 if (ret)
590 goto out_free;
591 }
592 cmd->sta.hlid = wl->sta_hlid;
593 cmd->sta.session = wl12xx_get_new_session_id(wl);
594 cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
595
596 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
597 "basic_rate_set: 0x%x, remote_rates: 0x%x",
598 wl->role_id, cmd->sta.hlid, cmd->sta.session,
599 wl->basic_rate_set, wl->rate_set);
600
601 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
602 if (ret < 0) {
603 wl1271_error("failed to initiate cmd role start sta");
604 goto err_hlid;
605 }
606
607 goto out_free;
608
609err_hlid:
610 /* clear links on error. */
611 wl12xx_free_link(wl, &wl->sta_hlid);
612
613out_free:
614 kfree(cmd);
615
616out:
617 return ret;
618}
619
620/* use this function to stop ibss as well */
621int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
622{
623 struct wl12xx_cmd_role_stop *cmd;
624 int ret;
625
626 if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
627 return -EINVAL;
628
629 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
630 if (!cmd) {
631 ret = -ENOMEM;
632 goto out;
633 }
634
635 wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
636
637 cmd->role_id = wl->role_id;
638 cmd->disc_type = DISCONNECT_IMMEDIATE;
639 cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
640
641 ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
642 if (ret < 0) {
643 wl1271_error("failed to initiate cmd role stop sta");
644 goto out_free;
645 }
646
647 wl12xx_free_link(wl, &wl->sta_hlid);
648
649out_free:
650 kfree(cmd);
651
652out:
653 return ret;
654}
655
656int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
657{
658 struct wl12xx_cmd_role_start *cmd;
659 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
660 int ret;
661
662 wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
663
664 /*
665 * We currently do not support hidden SSID. The real SSID
666 * should be fetched from mac80211 first.
667 */
668 if (wl->ssid_len == 0) {
669 wl1271_warning("Hidden SSID currently not supported for AP");
670 ret = -EINVAL;
671 goto out;
672 }
673
674 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
675 if (!cmd) {
676 ret = -ENOMEM;
677 goto out;
678 }
679
680 ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
413 if (ret < 0) 681 if (ret < 0)
414 wl1271_error("cmd join event completion error"); 682 goto out_free;
683
684 ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
685 if (ret < 0)
686 goto out_free_global;
687
688 cmd->role_id = wl->role_id;
689 cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
690 cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
691 cmd->ap.global_hlid = wl->ap_global_hlid;
692 cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
693 cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
694 cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
695 cmd->ap.dtim_interval = bss_conf->dtim_period;
696 cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
697 cmd->channel = wl->channel;
698 cmd->ap.ssid_len = wl->ssid_len;
699 cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
700 memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
701 cmd->ap.local_rates = cpu_to_le32(0xffffffff);
702
703 switch (wl->band) {
704 case IEEE80211_BAND_2GHZ:
705 cmd->band = RADIO_BAND_2_4GHZ;
706 break;
707 case IEEE80211_BAND_5GHZ:
708 cmd->band = RADIO_BAND_5GHZ;
709 break;
710 default:
711 wl1271_warning("ap start - unknown band: %d", (int)wl->band);
712 cmd->band = RADIO_BAND_2_4GHZ;
713 break;
714 }
715
716 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
717 if (ret < 0) {
718 wl1271_error("failed to initiate cmd role start ap");
719 goto out_free_bcast;
720 }
721
722 goto out_free;
723
724out_free_bcast:
725 wl12xx_free_link(wl, &wl->ap_bcast_hlid);
726
727out_free_global:
728 wl12xx_free_link(wl, &wl->ap_global_hlid);
729
730out_free:
731 kfree(cmd);
732
733out:
734 return ret;
735}
736
737int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
738{
739 struct wl12xx_cmd_role_stop *cmd;
740 int ret;
741
742 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
743 if (!cmd) {
744 ret = -ENOMEM;
745 goto out;
746 }
747
748 wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
749
750 cmd->role_id = wl->role_id;
751
752 ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
753 if (ret < 0) {
754 wl1271_error("failed to initiate cmd role stop ap");
755 goto out_free;
756 }
757
758 wl12xx_free_link(wl, &wl->ap_bcast_hlid);
759 wl12xx_free_link(wl, &wl->ap_global_hlid);
415 760
416out_free: 761out_free:
417 kfree(join); 762 kfree(cmd);
418 763
419out: 764out:
420 return ret; 765 return ret;
421} 766}
422 767
768int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
769{
770 struct wl12xx_cmd_role_start *cmd;
771 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
772 int ret;
773
774 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
775 if (!cmd) {
776 ret = -ENOMEM;
777 goto out;
778 }
779
780 wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
781
782 cmd->role_id = wl->role_id;
783 if (wl->band == IEEE80211_BAND_5GHZ)
784 cmd->band = WL12XX_BAND_5GHZ;
785 cmd->channel = wl->channel;
786 cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
787 cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
788 cmd->ibss.dtim_interval = bss_conf->dtim_period;
789 cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
790 cmd->ibss.ssid_len = wl->ssid_len;
791 memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
792 memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
793 cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
794
795 if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
796 ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
797 if (ret)
798 goto out_free;
799 }
800 cmd->ibss.hlid = wl->sta_hlid;
801 cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
802
803 wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
804 "basic_rate_set: 0x%x, remote_rates: 0x%x",
805 wl->role_id, cmd->sta.hlid, cmd->sta.session,
806 wl->basic_rate_set, wl->rate_set);
807
808 wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
809
810 ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
811 if (ret < 0) {
812 wl1271_error("failed to initiate cmd role enable");
813 goto err_hlid;
814 }
815
816 goto out_free;
817
818err_hlid:
819 /* clear links on error. */
820 wl12xx_free_link(wl, &wl->sta_hlid);
821
822out_free:
823 kfree(cmd);
824
825out:
826 return ret;
827}
828
829
423/** 830/**
424 * send test command to firmware 831 * send test command to firmware
425 * 832 *
@@ -567,6 +974,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
567 goto out; 974 goto out;
568 } 975 }
569 976
977 ps_params->role_id = wl->role_id;
570 ps_params->ps_mode = ps_mode; 978 ps_params->ps_mode = ps_mode;
571 979
572 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 980 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -813,9 +1221,9 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
813 wl->basic_rate); 1221 wl->basic_rate);
814} 1222}
815 1223
816int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id) 1224int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
817{ 1225{
818 struct wl1271_cmd_set_sta_keys *cmd; 1226 struct wl1271_cmd_set_keys *cmd;
819 int ret = 0; 1227 int ret = 0;
820 1228
821 wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id); 1229 wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
@@ -826,36 +1234,7 @@ int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id)
826 goto out; 1234 goto out;
827 } 1235 }
828 1236
829 cmd->id = id; 1237 cmd->hlid = hlid;
830 cmd->key_action = cpu_to_le16(KEY_SET_ID);
831 cmd->key_type = KEY_WEP;
832
833 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
834 if (ret < 0) {
835 wl1271_warning("cmd set_default_wep_key failed: %d", ret);
836 goto out;
837 }
838
839out:
840 kfree(cmd);
841
842 return ret;
843}
844
845int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
846{
847 struct wl1271_cmd_set_ap_keys *cmd;
848 int ret = 0;
849
850 wl1271_debug(DEBUG_CMD, "cmd set_ap_default_wep_key %d", id);
851
852 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
853 if (!cmd) {
854 ret = -ENOMEM;
855 goto out;
856 }
857
858 cmd->hlid = WL1271_AP_BROADCAST_HLID;
859 cmd->key_id = id; 1238 cmd->key_id = id;
860 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE; 1239 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
861 cmd->key_action = cpu_to_le16(KEY_SET_ID); 1240 cmd->key_action = cpu_to_le16(KEY_SET_ID);
@@ -863,7 +1242,7 @@ int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id)
863 1242
864 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0); 1243 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
865 if (ret < 0) { 1244 if (ret < 0) {
866 wl1271_warning("cmd set_ap_default_wep_key failed: %d", ret); 1245 wl1271_warning("cmd set_default_wep_key failed: %d", ret);
867 goto out; 1246 goto out;
868 } 1247 }
869 1248
@@ -877,17 +1256,27 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
877 u8 key_size, const u8 *key, const u8 *addr, 1256 u8 key_size, const u8 *key, const u8 *addr,
878 u32 tx_seq_32, u16 tx_seq_16) 1257 u32 tx_seq_32, u16 tx_seq_16)
879{ 1258{
880 struct wl1271_cmd_set_sta_keys *cmd; 1259 struct wl1271_cmd_set_keys *cmd;
881 int ret = 0; 1260 int ret = 0;
882 1261
1262 /* hlid might have already been deleted */
1263 if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
1264 return 0;
1265
883 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1266 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
884 if (!cmd) { 1267 if (!cmd) {
885 ret = -ENOMEM; 1268 ret = -ENOMEM;
886 goto out; 1269 goto out;
887 } 1270 }
888 1271
889 if (key_type != KEY_WEP) 1272 cmd->hlid = wl->sta_hlid;
890 memcpy(cmd->addr, addr, ETH_ALEN); 1273
1274 if (key_type == KEY_WEP)
1275 cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
1276 else if (is_broadcast_ether_addr(addr))
1277 cmd->lid_key_type = BROADCAST_LID_TYPE;
1278 else
1279 cmd->lid_key_type = UNICAST_LID_TYPE;
891 1280
892 cmd->key_action = cpu_to_le16(action); 1281 cmd->key_action = cpu_to_le16(action);
893 cmd->key_size = key_size; 1282 cmd->key_size = key_size;
@@ -896,10 +1285,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
896 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16); 1285 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
897 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32); 1286 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
898 1287
899 /* we have only one SSID profile */ 1288 cmd->key_id = id;
900 cmd->ssid_profile = 0;
901
902 cmd->id = id;
903 1289
904 if (key_type == KEY_TKIP) { 1290 if (key_type == KEY_TKIP) {
905 /* 1291 /*
@@ -930,11 +1316,15 @@ out:
930 return ret; 1316 return ret;
931} 1317}
932 1318
1319/*
1320 * TODO: merge with sta/ibss into 1 set_key function.
1321 * note there are slight diffs
1322 */
933int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 1323int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
934 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, 1324 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
935 u16 tx_seq_16) 1325 u16 tx_seq_16)
936{ 1326{
937 struct wl1271_cmd_set_ap_keys *cmd; 1327 struct wl1271_cmd_set_keys *cmd;
938 int ret = 0; 1328 int ret = 0;
939 u8 lid_type; 1329 u8 lid_type;
940 1330
@@ -942,7 +1332,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
942 if (!cmd) 1332 if (!cmd)
943 return -ENOMEM; 1333 return -ENOMEM;
944 1334
945 if (hlid == WL1271_AP_BROADCAST_HLID) { 1335 if (hlid == wl->ap_bcast_hlid) {
946 if (key_type == KEY_WEP) 1336 if (key_type == KEY_WEP)
947 lid_type = WEP_DEFAULT_LID_TYPE; 1337 lid_type = WEP_DEFAULT_LID_TYPE;
948 else 1338 else
@@ -991,12 +1381,12 @@ out:
991 return ret; 1381 return ret;
992} 1382}
993 1383
994int wl1271_cmd_disconnect(struct wl1271 *wl) 1384int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid)
995{ 1385{
996 struct wl1271_cmd_disconnect *cmd; 1386 struct wl12xx_cmd_set_peer_state *cmd;
997 int ret = 0; 1387 int ret = 0;
998 1388
999 wl1271_debug(DEBUG_CMD, "cmd disconnect"); 1389 wl1271_debug(DEBUG_CMD, "cmd set peer state (hlid=%d)", hlid);
1000 1390
1001 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1391 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1002 if (!cmd) { 1392 if (!cmd) {
@@ -1004,21 +1394,15 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
1004 goto out; 1394 goto out;
1005 } 1395 }
1006 1396
1007 cmd->rx_config_options = cpu_to_le32(wl->rx_config); 1397 cmd->hlid = hlid;
1008 cmd->rx_filter_options = cpu_to_le32(wl->rx_filter); 1398 cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
1009 /* disconnect reason is not used in immediate disconnections */
1010 cmd->type = DISCONNECT_IMMEDIATE;
1011 1399
1012 ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0); 1400 ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
1013 if (ret < 0) { 1401 if (ret < 0) {
1014 wl1271_error("failed to send disconnect command"); 1402 wl1271_error("failed to send set peer state command");
1015 goto out_free; 1403 goto out_free;
1016 } 1404 }
1017 1405
1018 ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
1019 if (ret < 0)
1020 wl1271_error("cmd disconnect event completion error");
1021
1022out_free: 1406out_free:
1023 kfree(cmd); 1407 kfree(cmd);
1024 1408
@@ -1026,12 +1410,13 @@ out:
1026 return ret; 1410 return ret;
1027} 1411}
1028 1412
1029int wl1271_cmd_set_sta_state(struct wl1271 *wl) 1413int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
1030{ 1414{
1031 struct wl1271_cmd_set_sta_state *cmd; 1415 struct wl12xx_cmd_add_peer *cmd;
1032 int ret = 0; 1416 int ret;
1417 u32 sta_rates;
1033 1418
1034 wl1271_debug(DEBUG_CMD, "cmd set sta state"); 1419 wl1271_debug(DEBUG_CMD, "cmd add peer %d", (int)hlid);
1035 1420
1036 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1421 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1037 if (!cmd) { 1422 if (!cmd) {
@@ -1039,11 +1424,27 @@ int wl1271_cmd_set_sta_state(struct wl1271 *wl)
1039 goto out; 1424 goto out;
1040 } 1425 }
1041 1426
1042 cmd->state = WL1271_CMD_STA_STATE_CONNECTED; 1427 /* currently we don't support UAPSD */
1428 cmd->sp_len = 0;
1429
1430 memcpy(cmd->addr, sta->addr, ETH_ALEN);
1431 cmd->bss_index = WL1271_AP_BSS_INDEX;
1432 cmd->aid = sta->aid;
1433 cmd->hlid = hlid;
1434 cmd->wmm = sta->wme ? 1 : 0;
1435
1436 sta_rates = sta->supp_rates[wl->band];
1437 if (sta->ht_cap.ht_supported)
1438 sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
1439
1440 cmd->supported_rates =
1441 cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates));
1043 1442
1044 ret = wl1271_cmd_send(wl, CMD_SET_STA_STATE, cmd, sizeof(*cmd), 0); 1443 wl1271_debug(DEBUG_CMD, "new peer rates: 0x%x", cmd->supported_rates);
1444
1445 ret = wl1271_cmd_send(wl, CMD_ADD_PEER, cmd, sizeof(*cmd), 0);
1045 if (ret < 0) { 1446 if (ret < 0) {
1046 wl1271_error("failed to send set STA state command"); 1447 wl1271_error("failed to initiate cmd add peer");
1047 goto out_free; 1448 goto out_free;
1048 } 1449 }
1049 1450
@@ -1054,23 +1455,12 @@ out:
1054 return ret; 1455 return ret;
1055} 1456}
1056 1457
1057int wl1271_cmd_start_bss(struct wl1271 *wl) 1458int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
1058{ 1459{
1059 struct wl1271_cmd_bss_start *cmd; 1460 struct wl12xx_cmd_remove_peer *cmd;
1060 struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
1061 int ret; 1461 int ret;
1062 1462
1063 wl1271_debug(DEBUG_CMD, "cmd start bss"); 1463 wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
1064
1065 /*
1066 * FIXME: We currently do not support hidden SSID. The real SSID
1067 * should be fetched from mac80211 first.
1068 */
1069 if (wl->ssid_len == 0) {
1070 wl1271_warning("Hidden SSID currently not supported for AP");
1071 ret = -EINVAL;
1072 goto out;
1073 }
1074 1464
1075 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1465 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1076 if (!cmd) { 1466 if (!cmd) {
@@ -1078,40 +1468,24 @@ int wl1271_cmd_start_bss(struct wl1271 *wl)
1078 goto out; 1468 goto out;
1079 } 1469 }
1080 1470
1081 memcpy(cmd->bssid, bss_conf->bssid, ETH_ALEN); 1471 cmd->hlid = hlid;
1082 1472 /* We never send a deauth, mac80211 is in charge of this */
1083 cmd->aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period); 1473 cmd->reason_opcode = 0;
1084 cmd->bss_index = WL1271_AP_BSS_INDEX; 1474 cmd->send_deauth_flag = 0;
1085 cmd->global_hlid = WL1271_AP_GLOBAL_HLID;
1086 cmd->broadcast_hlid = WL1271_AP_BROADCAST_HLID;
1087 cmd->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
1088 cmd->beacon_interval = cpu_to_le16(wl->beacon_int);
1089 cmd->dtim_interval = bss_conf->dtim_period;
1090 cmd->beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
1091 cmd->channel = wl->channel;
1092 cmd->ssid_len = wl->ssid_len;
1093 cmd->ssid_type = SSID_TYPE_PUBLIC;
1094 memcpy(cmd->ssid, wl->ssid, wl->ssid_len);
1095
1096 switch (wl->band) {
1097 case IEEE80211_BAND_2GHZ:
1098 cmd->band = RADIO_BAND_2_4GHZ;
1099 break;
1100 case IEEE80211_BAND_5GHZ:
1101 cmd->band = RADIO_BAND_5GHZ;
1102 break;
1103 default:
1104 wl1271_warning("bss start - unknown band: %d", (int)wl->band);
1105 cmd->band = RADIO_BAND_2_4GHZ;
1106 break;
1107 }
1108 1475
1109 ret = wl1271_cmd_send(wl, CMD_BSS_START, cmd, sizeof(*cmd), 0); 1476 ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
1110 if (ret < 0) { 1477 if (ret < 0) {
1111 wl1271_error("failed to initiate cmd start bss"); 1478 wl1271_error("failed to initiate cmd remove peer");
1112 goto out_free; 1479 goto out_free;
1113 } 1480 }
1114 1481
1482 /*
1483 * We are ok with a timeout here. The event is sometimes not sent
1484 * due to a firmware bug.
1485 */
1486 wl1271_cmd_wait_for_event_or_timeout(wl,
1487 PEER_REMOVE_COMPLETE_EVENT_ID);
1488
1115out_free: 1489out_free:
1116 kfree(cmd); 1490 kfree(cmd);
1117 1491
@@ -1119,12 +1493,12 @@ out:
1119 return ret; 1493 return ret;
1120} 1494}
1121 1495
1122int wl1271_cmd_stop_bss(struct wl1271 *wl) 1496int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
1123{ 1497{
1124 struct wl1271_cmd_bss_start *cmd; 1498 struct wl12xx_cmd_config_fwlog *cmd;
1125 int ret; 1499 int ret = 0;
1126 1500
1127 wl1271_debug(DEBUG_CMD, "cmd stop bss"); 1501 wl1271_debug(DEBUG_CMD, "cmd config firmware logger");
1128 1502
1129 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1503 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1130 if (!cmd) { 1504 if (!cmd) {
@@ -1132,11 +1506,15 @@ int wl1271_cmd_stop_bss(struct wl1271 *wl)
1132 goto out; 1506 goto out;
1133 } 1507 }
1134 1508
1135 cmd->bss_index = WL1271_AP_BSS_INDEX; 1509 cmd->logger_mode = wl->conf.fwlog.mode;
1510 cmd->log_severity = wl->conf.fwlog.severity;
1511 cmd->timestamp = wl->conf.fwlog.timestamp;
1512 cmd->output = wl->conf.fwlog.output;
1513 cmd->threshold = wl->conf.fwlog.threshold;
1136 1514
1137 ret = wl1271_cmd_send(wl, CMD_BSS_STOP, cmd, sizeof(*cmd), 0); 1515 ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0);
1138 if (ret < 0) { 1516 if (ret < 0) {
1139 wl1271_error("failed to initiate cmd stop bss"); 1517 wl1271_error("failed to send config firmware logger command");
1140 goto out_free; 1518 goto out_free;
1141 } 1519 }
1142 1520
@@ -1147,12 +1525,12 @@ out:
1147 return ret; 1525 return ret;
1148} 1526}
1149 1527
1150int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid) 1528int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
1151{ 1529{
1152 struct wl1271_cmd_add_sta *cmd; 1530 struct wl12xx_cmd_start_fwlog *cmd;
1153 int ret; 1531 int ret = 0;
1154 1532
1155 wl1271_debug(DEBUG_CMD, "cmd add sta %d", (int)hlid); 1533 wl1271_debug(DEBUG_CMD, "cmd start firmware logger");
1156 1534
1157 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1535 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1158 if (!cmd) { 1536 if (!cmd) {
@@ -1160,23 +1538,9 @@ int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
1160 goto out; 1538 goto out;
1161 } 1539 }
1162 1540
1163 /* currently we don't support UAPSD */ 1541 ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0);
1164 cmd->sp_len = 0;
1165
1166 memcpy(cmd->addr, sta->addr, ETH_ALEN);
1167 cmd->bss_index = WL1271_AP_BSS_INDEX;
1168 cmd->aid = sta->aid;
1169 cmd->hlid = hlid;
1170 cmd->wmm = sta->wme ? 1 : 0;
1171
1172 cmd->supported_rates = cpu_to_le32(wl1271_tx_enabled_rates_get(wl,
1173 sta->supp_rates[wl->band]));
1174
1175 wl1271_debug(DEBUG_CMD, "new sta rates: 0x%x", cmd->supported_rates);
1176
1177 ret = wl1271_cmd_send(wl, CMD_ADD_STA, cmd, sizeof(*cmd), 0);
1178 if (ret < 0) { 1542 if (ret < 0) {
1179 wl1271_error("failed to initiate cmd add sta"); 1543 wl1271_error("failed to send start firmware logger command");
1180 goto out_free; 1544 goto out_free;
1181 } 1545 }
1182 1546
@@ -1187,12 +1551,12 @@ out:
1187 return ret; 1551 return ret;
1188} 1552}
1189 1553
1190int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid) 1554int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
1191{ 1555{
1192 struct wl1271_cmd_remove_sta *cmd; 1556 struct wl12xx_cmd_stop_fwlog *cmd;
1193 int ret; 1557 int ret = 0;
1194 1558
1195 wl1271_debug(DEBUG_CMD, "cmd remove sta %d", (int)hlid); 1559 wl1271_debug(DEBUG_CMD, "cmd stop firmware logger");
1196 1560
1197 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1561 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1198 if (!cmd) { 1562 if (!cmd) {
@@ -1200,23 +1564,12 @@ int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid)
1200 goto out; 1564 goto out;
1201 } 1565 }
1202 1566
1203 cmd->hlid = hlid; 1567 ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0);
1204 /* We never send a deauth, mac80211 is in charge of this */
1205 cmd->reason_opcode = 0;
1206 cmd->send_deauth_flag = 0;
1207
1208 ret = wl1271_cmd_send(wl, CMD_REMOVE_STA, cmd, sizeof(*cmd), 0);
1209 if (ret < 0) { 1568 if (ret < 0) {
1210 wl1271_error("failed to initiate cmd remove sta"); 1569 wl1271_error("failed to send stop firmware logger command");
1211 goto out_free; 1570 goto out_free;
1212 } 1571 }
1213 1572
1214 /*
1215 * We are ok with a timeout here. The event is sometimes not sent
1216 * due to a firmware bug.
1217 */
1218 wl1271_cmd_wait_for_event_or_timeout(wl, STA_REMOVE_COMPLETE_EVENT_ID);
1219
1220out_free: 1573out_free:
1221 kfree(cmd); 1574 kfree(cmd);
1222 1575
@@ -1224,12 +1577,15 @@ out:
1224 return ret; 1577 return ret;
1225} 1578}
1226 1579
1227int wl12xx_cmd_config_fwlog(struct wl1271 *wl) 1580static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
1228{ 1581{
1229 struct wl12xx_cmd_config_fwlog *cmd; 1582 struct wl12xx_cmd_roc *cmd;
1230 int ret = 0; 1583 int ret = 0;
1231 1584
1232 wl1271_debug(DEBUG_CMD, "cmd config firmware logger"); 1585 wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
1586
1587 if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
1588 return -EINVAL;
1233 1589
1234 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1590 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1235 if (!cmd) { 1591 if (!cmd) {
@@ -1237,15 +1593,25 @@ int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
1237 goto out; 1593 goto out;
1238 } 1594 }
1239 1595
1240 cmd->logger_mode = wl->conf.fwlog.mode; 1596 cmd->role_id = role_id;
1241 cmd->log_severity = wl->conf.fwlog.severity; 1597 cmd->channel = wl->channel;
1242 cmd->timestamp = wl->conf.fwlog.timestamp; 1598 switch (wl->band) {
1243 cmd->output = wl->conf.fwlog.output; 1599 case IEEE80211_BAND_2GHZ:
1244 cmd->threshold = wl->conf.fwlog.threshold; 1600 cmd->band = RADIO_BAND_2_4GHZ;
1601 break;
1602 case IEEE80211_BAND_5GHZ:
1603 cmd->band = RADIO_BAND_5GHZ;
1604 break;
1605 default:
1606 wl1271_error("roc - unknown band: %d", (int)wl->band);
1607 ret = -EINVAL;
1608 goto out_free;
1609 }
1245 1610
1246 ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0); 1611
1612 ret = wl1271_cmd_send(wl, CMD_REMAIN_ON_CHANNEL, cmd, sizeof(*cmd), 0);
1247 if (ret < 0) { 1613 if (ret < 0) {
1248 wl1271_error("failed to send config firmware logger command"); 1614 wl1271_error("failed to send ROC command");
1249 goto out_free; 1615 goto out_free;
1250 } 1616 }
1251 1617
@@ -1256,22 +1622,24 @@ out:
1256 return ret; 1622 return ret;
1257} 1623}
1258 1624
1259int wl12xx_cmd_start_fwlog(struct wl1271 *wl) 1625static int wl12xx_cmd_croc(struct wl1271 *wl, u8 role_id)
1260{ 1626{
1261 struct wl12xx_cmd_start_fwlog *cmd; 1627 struct wl12xx_cmd_croc *cmd;
1262 int ret = 0; 1628 int ret = 0;
1263 1629
1264 wl1271_debug(DEBUG_CMD, "cmd start firmware logger"); 1630 wl1271_debug(DEBUG_CMD, "cmd croc (%d)", role_id);
1265 1631
1266 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1632 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1267 if (!cmd) { 1633 if (!cmd) {
1268 ret = -ENOMEM; 1634 ret = -ENOMEM;
1269 goto out; 1635 goto out;
1270 } 1636 }
1637 cmd->role_id = role_id;
1271 1638
1272 ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0); 1639 ret = wl1271_cmd_send(wl, CMD_CANCEL_REMAIN_ON_CHANNEL, cmd,
1640 sizeof(*cmd), 0);
1273 if (ret < 0) { 1641 if (ret < 0) {
1274 wl1271_error("failed to send start firmware logger command"); 1642 wl1271_error("failed to send ROC command");
1275 goto out_free; 1643 goto out_free;
1276 } 1644 }
1277 1645
@@ -1282,28 +1650,41 @@ out:
1282 return ret; 1650 return ret;
1283} 1651}
1284 1652
1285int wl12xx_cmd_stop_fwlog(struct wl1271 *wl) 1653int wl12xx_roc(struct wl1271 *wl, u8 role_id)
1286{ 1654{
1287 struct wl12xx_cmd_stop_fwlog *cmd;
1288 int ret = 0; 1655 int ret = 0;
1289 1656
1290 wl1271_debug(DEBUG_CMD, "cmd stop firmware logger"); 1657 if (WARN_ON(test_bit(role_id, wl->roc_map)))
1658 return 0;
1291 1659
1292 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1660 ret = wl12xx_cmd_roc(wl, role_id);
1293 if (!cmd) { 1661 if (ret < 0)
1294 ret = -ENOMEM;
1295 goto out; 1662 goto out;
1296 }
1297 1663
1298 ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0); 1664 ret = wl1271_cmd_wait_for_event(wl,
1665 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
1299 if (ret < 0) { 1666 if (ret < 0) {
1300 wl1271_error("failed to send stop firmware logger command"); 1667 wl1271_error("cmd roc event completion error");
1301 goto out_free; 1668 goto out;
1302 } 1669 }
1303 1670
1304out_free: 1671 __set_bit(role_id, wl->roc_map);
1305 kfree(cmd); 1672out:
1673 return ret;
1674}
1675
1676int wl12xx_croc(struct wl1271 *wl, u8 role_id)
1677{
1678 int ret = 0;
1679
1680 if (WARN_ON(!test_bit(role_id, wl->roc_map)))
1681 return 0;
1682
1683 ret = wl12xx_cmd_croc(wl, role_id);
1684 if (ret < 0)
1685 goto out;
1306 1686
1687 __clear_bit(role_id, wl->roc_map);
1307out: 1688out:
1308 return ret; 1689 return ret;
1309} 1690}
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index bba077ecd945..22c2f373dd04 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,7 +36,15 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
36int wl1271_cmd_radio_parms(struct wl1271 *wl); 36int wl1271_cmd_radio_parms(struct wl1271 *wl);
37int wl128x_cmd_radio_parms(struct wl1271 *wl); 37int wl128x_cmd_radio_parms(struct wl1271 *wl);
38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl); 38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
39int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type); 39int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
40int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
41int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
42int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
43int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
44int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
45int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
46int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
47int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
40int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 48int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
41int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 49int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
42int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 50int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -56,20 +64,18 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
56int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr); 64int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
57int wl1271_build_qos_null_data(struct wl1271 *wl); 65int wl1271_build_qos_null_data(struct wl1271 *wl);
58int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); 66int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
59int wl1271_cmd_set_sta_default_wep_key(struct wl1271 *wl, u8 id); 67int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
60int wl1271_cmd_set_ap_default_wep_key(struct wl1271 *wl, u8 id);
61int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 68int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
62 u8 key_size, const u8 *key, const u8 *addr, 69 u8 key_size, const u8 *key, const u8 *addr,
63 u32 tx_seq_32, u16 tx_seq_16); 70 u32 tx_seq_32, u16 tx_seq_16);
64int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 71int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
65 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, 72 u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
66 u16 tx_seq_16); 73 u16 tx_seq_16);
67int wl1271_cmd_disconnect(struct wl1271 *wl); 74int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
68int wl1271_cmd_set_sta_state(struct wl1271 *wl); 75int wl12xx_roc(struct wl1271 *wl, u8 role_id);
69int wl1271_cmd_start_bss(struct wl1271 *wl); 76int wl12xx_croc(struct wl1271 *wl, u8 role_id);
70int wl1271_cmd_stop_bss(struct wl1271 *wl); 77int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
71int wl1271_cmd_add_sta(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid); 78int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
72int wl1271_cmd_remove_sta(struct wl1271 *wl, u8 hlid);
73int wl12xx_cmd_config_fwlog(struct wl1271 *wl); 79int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
74int wl12xx_cmd_start_fwlog(struct wl1271 *wl); 80int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
75int wl12xx_cmd_stop_fwlog(struct wl1271 *wl); 81int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
@@ -83,25 +89,21 @@ enum wl1271_commands {
83 CMD_DISABLE_TX = 6, 89 CMD_DISABLE_TX = 6,
84 CMD_SCAN = 8, 90 CMD_SCAN = 8,
85 CMD_STOP_SCAN = 9, 91 CMD_STOP_SCAN = 9,
86 CMD_START_JOIN = 11,
87 CMD_SET_KEYS = 12, 92 CMD_SET_KEYS = 12,
88 CMD_READ_MEMORY = 13, 93 CMD_READ_MEMORY = 13,
89 CMD_WRITE_MEMORY = 14, 94 CMD_WRITE_MEMORY = 14,
90 CMD_SET_TEMPLATE = 19, 95 CMD_SET_TEMPLATE = 19,
91 CMD_TEST = 23, 96 CMD_TEST = 23,
92 CMD_NOISE_HIST = 28, 97 CMD_NOISE_HIST = 28,
93 CMD_LNA_CONTROL = 32, 98 CMD_QUIET_ELEMENT_SET_STATE = 29,
94 CMD_SET_BCN_MODE = 33, 99 CMD_SET_BCN_MODE = 33,
95 CMD_MEASUREMENT = 34, 100 CMD_MEASUREMENT = 34,
96 CMD_STOP_MEASUREMENT = 35, 101 CMD_STOP_MEASUREMENT = 35,
97 CMD_DISCONNECT = 36,
98 CMD_SET_PS_MODE = 37, 102 CMD_SET_PS_MODE = 37,
99 CMD_CHANNEL_SWITCH = 38, 103 CMD_CHANNEL_SWITCH = 38,
100 CMD_STOP_CHANNEL_SWICTH = 39, 104 CMD_STOP_CHANNEL_SWICTH = 39,
101 CMD_AP_DISCOVERY = 40, 105 CMD_AP_DISCOVERY = 40,
102 CMD_STOP_AP_DISCOVERY = 41, 106 CMD_STOP_AP_DISCOVERY = 41,
103 CMD_SPS_SCAN = 42,
104 CMD_STOP_SPS_SCAN = 43,
105 CMD_HEALTH_CHECK = 45, 107 CMD_HEALTH_CHECK = 45,
106 CMD_DEBUG = 46, 108 CMD_DEBUG = 46,
107 CMD_TRIGGER_SCAN_TO = 47, 109 CMD_TRIGGER_SCAN_TO = 47,
@@ -109,16 +111,30 @@ enum wl1271_commands {
109 CMD_CONNECTION_SCAN_SSID_CFG = 49, 111 CMD_CONNECTION_SCAN_SSID_CFG = 49,
110 CMD_START_PERIODIC_SCAN = 50, 112 CMD_START_PERIODIC_SCAN = 50,
111 CMD_STOP_PERIODIC_SCAN = 51, 113 CMD_STOP_PERIODIC_SCAN = 51,
112 CMD_SET_STA_STATE = 52, 114 CMD_SET_PEER_STATE = 52,
113 CMD_CONFIG_FWLOGGER = 53, 115 CMD_REMAIN_ON_CHANNEL = 53,
114 CMD_START_FWLOGGER = 54, 116 CMD_CANCEL_REMAIN_ON_CHANNEL = 54,
115 CMD_STOP_FWLOGGER = 55,
116 117
117 /* AP mode commands */ 118 CMD_CONFIG_FWLOGGER = 55,
118 CMD_BSS_START = 60, 119 CMD_START_FWLOGGER = 56,
119 CMD_BSS_STOP = 61, 120 CMD_STOP_FWLOGGER = 57,
120 CMD_ADD_STA = 62, 121
121 CMD_REMOVE_STA = 63, 122 /* AP commands */
123 CMD_ADD_PEER = 62,
124 CMD_REMOVE_PEER = 63,
125
126 /* Role API */
127 CMD_ROLE_ENABLE = 70,
128 CMD_ROLE_DISABLE = 71,
129 CMD_ROLE_START = 72,
130 CMD_ROLE_STOP = 73,
131
132 /* WIFI Direct */
133 CMD_WFD_START_DISCOVERY = 80,
134 CMD_WFD_STOP_DISCOVERY = 81,
135 CMD_WFD_ATTRIBUTE_CONFIG = 82,
136
137 CMD_NOP = 100,
122 138
123 NUM_COMMANDS, 139 NUM_COMMANDS,
124 MAX_COMMAND_ID = 0xFFFF, 140 MAX_COMMAND_ID = 0xFFFF,
@@ -147,21 +163,20 @@ enum cmd_templ {
147 CMD_TEMPL_CTS, /* 163 CMD_TEMPL_CTS, /*
148 * For CTS-to-self (FastCTS) mechanism 164 * For CTS-to-self (FastCTS) mechanism
149 * for BT/WLAN coexistence (SoftGemini). */ 165 * for BT/WLAN coexistence (SoftGemini). */
150 CMD_TEMPL_ARP_RSP, 166 CMD_TEMPL_AP_BEACON,
151 CMD_TEMPL_LINK_MEASUREMENT_REPORT,
152
153 /* AP-mode specific */
154 CMD_TEMPL_AP_BEACON = 13,
155 CMD_TEMPL_AP_PROBE_RESPONSE, 167 CMD_TEMPL_AP_PROBE_RESPONSE,
156 CMD_TEMPL_AP_ARP_RSP, 168 CMD_TEMPL_ARP_RSP,
157 CMD_TEMPL_DEAUTH_AP, 169 CMD_TEMPL_DEAUTH_AP,
170 CMD_TEMPL_TEMPORARY,
171 CMD_TEMPL_LINK_MEASUREMENT_REPORT,
158 172
159 CMD_TEMPL_MAX = 0xff 173 CMD_TEMPL_MAX = 0xff
160}; 174};
161 175
162/* unit ms */ 176/* unit ms */
163#define WL1271_COMMAND_TIMEOUT 2000 177#define WL1271_COMMAND_TIMEOUT 2000
164#define WL1271_CMD_TEMPL_MAX_SIZE 252 178#define WL1271_CMD_TEMPL_DFLT_SIZE 252
179#define WL1271_CMD_TEMPL_MAX_SIZE 548
165#define WL1271_EVENT_TIMEOUT 750 180#define WL1271_EVENT_TIMEOUT 750
166 181
167struct wl1271_cmd_header { 182struct wl1271_cmd_header {
@@ -193,6 +208,8 @@ enum {
193 CMD_STATUS_WRONG_NESTING = 19, 208 CMD_STATUS_WRONG_NESTING = 19,
194 CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/ 209 CMD_STATUS_TIMEOUT = 21, /* Driver internal use.*/
195 CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/ 210 CMD_STATUS_FW_RESET = 22, /* Driver internal use.*/
211 CMD_STATUS_TEMPLATE_OOM = 23,
212 CMD_STATUS_NO_RX_BA_SESSION = 24,
196 MAX_COMMAND_STATUS = 0xff 213 MAX_COMMAND_STATUS = 0xff
197}; 214};
198 215
@@ -210,38 +227,114 @@ enum {
210#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 227#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
211#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10 228#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
212 229
213struct wl1271_cmd_join { 230struct wl12xx_cmd_role_enable {
214 struct wl1271_cmd_header header; 231 struct wl1271_cmd_header header;
215 232
216 __le32 bssid_lsb; 233 u8 role_id;
217 __le16 bssid_msb; 234 u8 role_type;
218 __le16 beacon_interval; /* in TBTTs */ 235 u8 mac_address[ETH_ALEN];
219 __le32 rx_config_options; 236} __packed;
220 __le32 rx_filter_options;
221 237
222 /* 238struct wl12xx_cmd_role_disable {
223 * The target uses this field to determine the rate at 239 struct wl1271_cmd_header header;
224 * which to transmit control frame responses (such as 240
225 * ACK or CTS frames). 241 u8 role_id;
226 */ 242 u8 padding[3];
227 __le32 basic_rate_set; 243} __packed;
228 __le32 supported_rate_set; 244
229 u8 dtim_interval; 245enum wl12xx_band {
230 /* 246 WL12XX_BAND_2_4GHZ = 0,
231 * bits 0-2: This bitwise field specifies the type 247 WL12XX_BAND_5GHZ = 1,
232 * of BSS to start or join (BSS_TYPE_*). 248 WL12XX_BAND_JAPAN_4_9_GHZ = 2,
233 * bit 4: Band - The radio band in which to join 249 WL12XX_BAND_DEFAULT = WL12XX_BAND_2_4GHZ,
234 * or start. 250 WL12XX_BAND_INVALID = 0x7E,
235 * 0 - 2.4GHz band 251 WL12XX_BAND_MAX_RADIO = 0x7F,
236 * 1 - 5GHz band 252};
237 * bits 3, 5-7: Reserved 253
238 */ 254struct wl12xx_cmd_role_start {
239 u8 bss_type; 255 struct wl1271_cmd_header header;
256
257 u8 role_id;
258 u8 band;
240 u8 channel; 259 u8 channel;
241 u8 ssid_len; 260 u8 padding;
242 u8 ssid[IEEE80211_MAX_SSID_LEN]; 261
243 u8 ctrl; /* JOIN_CMD_CTRL_* */ 262 union {
244 u8 reserved[3]; 263 struct {
264 u8 hlid;
265 u8 session;
266 u8 padding_1[54];
267 } __packed device;
268 /* sta & p2p_cli use the same struct */
269 struct {
270 u8 bssid[ETH_ALEN];
271 u8 hlid; /* data hlid */
272 u8 session;
273 __le32 remote_rates; /* remote supported rates */
274
275 /*
276 * The target uses this field to determine the rate at
277 * which to transmit control frame responses (such as
278 * ACK or CTS frames).
279 */
280 __le32 basic_rate_set;
281 __le32 local_rates; /* local supported rates */
282
283 u8 ssid_type;
284 u8 ssid_len;
285 u8 ssid[IEEE80211_MAX_SSID_LEN];
286
287 __le16 beacon_interval; /* in TBTTs */
288 } __packed sta;
289 struct {
290 u8 bssid[ETH_ALEN];
291 u8 hlid; /* data hlid */
292 u8 dtim_interval;
293 __le32 remote_rates; /* remote supported rates */
294
295 __le32 basic_rate_set;
296 __le32 local_rates; /* local supported rates */
297
298 u8 ssid_type;
299 u8 ssid_len;
300 u8 ssid[IEEE80211_MAX_SSID_LEN];
301
302 __le16 beacon_interval; /* in TBTTs */
303
304 u8 padding_1[4];
305 } __packed ibss;
306 /* ap & p2p_go use the same struct */
307 struct {
308 __le16 aging_period; /* in secs */
309 u8 beacon_expiry; /* in ms */
310 u8 bss_index;
311 /* The host link id for the AP's global queue */
312 u8 global_hlid;
313 /* The host link id for the AP's broadcast queue */
314 u8 broadcast_hlid;
315
316 __le16 beacon_interval; /* in TBTTs */
317
318 __le32 basic_rate_set;
319 __le32 local_rates; /* local supported rates */
320
321 u8 dtim_interval;
322
323 u8 ssid_type;
324 u8 ssid_len;
325 u8 ssid[IEEE80211_MAX_SSID_LEN];
326
327 u8 padding_1[5];
328 } __packed ap;
329 };
330} __packed;
331
332struct wl12xx_cmd_role_stop {
333 struct wl1271_cmd_header header;
334
335 u8 role_id;
336 u8 disc_type; /* only STA and P2P_CLI */
337 __le16 reason; /* only STA and P2P_CLI */
245} __packed; 338} __packed;
246 339
247struct cmd_enabledisable_path { 340struct cmd_enabledisable_path {
@@ -287,8 +380,9 @@ enum wl1271_cmd_ps_mode {
287struct wl1271_cmd_ps_params { 380struct wl1271_cmd_ps_params {
288 struct wl1271_cmd_header header; 381 struct wl1271_cmd_header header;
289 382
383 u8 role_id;
290 u8 ps_mode; /* STATION_* */ 384 u8 ps_mode; /* STATION_* */
291 u8 padding[3]; 385 u8 padding[2];
292} __packed; 386} __packed;
293 387
294/* HW encryption keys */ 388/* HW encryption keys */
@@ -301,6 +395,12 @@ enum wl1271_cmd_key_action {
301 MAX_KEY_ACTION = 0xffff, 395 MAX_KEY_ACTION = 0xffff,
302}; 396};
303 397
398enum wl1271_cmd_lid_key_type {
399 UNICAST_LID_TYPE = 0,
400 BROADCAST_LID_TYPE = 1,
401 WEP_DEFAULT_LID_TYPE = 2
402};
403
304enum wl1271_cmd_key_type { 404enum wl1271_cmd_key_type {
305 KEY_NONE = 0, 405 KEY_NONE = 0,
306 KEY_WEP = 1, 406 KEY_WEP = 1,
@@ -309,44 +409,7 @@ enum wl1271_cmd_key_type {
309 KEY_GEM = 4, 409 KEY_GEM = 4,
310}; 410};
311 411
312/* FIXME: Add description for key-types */ 412struct wl1271_cmd_set_keys {
313
314struct wl1271_cmd_set_sta_keys {
315 struct wl1271_cmd_header header;
316
317 /* Ignored for default WEP key */
318 u8 addr[ETH_ALEN];
319
320 /* key_action_e */
321 __le16 key_action;
322
323 __le16 reserved_1;
324
325 /* key size in bytes */
326 u8 key_size;
327
328 /* key_type_e */
329 u8 key_type;
330 u8 ssid_profile;
331
332 /*
333 * TKIP, AES: frame's key id field.
334 * For WEP default key: key id;
335 */
336 u8 id;
337 u8 reserved_2[6];
338 u8 key[MAX_KEY_SIZE];
339 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
340 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
341} __packed;
342
343enum wl1271_cmd_lid_key_type {
344 UNICAST_LID_TYPE = 0,
345 BROADCAST_LID_TYPE = 1,
346 WEP_DEFAULT_LID_TYPE = 2
347};
348
349struct wl1271_cmd_set_ap_keys {
350 struct wl1271_cmd_header header; 413 struct wl1271_cmd_header header;
351 414
352 /* 415 /*
@@ -496,69 +559,39 @@ enum wl1271_disconnect_type {
496 DISCONNECT_DISASSOC 559 DISCONNECT_DISASSOC
497}; 560};
498 561
499struct wl1271_cmd_disconnect {
500 struct wl1271_cmd_header header;
501
502 __le32 rx_config_options;
503 __le32 rx_filter_options;
504
505 __le16 reason;
506 u8 type;
507
508 u8 padding;
509} __packed;
510
511#define WL1271_CMD_STA_STATE_CONNECTED 1 562#define WL1271_CMD_STA_STATE_CONNECTED 1
512 563
513struct wl1271_cmd_set_sta_state { 564struct wl12xx_cmd_set_peer_state {
514 struct wl1271_cmd_header header; 565 struct wl1271_cmd_header header;
515 566
567 u8 hlid;
516 u8 state; 568 u8 state;
517 u8 padding[3]; 569 u8 padding[2];
518} __packed; 570} __packed;
519 571
520enum wl1271_ssid_type { 572struct wl12xx_cmd_roc {
521 SSID_TYPE_PUBLIC = 0, 573 struct wl1271_cmd_header header;
522 SSID_TYPE_HIDDEN = 1 574
575 u8 role_id;
576 u8 channel;
577 u8 band;
578 u8 padding;
523}; 579};
524 580
525struct wl1271_cmd_bss_start { 581struct wl12xx_cmd_croc {
526 struct wl1271_cmd_header header; 582 struct wl1271_cmd_header header;
527 583
528 /* wl1271_ssid_type */ 584 u8 role_id;
529 u8 ssid_type; 585 u8 padding[3];
530 u8 ssid_len; 586};
531 u8 ssid[IEEE80211_MAX_SSID_LEN];
532 u8 padding_1[2];
533
534 /* Basic rate set */
535 __le32 basic_rate_set;
536 /* Aging period in seconds*/
537 __le16 aging_period;
538 587
539 /* 588enum wl12xx_ssid_type {
540 * This field specifies the time between target beacon 589 WL12XX_SSID_TYPE_PUBLIC = 0,
541 * transmission times (TBTTs), in time units (TUs). 590 WL12XX_SSID_TYPE_HIDDEN = 1,
542 * Valid values are 1 to 1024. 591 WL12XX_SSID_TYPE_ANY = 2,
543 */ 592};
544 __le16 beacon_interval;
545 u8 bssid[ETH_ALEN];
546 u8 bss_index;
547 /* Radio band */
548 u8 band;
549 u8 channel;
550 /* The host link id for the AP's global queue */
551 u8 global_hlid;
552 /* The host link id for the AP's broadcast queue */
553 u8 broadcast_hlid;
554 /* DTIM count */
555 u8 dtim_interval;
556 /* Beacon expiry time in ms */
557 u8 beacon_expiry;
558 u8 padding_2[3];
559} __packed;
560 593
561struct wl1271_cmd_add_sta { 594struct wl12xx_cmd_add_peer {
562 struct wl1271_cmd_header header; 595 struct wl1271_cmd_header header;
563 596
564 u8 addr[ETH_ALEN]; 597 u8 addr[ETH_ALEN];
@@ -572,7 +605,7 @@ struct wl1271_cmd_add_sta {
572 u8 padding1; 605 u8 padding1;
573} __packed; 606} __packed;
574 607
575struct wl1271_cmd_remove_sta { 608struct wl12xx_cmd_remove_peer {
576 struct wl1271_cmd_header header; 609 struct wl1271_cmd_header header;
577 610
578 u8 hlid; 611 u8 hlid;
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 6080e01d92c6..82f205c43342 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -99,40 +99,75 @@ enum {
99 99
100enum { 100enum {
101 /* 101 /*
102 * PER threshold in PPM of the BT voice 102 * Configure the min and max time BT gains the antenna
103 * in WLAN / BT master basic rate
103 * 104 *
104 * Range: 0 - 10000000 105 * Range: 0 - 255 (ms)
105 */ 106 */
106 CONF_SG_BT_PER_THRESHOLD = 0, 107 CONF_SG_ACL_BT_MASTER_MIN_BR = 0,
108 CONF_SG_ACL_BT_MASTER_MAX_BR,
107 109
108 /* 110 /*
109 * Number of consequent RX_ACTIVE activities to override BT voice 111 * Configure the min and max time BT gains the antenna
110 * frames to ensure WLAN connection 112 * in WLAN / BT slave basic rate
111 * 113 *
112 * Range: 0 - 100 114 * Range: 0 - 255 (ms)
113 */ 115 */
114 CONF_SG_HV3_MAX_OVERRIDE, 116 CONF_SG_ACL_BT_SLAVE_MIN_BR,
117 CONF_SG_ACL_BT_SLAVE_MAX_BR,
115 118
116 /* 119 /*
117 * Defines the PER threshold of the BT voice 120 * Configure the min and max time BT gains the antenna
121 * in WLAN / BT master EDR
118 * 122 *
119 * Range: 0 - 65000 123 * Range: 0 - 255 (ms)
120 */ 124 */
121 CONF_SG_BT_NFS_SAMPLE_INTERVAL, 125 CONF_SG_ACL_BT_MASTER_MIN_EDR,
126 CONF_SG_ACL_BT_MASTER_MAX_EDR,
122 127
123 /* 128 /*
124 * Defines the load ratio of BT 129 * Configure the min and max time BT gains the antenna
130 * in WLAN / BT slave EDR
125 * 131 *
126 * Range: 0 - 100 (%) 132 * Range: 0 - 255 (ms)
127 */ 133 */
128 CONF_SG_BT_LOAD_RATIO, 134 CONF_SG_ACL_BT_SLAVE_MIN_EDR,
135 CONF_SG_ACL_BT_SLAVE_MAX_EDR,
129 136
130 /* 137 /*
131 * Defines whether the SG will force WLAN host to enter/exit PSM 138 * The maximum time WLAN can gain the antenna
139 * in WLAN PSM / BT master/slave BR
132 * 140 *
133 * Range: 1 - SG can force, 0 - host handles PSM 141 * Range: 0 - 255 (ms)
134 */ 142 */
135 CONF_SG_AUTO_PS_MODE, 143 CONF_SG_ACL_WLAN_PS_MASTER_BR,
144 CONF_SG_ACL_WLAN_PS_SLAVE_BR,
145
146 /*
147 * The maximum time WLAN can gain the antenna
148 * in WLAN PSM / BT master/slave EDR
149 *
150 * Range: 0 - 255 (ms)
151 */
152 CONF_SG_ACL_WLAN_PS_MASTER_EDR,
153 CONF_SG_ACL_WLAN_PS_SLAVE_EDR,
154
155 /* TODO: explain these values */
156 CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR,
157 CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR,
158 CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR,
159 CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR,
160 CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR,
161 CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR,
162 CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR,
163 CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR,
164
165 CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR,
166 CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR,
167 CONF_SG_ACL_PASSIVE_SCAN_BT_BR,
168 CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR,
169 CONF_SG_ACL_PASSIVE_SCAN_BT_EDR,
170 CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR,
136 171
137 /* 172 /*
138 * Compensation percentage of probe requests when scan initiated 173 * Compensation percentage of probe requests when scan initiated
@@ -151,102 +186,70 @@ enum {
151 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3, 186 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
152 187
153 /* 188 /*
154 * Defines antenna configuration (single/dual antenna) 189 * Compensation percentage of WLAN active scan window if initiated
155 * 190 * during BT A2DP
156 * Range: 0 - single antenna, 1 - dual antenna
157 */
158 CONF_SG_ANTENNA_CONFIGURATION,
159
160 /*
161 * The threshold (percent) of max consequtive beacon misses before
162 * increasing priority of beacon reception.
163 *
164 * Range: 0 - 100 (%)
165 */
166 CONF_SG_BEACON_MISS_PERCENT,
167
168 /*
169 * The rate threshold below which receiving a data frame from the AP
170 * will increase the priority of the data frame above BT traffic.
171 *
172 * Range: 0,2, 5(=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
173 */
174 CONF_SG_RATE_ADAPT_THRESH,
175
176 /*
177 * Not used currently.
178 * 191 *
179 * Range: 0 192 * Range: 0 - 1000 (%)
180 */ 193 */
181 CONF_SG_RATE_ADAPT_SNR, 194 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
182 195
183 /* 196 /*
184 * Configure the min and max time BT gains the antenna 197 * Compensation percentage of WLAN passive scan window if initiated
185 * in WLAN PSM / BT master basic rate 198 * during BT A2DP BR
186 * 199 *
187 * Range: 0 - 255 (ms) 200 * Range: 0 - 1000 (%)
188 */ 201 */
189 CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR, 202 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR,
190 CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
191 203
192 /* 204 /*
193 * The time after it expires no new WLAN trigger frame is trasmitted 205 * Compensation percentage of WLAN passive scan window if initiated
194 * in WLAN PSM / BT master basic rate 206 * during BT A2DP EDR
195 * 207 *
196 * Range: 0 - 255 (ms) 208 * Range: 0 - 1000 (%)
197 */ 209 */
198 CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR, 210 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR,
199 211
200 /* 212 /*
201 * Configure the min and max time BT gains the antenna 213 * Compensation percentage of WLAN passive scan window if initiated
202 * in WLAN PSM / BT slave basic rate 214 * during BT voice
203 * 215 *
204 * Range: 0 - 255 (ms) 216 * Range: 0 - 1000 (%)
205 */ 217 */
206 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR, 218 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
207 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
208 219
209 /* 220 /* TODO: explain these values */
210 * The time after it expires no new WLAN trigger frame is trasmitted 221 CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN,
211 * in WLAN PSM / BT slave basic rate 222 CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN,
212 * 223 CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN,
213 * Range: 0 - 255 (ms)
214 */
215 CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
216 224
217 /* 225 /*
218 * Configure the min and max time BT gains the antenna 226 * Defines whether the SG will force WLAN host to enter/exit PSM
219 * in WLAN PSM / BT master EDR
220 * 227 *
221 * Range: 0 - 255 (ms) 228 * Range: 1 - SG can force, 0 - host handles PSM
222 */ 229 */
223 CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR, 230 CONF_SG_STA_FORCE_PS_IN_BT_SCO,
224 CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
225 231
226 /* 232 /*
227 * The time after it expires no new WLAN trigger frame is trasmitted 233 * Defines antenna configuration (single/dual antenna)
228 * in WLAN PSM / BT master EDR
229 * 234 *
230 * Range: 0 - 255 (ms) 235 * Range: 0 - single antenna, 1 - dual antenna
231 */ 236 */
232 CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR, 237 CONF_SG_ANTENNA_CONFIGURATION,
233 238
234 /* 239 /*
235 * Configure the min and max time BT gains the antenna 240 * The threshold (percent) of max consecutive beacon misses before
236 * in WLAN PSM / BT slave EDR 241 * increasing priority of beacon reception.
237 * 242 *
238 * Range: 0 - 255 (ms) 243 * Range: 0 - 100 (%)
239 */ 244 */
240 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR, 245 CONF_SG_BEACON_MISS_PERCENT,
241 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
242 246
243 /* 247 /*
244 * The time after it expires no new WLAN trigger frame is trasmitted 248 * Protection time of the DHCP procedure.
245 * in WLAN PSM / BT slave EDR
246 * 249 *
247 * Range: 0 - 255 (ms) 250 * Range: 0 - 100000 (ms)
248 */ 251 */
249 CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR, 252 CONF_SG_DHCP_TIME,
250 253
251 /* 254 /*
252 * RX guard time before the beginning of a new BT voice frame during 255 * RX guard time before the beginning of a new BT voice frame during
@@ -273,166 +276,59 @@ enum {
273 */ 276 */
274 CONF_SG_ADAPTIVE_RXT_TXT, 277 CONF_SG_ADAPTIVE_RXT_TXT,
275 278
276 /* 279 /* TODO: explain this value */
277 * The used WLAN legacy service period during active BT ACL link 280 CONF_SG_GENERAL_USAGE_BIT_MAP,
278 *
279 * Range: 0 - 255 (ms)
280 */
281 CONF_SG_PS_POLL_TIMEOUT,
282
283 /*
284 * The used WLAN UPSD service period during active BT ACL link
285 *
286 * Range: 0 - 255 (ms)
287 */
288 CONF_SG_UPSD_TIMEOUT,
289
290 /*
291 * Configure the min and max time BT gains the antenna
292 * in WLAN Active / BT master EDR
293 *
294 * Range: 0 - 255 (ms)
295 */
296 CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
297 CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
298
299 /*
300 * The maximum time WLAN can gain the antenna for
301 * in WLAN Active / BT master EDR
302 *
303 * Range: 0 - 255 (ms)
304 */
305 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
306
307 /*
308 * Configure the min and max time BT gains the antenna
309 * in WLAN Active / BT slave EDR
310 *
311 * Range: 0 - 255 (ms)
312 */
313 CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
314 CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
315 281
316 /* 282 /*
317 * The maximum time WLAN can gain the antenna for 283 * Number of consecutive BT voice frames not interrupted by WLAN
318 * in WLAN Active / BT slave EDR
319 * 284 *
320 * Range: 0 - 255 (ms) 285 * Range: 0 - 100
321 */ 286 */
322 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR, 287 CONF_SG_HV3_MAX_SERVED,
323 288
324 /* 289 /*
325 * Configure the min and max time BT gains the antenna 290 * The used WLAN legacy service period during active BT ACL link
326 * in WLAN Active / BT basic rate
327 * 291 *
328 * Range: 0 - 255 (ms) 292 * Range: 0 - 255 (ms)
329 */ 293 */
330 CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR, 294 CONF_SG_PS_POLL_TIMEOUT,
331 CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
332 295
333 /* 296 /*
334 * The maximum time WLAN can gain the antenna for 297 * The used WLAN UPSD service period during active BT ACL link
335 * in WLAN Active / BT basic rate
336 * 298 *
337 * Range: 0 - 255 (ms) 299 * Range: 0 - 255 (ms)
338 */ 300 */
339 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR, 301 CONF_SG_UPSD_TIMEOUT,
340
341 /*
342 * Compensation percentage of WLAN passive scan window if initiated
343 * during BT voice
344 *
345 * Range: 0 - 1000 (%)
346 */
347 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
348
349 /*
350 * Compensation percentage of WLAN passive scan window if initiated
351 * during BT A2DP
352 *
353 * Range: 0 - 1000 (%)
354 */
355 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
356
357 /*
358 * Fixed time ensured for BT traffic to gain the antenna during WLAN
359 * passive scan.
360 *
361 * Range: 0 - 1000 ms
362 */
363 CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
364
365 /*
366 * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
367 * passive scan.
368 *
369 * Range: 0 - 1000 ms
370 */
371 CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
372 302
373 /* 303 CONF_SG_CONSECUTIVE_CTS_THRESHOLD,
374 * Number of consequent BT voice frames not interrupted by WLAN 304 CONF_SG_STA_RX_WINDOW_AFTER_DTIM,
375 * 305 CONF_SG_STA_CONNECTION_PROTECTION_TIME,
376 * Range: 0 - 100
377 */
378 CONF_SG_HV3_MAX_SERVED,
379 306
380 /* 307 /* AP params */
381 * Protection time of the DHCP procedure. 308 CONF_AP_BEACON_MISS_TX,
382 * 309 CONF_AP_RX_WINDOW_AFTER_BEACON,
383 * Range: 0 - 100000 (ms) 310 CONF_AP_BEACON_WINDOW_INTERVAL,
384 */ 311 CONF_AP_CONNECTION_PROTECTION_TIME,
385 CONF_SG_DHCP_TIME, 312 CONF_AP_BT_ACL_VAL_BT_SERVE_TIME,
313 CONF_AP_BT_ACL_VAL_WL_SERVE_TIME,
386 314
387 /*
388 * Compensation percentage of WLAN active scan window if initiated
389 * during BT A2DP
390 *
391 * Range: 0 - 1000 (%)
392 */
393 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
394 CONF_SG_TEMP_PARAM_1, 315 CONF_SG_TEMP_PARAM_1,
395 CONF_SG_TEMP_PARAM_2, 316 CONF_SG_TEMP_PARAM_2,
396 CONF_SG_TEMP_PARAM_3, 317 CONF_SG_TEMP_PARAM_3,
397 CONF_SG_TEMP_PARAM_4, 318 CONF_SG_TEMP_PARAM_4,
398 CONF_SG_TEMP_PARAM_5, 319 CONF_SG_TEMP_PARAM_5,
399
400 /*
401 * AP beacon miss
402 *
403 * Range: 0 - 255
404 */
405 CONF_SG_AP_BEACON_MISS_TX,
406
407 /*
408 * AP RX window length
409 *
410 * Range: 0 - 50
411 */
412 CONF_SG_RX_WINDOW_LENGTH,
413
414 /*
415 * AP connection protection time
416 *
417 * Range: 0 - 5000
418 */
419 CONF_SG_AP_CONNECTION_PROTECTION_TIME,
420
421 CONF_SG_TEMP_PARAM_6, 320 CONF_SG_TEMP_PARAM_6,
422 CONF_SG_TEMP_PARAM_7, 321 CONF_SG_TEMP_PARAM_7,
423 CONF_SG_TEMP_PARAM_8, 322 CONF_SG_TEMP_PARAM_8,
424 CONF_SG_TEMP_PARAM_9, 323 CONF_SG_TEMP_PARAM_9,
425 CONF_SG_TEMP_PARAM_10, 324 CONF_SG_TEMP_PARAM_10,
426 325
427 CONF_SG_STA_PARAMS_MAX = CONF_SG_TEMP_PARAM_5 + 1, 326 CONF_SG_PARAMS_MAX,
428 CONF_SG_AP_PARAMS_MAX = CONF_SG_TEMP_PARAM_10 + 1,
429
430 CONF_SG_PARAMS_ALL = 0xff 327 CONF_SG_PARAMS_ALL = 0xff
431}; 328};
432 329
433struct conf_sg_settings { 330struct conf_sg_settings {
434 u32 sta_params[CONF_SG_STA_PARAMS_MAX]; 331 u32 params[CONF_SG_PARAMS_MAX];
435 u32 ap_params[CONF_SG_AP_PARAMS_MAX];
436 u8 state; 332 u8 state;
437}; 333};
438 334
@@ -545,6 +441,11 @@ struct conf_rx_settings {
545 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \ 441 CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
546 CONF_HW_BIT_RATE_54MBPS) 442 CONF_HW_BIT_RATE_54MBPS)
547 443
444#define CONF_TX_MCS_RATES (CONF_HW_BIT_RATE_MCS_0 | \
445 CONF_HW_BIT_RATE_MCS_1 | CONF_HW_BIT_RATE_MCS_2 | \
446 CONF_HW_BIT_RATE_MCS_3 | CONF_HW_BIT_RATE_MCS_4 | \
447 CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
448 CONF_HW_BIT_RATE_MCS_7)
548 449
549/* 450/*
550 * Default rates for management traffic when operating in AP mode. This 451 * Default rates for management traffic when operating in AP mode. This
@@ -661,6 +562,9 @@ struct conf_tx_ac_category {
661 562
662#define CONF_TX_MAX_TID_COUNT 8 563#define CONF_TX_MAX_TID_COUNT 8
663 564
565/* Allow TX BA on all TIDs but 6,7. These are currently reserved in the FW */
566#define CONF_TX_BA_ENABLED_TID_BITMAP 0x3F
567
664enum { 568enum {
665 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/ 569 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
666 CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/ 570 CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/
@@ -913,7 +817,7 @@ struct conf_conn_settings {
913 struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT]; 817 struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
914 818
915 /* 819 /*
916 * The number of consequtive beacons to lose, before the firmware 820 * The number of consecutive beacons to lose, before the firmware
917 * becomes out of synch. 821 * becomes out of synch.
918 * 822 *
919 * Range: u32 823 * Range: u32
@@ -951,7 +855,7 @@ struct conf_conn_settings {
951 u8 rx_broadcast_in_ps; 855 u8 rx_broadcast_in_ps;
952 856
953 /* 857 /*
954 * Consequtive PS Poll failures before sending event to driver 858 * Consecutive PS Poll failures before sending event to driver
955 * 859 *
956 * Range: u8 860 * Range: u8
957 */ 861 */
@@ -1199,8 +1103,12 @@ struct conf_rf_settings {
1199}; 1103};
1200 1104
1201struct conf_ht_setting { 1105struct conf_ht_setting {
1202 u16 tx_ba_win_size; 1106 u8 rx_ba_win_size;
1107 u8 tx_ba_win_size;
1203 u16 inactivity_timeout; 1108 u16 inactivity_timeout;
1109
1110 /* bitmap of enabled TIDs for TX BA sessions */
1111 u8 tx_ba_tid_bitmap;
1204}; 1112};
1205 1113
1206struct conf_memory_settings { 1114struct conf_memory_settings {
@@ -1309,6 +1217,25 @@ struct conf_fwlog {
1309 u8 threshold; 1217 u8 threshold;
1310}; 1218};
1311 1219
1220#define ACX_RATE_MGMT_NUM_OF_RATES 13
1221struct conf_rate_policy_settings {
1222 u16 rate_retry_score;
1223 u16 per_add;
1224 u16 per_th1;
1225 u16 per_th2;
1226 u16 max_per;
1227 u8 inverse_curiosity_factor;
1228 u8 tx_fail_low_th;
1229 u8 tx_fail_high_th;
1230 u8 per_alpha_shift;
1231 u8 per_add_shift;
1232 u8 per_beta1_shift;
1233 u8 per_beta2_shift;
1234 u8 rate_check_up;
1235 u8 rate_check_down;
1236 u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
1237};
1238
1312struct conf_drv_settings { 1239struct conf_drv_settings {
1313 struct conf_sg_settings sg; 1240 struct conf_sg_settings sg;
1314 struct conf_rx_settings rx; 1241 struct conf_rx_settings rx;
@@ -1326,6 +1253,7 @@ struct conf_drv_settings {
1326 struct conf_fm_coex fm_coex; 1253 struct conf_fm_coex fm_coex;
1327 struct conf_rx_streaming_settings rx_streaming; 1254 struct conf_rx_streaming_settings rx_streaming;
1328 struct conf_fwlog fwlog; 1255 struct conf_fwlog fwlog;
1256 struct conf_rate_policy_settings rate;
1329 u8 hci_io_ds; 1257 u8 hci_io_ds;
1330}; 1258};
1331 1259
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 37934b5601cd..d59354f53702 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -339,10 +339,11 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
339#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x") 339#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
340 340
341 DRIVER_STATE_PRINT_INT(tx_blocks_available); 341 DRIVER_STATE_PRINT_INT(tx_blocks_available);
342 DRIVER_STATE_PRINT_INT(tx_allocated_blocks[0]); 342 DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
343 DRIVER_STATE_PRINT_INT(tx_allocated_blocks[1]); 343 DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
344 DRIVER_STATE_PRINT_INT(tx_allocated_blocks[2]); 344 DRIVER_STATE_PRINT_INT(tx_allocated_pkts[1]);
345 DRIVER_STATE_PRINT_INT(tx_allocated_blocks[3]); 345 DRIVER_STATE_PRINT_INT(tx_allocated_pkts[2]);
346 DRIVER_STATE_PRINT_INT(tx_allocated_pkts[3]);
346 DRIVER_STATE_PRINT_INT(tx_frames_cnt); 347 DRIVER_STATE_PRINT_INT(tx_frames_cnt);
347 DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]); 348 DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
348 DRIVER_STATE_PRINT_INT(tx_queue_count[0]); 349 DRIVER_STATE_PRINT_INT(tx_queue_count[0]);
@@ -352,10 +353,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
352 DRIVER_STATE_PRINT_INT(tx_packets_count); 353 DRIVER_STATE_PRINT_INT(tx_packets_count);
353 DRIVER_STATE_PRINT_INT(tx_results_count); 354 DRIVER_STATE_PRINT_INT(tx_results_count);
354 DRIVER_STATE_PRINT_LHEX(flags); 355 DRIVER_STATE_PRINT_LHEX(flags);
355 DRIVER_STATE_PRINT_INT(tx_blocks_freed[0]); 356 DRIVER_STATE_PRINT_INT(tx_blocks_freed);
356 DRIVER_STATE_PRINT_INT(tx_blocks_freed[1]);
357 DRIVER_STATE_PRINT_INT(tx_blocks_freed[2]);
358 DRIVER_STATE_PRINT_INT(tx_blocks_freed[3]);
359 DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb); 357 DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
360 DRIVER_STATE_PRINT_INT(rx_counter); 358 DRIVER_STATE_PRINT_INT(rx_counter);
361 DRIVER_STATE_PRINT_INT(session_counter); 359 DRIVER_STATE_PRINT_INT(session_counter);
@@ -369,9 +367,6 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
369 DRIVER_STATE_PRINT_INT(beacon_int); 367 DRIVER_STATE_PRINT_INT(beacon_int);
370 DRIVER_STATE_PRINT_INT(psm_entry_retry); 368 DRIVER_STATE_PRINT_INT(psm_entry_retry);
371 DRIVER_STATE_PRINT_INT(ps_poll_failures); 369 DRIVER_STATE_PRINT_INT(ps_poll_failures);
372 DRIVER_STATE_PRINT_HEX(filters);
373 DRIVER_STATE_PRINT_HEX(rx_config);
374 DRIVER_STATE_PRINT_HEX(rx_filter);
375 DRIVER_STATE_PRINT_INT(power_level); 370 DRIVER_STATE_PRINT_INT(power_level);
376 DRIVER_STATE_PRINT_INT(rssi_thold); 371 DRIVER_STATE_PRINT_INT(rssi_thold);
377 DRIVER_STATE_PRINT_INT(last_rssi_event); 372 DRIVER_STATE_PRINT_INT(last_rssi_event);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 304aaa2ee011..0bd7b020a420 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -285,13 +285,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
285 285
286 if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) && !is_ap) { 286 if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) && !is_ap) {
287 wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. " 287 wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
288 "ba_allowed = 0x%x", mbox->ba_allowed); 288 "ba_allowed = 0x%x", mbox->rx_ba_allowed);
289 289
290 if (wl->vif) 290 if (wl->vif)
291 wl1271_stop_ba_event(wl, mbox->ba_allowed); 291 wl1271_stop_ba_event(wl, mbox->rx_ba_allowed);
292 } 292 }
293 293
294 if ((vector & DUMMY_PACKET_EVENT_ID) && !is_ap) { 294 if ((vector & DUMMY_PACKET_EVENT_ID)) {
295 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 295 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
296 if (wl->vif) 296 if (wl->vif)
297 wl1271_tx_dummy_packet(wl); 297 wl1271_tx_dummy_packet(wl);
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index e524ad6fe4e3..49c1a0ede5b1 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -49,32 +49,27 @@ enum {
49 MEASUREMENT_START_EVENT_ID = BIT(8), 49 MEASUREMENT_START_EVENT_ID = BIT(8),
50 MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), 50 MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
51 SCAN_COMPLETE_EVENT_ID = BIT(10), 51 SCAN_COMPLETE_EVENT_ID = BIT(10),
52 SCHEDULED_SCAN_COMPLETE_EVENT_ID = BIT(11), 52 WFD_DISCOVERY_COMPLETE_EVENT_ID = BIT(11),
53 AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12), 53 AP_DISCOVERY_COMPLETE_EVENT_ID = BIT(12),
54 PS_REPORT_EVENT_ID = BIT(13), 54 PS_REPORT_EVENT_ID = BIT(13),
55 PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14), 55 PSPOLL_DELIVERY_FAILURE_EVENT_ID = BIT(14),
56 DISCONNECT_EVENT_COMPLETE_ID = BIT(15), 56 DISCONNECT_EVENT_COMPLETE_ID = BIT(15),
57 JOIN_EVENT_COMPLETE_ID = BIT(16), 57 /* BIT(16) is reserved */
58 CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17), 58 CHANNEL_SWITCH_COMPLETE_EVENT_ID = BIT(17),
59 BSS_LOSE_EVENT_ID = BIT(18), 59 BSS_LOSE_EVENT_ID = BIT(18),
60 REGAINED_BSS_EVENT_ID = BIT(19), 60 REGAINED_BSS_EVENT_ID = BIT(19),
61 MAX_TX_RETRY_EVENT_ID = BIT(20), 61 MAX_TX_RETRY_EVENT_ID = BIT(20),
62 /* STA: dummy paket for dynamic mem blocks */ 62 DUMMY_PACKET_EVENT_ID = BIT(21),
63 DUMMY_PACKET_EVENT_ID = BIT(21),
64 /* AP: STA remove complete */
65 STA_REMOVE_COMPLETE_EVENT_ID = BIT(21),
66 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22), 63 SOFT_GEMINI_SENSE_EVENT_ID = BIT(22),
67 /* STA: SG prediction */ 64 CHANGE_AUTO_MODE_TIMEOUT_EVENT_ID = BIT(23),
68 SOFT_GEMINI_PREDICTION_EVENT_ID = BIT(23),
69 /* AP: Inactive STA */
70 INACTIVE_STA_EVENT_ID = BIT(23),
71 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24), 65 SOFT_GEMINI_AVALANCHE_EVENT_ID = BIT(24),
72 PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25), 66 PLT_RX_CALIBRATION_COMPLETE_EVENT_ID = BIT(25),
73 DBG_EVENT_ID = BIT(26), 67 INACTIVE_STA_EVENT_ID = BIT(26),
74 HEALTH_CHECK_REPLY_EVENT_ID = BIT(27), 68 PEER_REMOVE_COMPLETE_EVENT_ID = BIT(27),
75 PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28), 69 PERIODIC_SCAN_COMPLETE_EVENT_ID = BIT(28),
76 PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29), 70 PERIODIC_SCAN_REPORT_EVENT_ID = BIT(29),
77 BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30), 71 BA_SESSION_RX_CONSTRAINT_EVENT_ID = BIT(30),
72 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(31),
78 EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, 73 EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
79}; 74};
80 75
@@ -83,15 +78,6 @@ enum {
83 EVENT_ENTER_POWER_SAVE_SUCCESS, 78 EVENT_ENTER_POWER_SAVE_SUCCESS,
84}; 79};
85 80
86struct event_debug_report {
87 u8 debug_event_id;
88 u8 num_params;
89 __le16 pad;
90 __le32 report_1;
91 __le32 report_2;
92 __le32 report_3;
93} __packed;
94
95#define NUM_OF_RSSI_SNR_TRIGGERS 8 81#define NUM_OF_RSSI_SNR_TRIGGERS 8
96 82
97struct event_mailbox { 83struct event_mailbox {
@@ -100,49 +86,45 @@ struct event_mailbox {
100 __le32 reserved_1; 86 __le32 reserved_1;
101 __le32 reserved_2; 87 __le32 reserved_2;
102 88
103 u8 dbg_event_id;
104 u8 num_relevant_params;
105 __le16 reserved_3;
106 __le32 event_report_p1;
107 __le32 event_report_p2;
108 __le32 event_report_p3;
109
110 u8 number_of_scan_results; 89 u8 number_of_scan_results;
111 u8 scan_tag; 90 u8 scan_tag;
112 u8 reserved_4[2]; 91 u8 completed_scan_status;
113 __le32 compl_scheduled_scan_status; 92 u8 reserved_3;
114 93
115 __le16 scheduled_scan_attended_channels;
116 u8 soft_gemini_sense_info; 94 u8 soft_gemini_sense_info;
117 u8 soft_gemini_protective_info; 95 u8 soft_gemini_protective_info;
118 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; 96 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
119 u8 channel_switch_status; 97 u8 channel_switch_status;
120 u8 scheduled_scan_status; 98 u8 scheduled_scan_status;
121 u8 ps_status; 99 u8 ps_status;
100 /* tuned channel (roc) */
101 u8 roc_channel;
122 102
123 /* AP FW only */ 103 __le16 hlid_removed_bitmap;
124 u8 hlid_removed;
125 104
126 /* a bitmap of hlids for stations that have been inactive too long */ 105 /* bitmap of aged stations (by HLID) */
127 __le16 sta_aging_status; 106 __le16 sta_aging_status;
128 107
129 /* a bitmap of hlids for stations which didn't respond to TX */ 108 /* bitmap of stations (by HLID) which exceeded max tx retries */
130 __le16 sta_tx_retry_exceeded; 109 __le16 sta_tx_retry_exceeded;
131 110
132 /* 111 /* discovery completed results */
133 * Bitmap, Each bit set represents the Role ID for which this constraint 112 u8 discovery_tag;
134 * is set. Range: 0 - FF, FF means ANY role 113 u8 number_of_preq_results;
135 */ 114 u8 number_of_prsp_results;
136 u8 ba_role_id; 115 u8 reserved_5;
137 /* 116
138 * Bitmap, Each bit set represents the Link ID for which this constraint 117 /* rx ba constraint */
139 * is set. Not applicable if ba_role_id is set to ANY role (FF). 118 u8 role_id; /* 0xFF means any role. */
140 * Range: 0 - FFFF, FFFF means ANY link in that role 119 u8 rx_ba_allowed;
141 */ 120 u8 reserved_6[2];
142 u8 ba_link_id; 121
143 u8 ba_allowed; 122 u8 ps_poll_delivery_failure_role_ids;
144 123 u8 stopped_role_ids;
145 u8 reserved_5[21]; 124 u8 started_role_ids;
125 u8 change_auto_mode_timeout;
126
127 u8 reserved_7[12];
146} __packed; 128} __packed;
147 129
148int wl1271_event_unmask(struct wl1271 *wl); 130int wl1271_event_unmask(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index c3e9a2e4410e..b13bebea95e0 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -39,13 +39,13 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
39 39
40 /* send empty templates for fw memory reservation */ 40 /* send empty templates for fw memory reservation */
41 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, 41 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
42 WL1271_CMD_TEMPL_MAX_SIZE, 42 WL1271_CMD_TEMPL_DFLT_SIZE,
43 0, WL1271_RATE_AUTOMATIC); 43 0, WL1271_RATE_AUTOMATIC);
44 if (ret < 0) 44 if (ret < 0)
45 return ret; 45 return ret;
46 46
47 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 47 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
48 NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0, 48 NULL, WL1271_CMD_TEMPL_DFLT_SIZE, 0,
49 WL1271_RATE_AUTOMATIC); 49 WL1271_RATE_AUTOMATIC);
50 if (ret < 0) 50 if (ret < 0)
51 return ret; 51 return ret;
@@ -70,15 +70,13 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
70 return ret; 70 return ret;
71 71
72 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL, 72 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
73 sizeof 73 WL1271_CMD_TEMPL_DFLT_SIZE,
74 (struct wl12xx_probe_resp_template),
75 0, WL1271_RATE_AUTOMATIC); 74 0, WL1271_RATE_AUTOMATIC);
76 if (ret < 0) 75 if (ret < 0)
77 return ret; 76 return ret;
78 77
79 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL, 78 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
80 sizeof 79 WL1271_CMD_TEMPL_DFLT_SIZE,
81 (struct wl12xx_beacon_template),
82 0, WL1271_RATE_AUTOMATIC); 80 0, WL1271_RATE_AUTOMATIC);
83 if (ret < 0) 81 if (ret < 0)
84 return ret; 82 return ret;
@@ -92,7 +90,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
92 90
93 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 91 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
94 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL, 92 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
95 WL1271_CMD_TEMPL_MAX_SIZE, i, 93 WL1271_CMD_TEMPL_DFLT_SIZE, i,
96 WL1271_RATE_AUTOMATIC); 94 WL1271_RATE_AUTOMATIC);
97 if (ret < 0) 95 if (ret < 0)
98 return ret; 96 return ret;
@@ -191,15 +189,13 @@ static int wl1271_ap_init_templates_config(struct wl1271 *wl)
191 * reserve memory for later. 189 * reserve memory for later.
192 */ 190 */
193 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL, 191 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
194 sizeof 192 WL1271_CMD_TEMPL_MAX_SIZE,
195 (struct wl12xx_probe_resp_template),
196 0, WL1271_RATE_AUTOMATIC); 193 0, WL1271_RATE_AUTOMATIC);
197 if (ret < 0) 194 if (ret < 0)
198 return ret; 195 return ret;
199 196
200 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL, 197 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
201 sizeof 198 WL1271_CMD_TEMPL_MAX_SIZE,
202 (struct wl12xx_beacon_template),
203 0, WL1271_RATE_AUTOMATIC); 199 0, WL1271_RATE_AUTOMATIC);
204 if (ret < 0) 200 if (ret < 0)
205 return ret; 201 return ret;
@@ -227,7 +223,7 @@ static int wl1271_ap_init_templates_config(struct wl1271 *wl)
227 return 0; 223 return 0;
228} 224}
229 225
230static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter) 226static int wl12xx_init_rx_config(struct wl1271 *wl)
231{ 227{
232 int ret; 228 int ret;
233 229
@@ -235,10 +231,6 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
235 if (ret < 0) 231 if (ret < 0)
236 return ret; 232 return ret;
237 233
238 ret = wl1271_acx_rx_config(wl, config, filter);
239 if (ret < 0)
240 return ret;
241
242 return 0; 234 return 0;
243} 235}
244 236
@@ -285,10 +277,7 @@ int wl1271_init_pta(struct wl1271 *wl)
285{ 277{
286 int ret; 278 int ret;
287 279
288 if (wl->bss_type == BSS_TYPE_AP_BSS) 280 ret = wl12xx_acx_sg_cfg(wl);
289 ret = wl1271_acx_ap_sg_cfg(wl);
290 else
291 ret = wl1271_acx_sta_sg_cfg(wl);
292 if (ret < 0) 281 if (ret < 0)
293 return ret; 282 return ret;
294 283
@@ -392,7 +381,7 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
392 if (ret < 0) 381 if (ret < 0)
393 return ret; 382 return ret;
394 383
395 ret = wl1271_acx_sta_mem_cfg(wl); 384 ret = wl12xx_acx_mem_cfg(wl);
396 if (ret < 0) 385 if (ret < 0)
397 return ret; 386 return ret;
398 387
@@ -408,12 +397,6 @@ static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
408{ 397{
409 int ret, i; 398 int ret, i;
410 399
411 ret = wl1271_cmd_set_sta_default_wep_key(wl, wl->default_key);
412 if (ret < 0) {
413 wl1271_warning("couldn't set default key");
414 return ret;
415 }
416
417 /* disable all keep-alive templates */ 400 /* disable all keep-alive templates */
418 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 401 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
419 ret = wl1271_acx_keep_alive_config(wl, i, 402 ret = wl1271_acx_keep_alive_config(wl, i,
@@ -451,7 +434,7 @@ static int wl1271_ap_hw_init(struct wl1271 *wl)
451 if (ret < 0) 434 if (ret < 0)
452 return ret; 435 return ret;
453 436
454 ret = wl1271_acx_ap_mem_cfg(wl); 437 ret = wl12xx_acx_mem_cfg(wl);
455 if (ret < 0) 438 if (ret < 0)
456 return ret; 439 return ret;
457 440
@@ -483,7 +466,7 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
483 * when operating as AP we want to receive external beacons for 466 * when operating as AP we want to receive external beacons for
484 * configuring ERP protection. 467 * configuring ERP protection.
485 */ 468 */
486 ret = wl1271_acx_set_ap_beacon_filter(wl, false); 469 ret = wl1271_acx_beacon_filter_opt(wl, false);
487 if (ret < 0) 470 if (ret < 0)
488 return ret; 471 return ret;
489 472
@@ -532,6 +515,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
532 else 515 else
533 supported_rates = CONF_TX_AP_ENABLED_RATES; 516 supported_rates = CONF_TX_AP_ENABLED_RATES;
534 517
518 /* unconditionally enable HT rates */
519 supported_rates |= CONF_TX_MCS_RATES;
520
535 /* configure unicast TX rate classes */ 521 /* configure unicast TX rate classes */
536 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) { 522 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
537 rc.enabled_rates = supported_rates; 523 rc.enabled_rates = supported_rates;
@@ -546,41 +532,24 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
546 return 0; 532 return 0;
547} 533}
548 534
549static void wl1271_check_ba_support(struct wl1271 *wl)
550{
551 /* validate FW cose ver x.x.x.50-60.x */
552 if ((wl->chip.fw_ver[3] >= WL12XX_BA_SUPPORT_FW_COST_VER2_START) &&
553 (wl->chip.fw_ver[3] < WL12XX_BA_SUPPORT_FW_COST_VER2_END)) {
554 wl->ba_support = true;
555 return;
556 }
557
558 wl->ba_support = false;
559}
560
561static int wl1271_set_ba_policies(struct wl1271 *wl) 535static int wl1271_set_ba_policies(struct wl1271 *wl)
562{ 536{
563 u8 tid_index;
564 int ret = 0;
565
566 /* Reset the BA RX indicators */ 537 /* Reset the BA RX indicators */
567 wl->ba_rx_bitmap = 0; 538 wl->ba_rx_bitmap = 0;
568 wl->ba_allowed = true; 539 wl->ba_allowed = true;
540 wl->ba_rx_session_count = 0;
569 541
570 /* validate that FW support BA */ 542 /* BA is supported in STA/AP modes */
571 wl1271_check_ba_support(wl); 543 if (wl->bss_type != BSS_TYPE_AP_BSS &&
544 wl->bss_type != BSS_TYPE_STA_BSS) {
545 wl->ba_support = false;
546 return 0;
547 }
572 548
573 if (wl->ba_support) 549 wl->ba_support = true;
574 /* 802.11n initiator BA session setting */
575 for (tid_index = 0; tid_index < CONF_TX_MAX_TID_COUNT;
576 ++tid_index) {
577 ret = wl1271_acx_set_ba_session(wl, WLAN_BACK_INITIATOR,
578 tid_index, true);
579 if (ret < 0)
580 break;
581 }
582 550
583 return ret; 551 /* 802.11n initiator BA session setting */
552 return wl12xx_acx_set_ba_initiator_policy(wl);
584} 553}
585 554
586int wl1271_chip_specific_init(struct wl1271 *wl) 555int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -650,11 +619,7 @@ int wl1271_hw_init(struct wl1271 *wl)
650 return ret; 619 return ret;
651 620
652 /* RX config */ 621 /* RX config */
653 ret = wl1271_init_rx_config(wl, 622 ret = wl12xx_init_rx_config(wl);
654 RX_CFG_PROMISCUOUS | RX_CFG_TSF,
655 RX_FILTER_OPTION_DEF);
656 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
657 RX_FILTER_OPTION_FILTER_ALL); */
658 if (ret < 0) 623 if (ret < 0)
659 goto out_free_memmap; 624 goto out_free_memmap;
660 625
@@ -733,6 +698,10 @@ int wl1271_hw_init(struct wl1271 *wl)
733 if (ret < 0) 698 if (ret < 0)
734 goto out_free_memmap; 699 goto out_free_memmap;
735 700
701 ret = wl12xx_acx_set_rate_mgmt_params(wl);
702 if (ret < 0)
703 goto out_free_memmap;
704
736 /* Configure initiator BA sessions policies */ 705 /* Configure initiator BA sessions policies */
737 ret = wl1271_set_ba_policies(wl); 706 ret = wl1271_set_ba_policies(wl);
738 if (ret < 0) 707 if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index a2fe4f506ada..e839341dfafe 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -186,6 +186,5 @@ int wl1271_free_hw(struct wl1271 *wl);
186irqreturn_t wl1271_irq(int irq, void *data); 186irqreturn_t wl1271_irq(int irq, void *data);
187bool wl1271_set_block_size(struct wl1271 *wl); 187bool wl1271_set_block_size(struct wl1271 *wl);
188int wl1271_tx_dummy_packet(struct wl1271 *wl); 188int wl1271_tx_dummy_packet(struct wl1271 *wl);
189void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters);
190 189
191#endif 190#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 3418299e17c8..82f4408e89ad 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -52,110 +52,67 @@
52 52
53static struct conf_drv_settings default_conf = { 53static struct conf_drv_settings default_conf = {
54 .sg = { 54 .sg = {
55 .sta_params = { 55 .params = {
56 [CONF_SG_BT_PER_THRESHOLD] = 7500, 56 [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
57 [CONF_SG_HV3_MAX_OVERRIDE] = 0, 57 [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
58 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400, 58 [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
59 [CONF_SG_BT_LOAD_RATIO] = 200, 59 [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
60 [CONF_SG_AUTO_PS_MODE] = 1, 60 [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
61 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, 61 [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
62 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, 62 [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
63 [CONF_SG_ANTENNA_CONFIGURATION] = 0, 63 [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
64 [CONF_SG_BEACON_MISS_PERCENT] = 60, 64 [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
65 [CONF_SG_RATE_ADAPT_THRESH] = 12, 65 [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
66 [CONF_SG_RATE_ADAPT_SNR] = 0, 66 [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
67 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10, 67 [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
68 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30, 68 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
69 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8, 69 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
70 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20, 70 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
71 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50, 71 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
72 /* Note: with UPSD, this should be 4 */ 72 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
73 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8, 73 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
74 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7, 74 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
75 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25, 75 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
76 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20, 76 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
77 /* Note: with UPDS, this should be 15 */ 77 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
78 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8, 78 [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
79 /* Note: with UPDS, this should be 50 */ 79 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
80 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40, 80 [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
81 /* Note: with UPDS, this should be 10 */ 81 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
82 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20, 82 /* active scan params */
83 [CONF_SG_RXT] = 1200, 83 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
84 [CONF_SG_TXT] = 1000, 84 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
85 [CONF_SG_ADAPTIVE_RXT_TXT] = 1, 85 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
86 [CONF_SG_PS_POLL_TIMEOUT] = 10, 86 /* passive scan params */
87 [CONF_SG_UPSD_TIMEOUT] = 10, 87 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
88 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7, 88 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
89 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15, 89 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
90 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15, 90 /* passive scan in dual antenna params */
91 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8, 91 [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
92 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20, 92 [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
93 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15, 93 [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
94 [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20, 94 /* general params */
95 [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50, 95 [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
96 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10, 96 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
97 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200, 97 [CONF_SG_BEACON_MISS_PERCENT] = 60,
98 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800, 98 [CONF_SG_DHCP_TIME] = 5000,
99 [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75, 99 [CONF_SG_RXT] = 1200,
100 [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15, 100 [CONF_SG_TXT] = 1000,
101 [CONF_SG_HV3_MAX_SERVED] = 6, 101 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
102 [CONF_SG_DHCP_TIME] = 5000, 102 [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
103 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100, 103 [CONF_SG_HV3_MAX_SERVED] = 6,
104 }, 104 [CONF_SG_PS_POLL_TIMEOUT] = 10,
105 .ap_params = { 105 [CONF_SG_UPSD_TIMEOUT] = 10,
106 [CONF_SG_BT_PER_THRESHOLD] = 7500, 106 [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
107 [CONF_SG_HV3_MAX_OVERRIDE] = 0, 107 [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
108 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400, 108 [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
109 [CONF_SG_BT_LOAD_RATIO] = 50, 109 /* AP params */
110 [CONF_SG_AUTO_PS_MODE] = 1, 110 [CONF_AP_BEACON_MISS_TX] = 3,
111 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170, 111 [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
112 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50, 112 [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
113 [CONF_SG_ANTENNA_CONFIGURATION] = 0, 113 [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
114 [CONF_SG_BEACON_MISS_PERCENT] = 60, 114 [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
115 [CONF_SG_RATE_ADAPT_THRESH] = 64, 115 [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
116 [CONF_SG_RATE_ADAPT_SNR] = 1,
117 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
118 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 25,
119 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 25,
120 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
121 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 25,
122 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 25,
123 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
124 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
125 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 25,
126 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
127 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 25,
128 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 25,
129 [CONF_SG_RXT] = 1200,
130 [CONF_SG_TXT] = 1000,
131 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
132 [CONF_SG_PS_POLL_TIMEOUT] = 10,
133 [CONF_SG_UPSD_TIMEOUT] = 10,
134 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
135 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
136 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
137 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
138 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
139 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
140 [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
141 [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
142 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
143 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
144 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
145 [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
146 [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
147 [CONF_SG_HV3_MAX_SERVED] = 6,
148 [CONF_SG_DHCP_TIME] = 5000,
149 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
150 [CONF_SG_TEMP_PARAM_1] = 0,
151 [CONF_SG_TEMP_PARAM_2] = 0,
152 [CONF_SG_TEMP_PARAM_3] = 0,
153 [CONF_SG_TEMP_PARAM_4] = 0,
154 [CONF_SG_TEMP_PARAM_5] = 0,
155 [CONF_SG_AP_BEACON_MISS_TX] = 3,
156 [CONF_SG_RX_WINDOW_LENGTH] = 6,
157 [CONF_SG_AP_CONNECTION_PROTECTION_TIME] = 50,
158 [CONF_SG_TEMP_PARAM_6] = 1,
159 }, 116 },
160 .state = CONF_SG_PROTECTIVE, 117 .state = CONF_SG_PROTECTIVE,
161 }, 118 },
@@ -329,8 +286,10 @@ static struct conf_drv_settings default_conf = {
329 }, 286 },
330 }, 287 },
331 .ht = { 288 .ht = {
289 .rx_ba_win_size = 8,
332 .tx_ba_win_size = 64, 290 .tx_ba_win_size = 64,
333 .inactivity_timeout = 10000, 291 .inactivity_timeout = 10000,
292 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
334 }, 293 },
335 .mem_wl127x = { 294 .mem_wl127x = {
336 .num_stations = 1, 295 .num_stations = 1,
@@ -379,6 +338,27 @@ static struct conf_drv_settings default_conf = {
379 .threshold = 0, 338 .threshold = 0,
380 }, 339 },
381 .hci_io_ds = HCI_IO_DS_6MA, 340 .hci_io_ds = HCI_IO_DS_6MA,
341 .rate = {
342 .rate_retry_score = 32000,
343 .per_add = 8192,
344 .per_th1 = 2048,
345 .per_th2 = 4096,
346 .max_per = 8100,
347 .inverse_curiosity_factor = 5,
348 .tx_fail_low_th = 4,
349 .tx_fail_high_th = 10,
350 .per_alpha_shift = 4,
351 .per_add_shift = 13,
352 .per_beta1_shift = 10,
353 .per_beta2_shift = 8,
354 .rate_check_up = 2,
355 .rate_check_down = 12,
356 .rate_retry_policy = {
357 0x00, 0x00, 0x00, 0x00, 0x00,
358 0x00, 0x00, 0x00, 0x00, 0x00,
359 0x00, 0x00, 0x00,
360 },
361 },
382}; 362};
383 363
384static char *fwlog_param; 364static char *fwlog_param;
@@ -415,10 +395,12 @@ static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
415 if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags)) 395 if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
416 return 0; 396 return 0;
417 397
418 ret = wl1271_cmd_set_sta_state(wl); 398 ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
419 if (ret < 0) 399 if (ret < 0)
420 return ret; 400 return ret;
421 401
402 wl12xx_croc(wl, wl->role_id);
403
422 wl1271_info("Association completed."); 404 wl1271_info("Association completed.");
423 return 0; 405 return 0;
424} 406}
@@ -718,7 +700,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
718 if (ret < 0) 700 if (ret < 0)
719 goto out_free_memmap; 701 goto out_free_memmap;
720 702
721 ret = wl1271_acx_sta_mem_cfg(wl); 703 ret = wl12xx_acx_mem_cfg(wl);
722 if (ret < 0) 704 if (ret < 0)
723 goto out_free_memmap; 705 goto out_free_memmap;
724 706
@@ -773,7 +755,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
773 return ret; 755 return ret;
774} 756}
775 757
776static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks) 758static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
777{ 759{
778 bool fw_ps; 760 bool fw_ps;
779 761
@@ -785,21 +767,35 @@ static void wl1271_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_blks)
785 767
786 /* 768 /*
787 * Wake up from high level PS if the STA is asleep with too little 769 * Wake up from high level PS if the STA is asleep with too little
788 * blocks in FW or if the STA is awake. 770 * packets in FW or if the STA is awake.
789 */ 771 */
790 if (!fw_ps || tx_blks < WL1271_PS_STA_MAX_BLOCKS) 772 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
791 wl1271_ps_link_end(wl, hlid); 773 wl1271_ps_link_end(wl, hlid);
792 774
793 /* Start high-level PS if the STA is asleep with enough blocks in FW */ 775 /* Start high-level PS if the STA is asleep with enough blocks in FW */
794 else if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS) 776 else if (fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
795 wl1271_ps_link_start(wl, hlid, true); 777 wl1271_ps_link_start(wl, hlid, true);
796} 778}
797 779
798static void wl1271_irq_update_links_status(struct wl1271 *wl, 780bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
799 struct wl1271_fw_ap_status *status) 781{
782 int id;
783
784 /* global/broadcast "stations" are always active */
785 if (hlid < WL1271_AP_STA_HLID_START)
786 return true;
787
788 id = hlid - WL1271_AP_STA_HLID_START;
789 return test_bit(id, wl->ap_hlid_map);
790}
791
792static void wl12xx_irq_update_links_status(struct wl1271 *wl,
793 struct wl12xx_fw_status *status)
800{ 794{
801 u32 cur_fw_ps_map; 795 u32 cur_fw_ps_map;
802 u8 hlid; 796 u8 hlid, cnt;
797
798 /* TODO: also use link_fast_bitmap here */
803 799
804 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap); 800 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
805 if (wl->ap_fw_ps_map != cur_fw_ps_map) { 801 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
@@ -812,45 +808,30 @@ static void wl1271_irq_update_links_status(struct wl1271 *wl,
812 } 808 }
813 809
814 for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) { 810 for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
815 u8 cnt = status->tx_lnk_free_blks[hlid] - 811 if (!wl1271_is_active_sta(wl, hlid))
816 wl->links[hlid].prev_freed_blks; 812 continue;
817 813
818 wl->links[hlid].prev_freed_blks = 814 cnt = status->tx_lnk_free_pkts[hlid] -
819 status->tx_lnk_free_blks[hlid]; 815 wl->links[hlid].prev_freed_pkts;
820 wl->links[hlid].allocated_blks -= cnt;
821 816
822 wl1271_irq_ps_regulate_link(wl, hlid, 817 wl->links[hlid].prev_freed_pkts =
823 wl->links[hlid].allocated_blks); 818 status->tx_lnk_free_pkts[hlid];
824 } 819 wl->links[hlid].allocated_pkts -= cnt;
825}
826 820
827static u32 wl1271_tx_allocated_blocks(struct wl1271 *wl) 821 wl12xx_irq_ps_regulate_link(wl, hlid,
828{ 822 wl->links[hlid].allocated_pkts);
829 int i; 823 }
830 u32 total_alloc_blocks = 0;
831
832 for (i = 0; i < NUM_TX_QUEUES; i++)
833 total_alloc_blocks += wl->tx_allocated_blocks[i];
834
835 return total_alloc_blocks;
836} 824}
837 825
838static void wl1271_fw_status(struct wl1271 *wl, 826static void wl12xx_fw_status(struct wl1271 *wl,
839 struct wl1271_fw_full_status *full_status) 827 struct wl12xx_fw_status *status)
840{ 828{
841 struct wl1271_fw_common_status *status = &full_status->common;
842 struct timespec ts; 829 struct timespec ts;
843 u32 old_tx_blk_count = wl->tx_blocks_available; 830 u32 old_tx_blk_count = wl->tx_blocks_available;
844 u32 freed_blocks = 0, ac_freed_blocks; 831 int avail, freed_blocks;
845 int i; 832 int i;
846 833
847 if (wl->bss_type == BSS_TYPE_AP_BSS) { 834 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
848 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
849 sizeof(struct wl1271_fw_ap_status), false);
850 } else {
851 wl1271_raw_read(wl, FW_STATUS_ADDR, status,
852 sizeof(struct wl1271_fw_sta_status), false);
853 }
854 835
855 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 836 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
856 "drv_rx_counter = %d, tx_results_counter = %d)", 837 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -859,42 +840,49 @@ static void wl1271_fw_status(struct wl1271 *wl,
859 status->drv_rx_counter, 840 status->drv_rx_counter,
860 status->tx_results_counter); 841 status->tx_results_counter);
861 842
862 /* update number of available TX blocks */
863 for (i = 0; i < NUM_TX_QUEUES; i++) { 843 for (i = 0; i < NUM_TX_QUEUES; i++) {
864 ac_freed_blocks = le32_to_cpu(status->tx_released_blks[i]) - 844 /* prevent wrap-around in freed-packets counter */
865 wl->tx_blocks_freed[i]; 845 wl->tx_allocated_pkts[i] -=
866 freed_blocks += ac_freed_blocks; 846 (status->tx_released_pkts[i] -
867 847 wl->tx_pkts_freed[i]) & 0xff;
868 wl->tx_allocated_blocks[i] -= ac_freed_blocks;
869 848
870 wl->tx_blocks_freed[i] = 849 wl->tx_pkts_freed[i] = status->tx_released_pkts[i];
871 le32_to_cpu(status->tx_released_blks[i]);
872 } 850 }
873 851
874 if (wl->bss_type == BSS_TYPE_AP_BSS) { 852 /* prevent wrap-around in total blocks counter */
875 /* Update num of allocated TX blocks per link and ps status */ 853 if (likely(wl->tx_blocks_freed <=
876 wl1271_irq_update_links_status(wl, &full_status->ap); 854 le32_to_cpu(status->total_released_blks)))
877 wl->tx_blocks_available += freed_blocks; 855 freed_blocks = le32_to_cpu(status->total_released_blks) -
878 } else { 856 wl->tx_blocks_freed;
879 int avail = full_status->sta.tx_total - 857 else
880 wl1271_tx_allocated_blocks(wl); 858 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
859 le32_to_cpu(status->total_released_blks);
881 860
882 /* 861 wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
883 * The FW might change the total number of TX memblocks before 862
884 * we get a notification about blocks being released. Thus, the 863 wl->tx_allocated_blocks -= freed_blocks;
885 * available blocks calculation might yield a temporary result 864
886 * which is lower than the actual available blocks. Keeping in 865 avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
887 * mind that only blocks that were allocated can be moved from 866
888 * TX to RX, tx_blocks_available should never decrease here. 867 /*
889 */ 868 * The FW might change the total number of TX memblocks before
890 wl->tx_blocks_available = max((int)wl->tx_blocks_available, 869 * we get a notification about blocks being released. Thus, the
891 avail); 870 * available blocks calculation might yield a temporary result
892 } 871 * which is lower than the actual available blocks. Keeping in
872 * mind that only blocks that were allocated can be moved from
873 * TX to RX, tx_blocks_available should never decrease here.
874 */
875 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
876 avail);
893 877
894 /* if more blocks are available now, tx work can be scheduled */ 878 /* if more blocks are available now, tx work can be scheduled */
895 if (wl->tx_blocks_available > old_tx_blk_count) 879 if (wl->tx_blocks_available > old_tx_blk_count)
896 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 880 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
897 881
882 /* for AP update num of allocated TX blocks per link and ps status */
883 if (wl->bss_type == BSS_TYPE_AP_BSS)
884 wl12xx_irq_update_links_status(wl, status);
885
898 /* update the host-chipset time offset */ 886 /* update the host-chipset time offset */
899 getnstimeofday(&ts); 887 getnstimeofday(&ts);
900 wl->time_offset = (timespec_to_ns(&ts) >> 10) - 888 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
@@ -967,8 +955,8 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
967 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 955 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
968 smp_mb__after_clear_bit(); 956 smp_mb__after_clear_bit();
969 957
970 wl1271_fw_status(wl, wl->fw_status); 958 wl12xx_fw_status(wl, wl->fw_status);
971 intr = le32_to_cpu(wl->fw_status->common.intr); 959 intr = le32_to_cpu(wl->fw_status->intr);
972 intr &= WL1271_INTR_MASK; 960 intr &= WL1271_INTR_MASK;
973 if (!intr) { 961 if (!intr) {
974 done = true; 962 done = true;
@@ -987,7 +975,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
987 if (likely(intr & WL1271_ACX_INTR_DATA)) { 975 if (likely(intr & WL1271_ACX_INTR_DATA)) {
988 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 976 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
989 977
990 wl1271_rx(wl, &wl->fw_status->common); 978 wl12xx_rx(wl, wl->fw_status);
991 979
992 /* Check if any tx blocks were freed */ 980 /* Check if any tx blocks were freed */
993 spin_lock_irqsave(&wl->wl_lock, flags); 981 spin_lock_irqsave(&wl->wl_lock, flags);
@@ -1004,7 +992,7 @@ irqreturn_t wl1271_irq(int irq, void *cookie)
1004 } 992 }
1005 993
1006 /* check for tx results */ 994 /* check for tx results */
1007 if (wl->fw_status->common.tx_results_counter != 995 if (wl->fw_status->tx_results_counter !=
1008 (wl->tx_results_count & 0xff)) 996 (wl->tx_results_count & 0xff))
1009 wl1271_tx_complete(wl); 997 wl1271_tx_complete(wl);
1010 998
@@ -1056,25 +1044,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
1056 const char *fw_name; 1044 const char *fw_name;
1057 int ret; 1045 int ret;
1058 1046
1059 switch (wl->bss_type) { 1047 if (wl->chip.id == CHIP_ID_1283_PG20)
1060 case BSS_TYPE_AP_BSS: 1048 fw_name = WL128X_FW_NAME;
1061 if (wl->chip.id == CHIP_ID_1283_PG20) 1049 else
1062 fw_name = WL128X_AP_FW_NAME; 1050 fw_name = WL127X_FW_NAME;
1063 else
1064 fw_name = WL127X_AP_FW_NAME;
1065 break;
1066 case BSS_TYPE_IBSS:
1067 case BSS_TYPE_STA_BSS:
1068 if (wl->chip.id == CHIP_ID_1283_PG20)
1069 fw_name = WL128X_FW_NAME;
1070 else
1071 fw_name = WL1271_FW_NAME;
1072 break;
1073 default:
1074 wl1271_error("no compatible firmware for bss_type %d",
1075 wl->bss_type);
1076 return -EINVAL;
1077 }
1078 1051
1079 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); 1052 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
1080 1053
@@ -1103,7 +1076,6 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
1103 } 1076 }
1104 1077
1105 memcpy(wl->fw, fw->data, wl->fw_len); 1078 memcpy(wl->fw, fw->data, wl->fw_len);
1106 wl->fw_bss_type = wl->bss_type;
1107 ret = 0; 1079 ret = 0;
1108 1080
1109out: 1081out:
@@ -1194,8 +1166,8 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
1194 wl12xx_cmd_stop_fwlog(wl); 1166 wl12xx_cmd_stop_fwlog(wl);
1195 1167
1196 /* Read the first memory block address */ 1168 /* Read the first memory block address */
1197 wl1271_fw_status(wl, wl->fw_status); 1169 wl12xx_fw_status(wl, wl->fw_status);
1198 first_addr = __le32_to_cpu(wl->fw_status->sta.log_start_addr); 1170 first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
1199 if (!first_addr) 1171 if (!first_addr)
1200 goto out; 1172 goto out;
1201 1173
@@ -1211,7 +1183,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
1211 * of each memory block hold the hardware address of the next 1183 * of each memory block hold the hardware address of the next
1212 * one. The last memory block points to the first one. 1184 * one. The last memory block points to the first one.
1213 */ 1185 */
1214 addr = __le32_to_cpup((__le32 *)block); 1186 addr = le32_to_cpup((__le32 *)block);
1215 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr), 1187 if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
1216 WL12XX_HW_BLOCK_SIZE - sizeof(addr))) 1188 WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
1217 break; 1189 break;
@@ -1374,8 +1346,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
1374 goto out; 1346 goto out;
1375 } 1347 }
1376 1348
1377 /* Make sure the firmware type matches the BSS type */ 1349 if (wl->fw == NULL) {
1378 if (wl->fw == NULL || wl->fw_bss_type != wl->bss_type) {
1379 ret = wl1271_fetch_firmware(wl); 1350 ret = wl1271_fetch_firmware(wl);
1380 if (ret < 0) 1351 if (ret < 0)
1381 goto out; 1352 goto out;
@@ -1395,6 +1366,7 @@ out:
1395int wl1271_plt_start(struct wl1271 *wl) 1366int wl1271_plt_start(struct wl1271 *wl)
1396{ 1367{
1397 int retries = WL1271_BOOT_RETRIES; 1368 int retries = WL1271_BOOT_RETRIES;
1369 struct wiphy *wiphy = wl->hw->wiphy;
1398 int ret; 1370 int ret;
1399 1371
1400 mutex_lock(&wl->mutex); 1372 mutex_lock(&wl->mutex);
@@ -1428,6 +1400,11 @@ int wl1271_plt_start(struct wl1271 *wl)
1428 wl1271_notice("firmware booted in PLT mode (%s)", 1400 wl1271_notice("firmware booted in PLT mode (%s)",
1429 wl->chip.fw_ver_str); 1401 wl->chip.fw_ver_str);
1430 1402
1403 /* update hw/fw version info in wiphy struct */
1404 wiphy->hw_version = wl->chip.id;
1405 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1406 sizeof(wiphy->fw_version));
1407
1431 goto out; 1408 goto out;
1432 1409
1433irq_disable: 1410irq_disable:
@@ -1504,10 +1481,25 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1504 q = wl1271_tx_get_queue(mapping); 1481 q = wl1271_tx_get_queue(mapping);
1505 1482
1506 if (wl->bss_type == BSS_TYPE_AP_BSS) 1483 if (wl->bss_type == BSS_TYPE_AP_BSS)
1507 hlid = wl1271_tx_get_hlid(skb); 1484 hlid = wl12xx_tx_get_hlid_ap(wl, skb);
1508 1485
1509 spin_lock_irqsave(&wl->wl_lock, flags); 1486 spin_lock_irqsave(&wl->wl_lock, flags);
1510 1487
1488 /* queue the packet */
1489 if (wl->bss_type == BSS_TYPE_AP_BSS) {
1490 if (!wl1271_is_active_sta(wl, hlid)) {
1491 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
1492 hlid, q);
1493 dev_kfree_skb(skb);
1494 goto out;
1495 }
1496
1497 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
1498 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1499 } else {
1500 skb_queue_tail(&wl->tx_queue[q], skb);
1501 }
1502
1511 wl->tx_queue_count[q]++; 1503 wl->tx_queue_count[q]++;
1512 1504
1513 /* 1505 /*
@@ -1520,14 +1512,6 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1520 set_bit(q, &wl->stopped_queues_map); 1512 set_bit(q, &wl->stopped_queues_map);
1521 } 1513 }
1522 1514
1523 /* queue the packet */
1524 if (wl->bss_type == BSS_TYPE_AP_BSS) {
1525 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
1526 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1527 } else {
1528 skb_queue_tail(&wl->tx_queue[q], skb);
1529 }
1530
1531 /* 1515 /*
1532 * The chip specific setup must run before the first TX packet - 1516 * The chip specific setup must run before the first TX packet -
1533 * before that, the tx_work will not be initialized! 1517 * before that, the tx_work will not be initialized!
@@ -1537,6 +1521,7 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1537 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) 1521 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1538 ieee80211_queue_work(wl->hw, &wl->tx_work); 1522 ieee80211_queue_work(wl->hw, &wl->tx_work);
1539 1523
1524out:
1540 spin_unlock_irqrestore(&wl->wl_lock, flags); 1525 spin_unlock_irqrestore(&wl->wl_lock, flags);
1541} 1526}
1542 1527
@@ -1673,7 +1658,7 @@ static int wl1271_configure_suspend_ap(struct wl1271 *wl)
1673 if (ret < 0) 1658 if (ret < 0)
1674 goto out_unlock; 1659 goto out_unlock;
1675 1660
1676 ret = wl1271_acx_set_ap_beacon_filter(wl, true); 1661 ret = wl1271_acx_beacon_filter_opt(wl, true);
1677 1662
1678 wl1271_ps_elp_sleep(wl); 1663 wl1271_ps_elp_sleep(wl);
1679out_unlock: 1664out_unlock:
@@ -1711,7 +1696,7 @@ static void wl1271_configure_resume(struct wl1271 *wl)
1711 wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, 1696 wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
1712 wl->basic_rate, true); 1697 wl->basic_rate, true);
1713 } else if (is_ap) { 1698 } else if (is_ap) {
1714 wl1271_acx_set_ap_beacon_filter(wl, false); 1699 wl1271_acx_beacon_filter_opt(wl, false);
1715 } 1700 }
1716 1701
1717 wl1271_ps_elp_sleep(wl); 1702 wl1271_ps_elp_sleep(wl);
@@ -1803,9 +1788,6 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
1803 * 1788 *
1804 * The MAC address is first known when the corresponding interface 1789 * The MAC address is first known when the corresponding interface
1805 * is added. That is where we will initialize the hardware. 1790 * is added. That is where we will initialize the hardware.
1806 *
1807 * In addition, we currently have different firmwares for AP and managed
1808 * operation. We will know which to boot according to interface type.
1809 */ 1791 */
1810 1792
1811 return 0; 1793 return 0;
@@ -1816,6 +1798,24 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1816 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 1798 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1817} 1799}
1818 1800
1801static u8 wl12xx_get_role_type(struct wl1271 *wl)
1802{
1803 switch (wl->bss_type) {
1804 case BSS_TYPE_AP_BSS:
1805 return WL1271_ROLE_AP;
1806
1807 case BSS_TYPE_STA_BSS:
1808 return WL1271_ROLE_STA;
1809
1810 case BSS_TYPE_IBSS:
1811 return WL1271_ROLE_IBSS;
1812
1813 default:
1814 wl1271_error("invalid bss_type: %d", wl->bss_type);
1815 }
1816 return WL12XX_INVALID_ROLE_TYPE;
1817}
1818
1819static int wl1271_op_add_interface(struct ieee80211_hw *hw, 1819static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1820 struct ieee80211_vif *vif) 1820 struct ieee80211_vif *vif)
1821{ 1821{
@@ -1823,6 +1823,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1823 struct wiphy *wiphy = hw->wiphy; 1823 struct wiphy *wiphy = hw->wiphy;
1824 int retries = WL1271_BOOT_RETRIES; 1824 int retries = WL1271_BOOT_RETRIES;
1825 int ret = 0; 1825 int ret = 0;
1826 u8 role_type;
1826 bool booted = false; 1827 bool booted = false;
1827 1828
1828 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 1829 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
@@ -1863,6 +1864,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1863 goto out; 1864 goto out;
1864 } 1865 }
1865 1866
1867 role_type = wl12xx_get_role_type(wl);
1868 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
1869 ret = -EINVAL;
1870 goto out;
1871 }
1866 memcpy(wl->mac_addr, vif->addr, ETH_ALEN); 1872 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
1867 1873
1868 if (wl->state != WL1271_STATE_OFF) { 1874 if (wl->state != WL1271_STATE_OFF) {
@@ -1882,6 +1888,25 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1882 if (ret < 0) 1888 if (ret < 0)
1883 goto power_off; 1889 goto power_off;
1884 1890
1891 if (wl->bss_type == BSS_TYPE_STA_BSS ||
1892 wl->bss_type == BSS_TYPE_IBSS) {
1893 /*
1894 * The device role is a special role used for
1895 * rx and tx frames prior to association (as
1896 * the STA role can get packets only from
1897 * its associated bssid)
1898 */
1899 ret = wl12xx_cmd_role_enable(wl,
1900 WL1271_ROLE_DEVICE,
1901 &wl->dev_role_id);
1902 if (ret < 0)
1903 goto irq_disable;
1904 }
1905
1906 ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
1907 if (ret < 0)
1908 goto irq_disable;
1909
1885 ret = wl1271_hw_init(wl); 1910 ret = wl1271_hw_init(wl);
1886 if (ret < 0) 1911 if (ret < 0)
1887 goto irq_disable; 1912 goto irq_disable;
@@ -1946,7 +1971,7 @@ out:
1946static void __wl1271_op_remove_interface(struct wl1271 *wl, 1971static void __wl1271_op_remove_interface(struct wl1271 *wl,
1947 bool reset_tx_queues) 1972 bool reset_tx_queues)
1948{ 1973{
1949 int i; 1974 int ret, i;
1950 1975
1951 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 1976 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1952 1977
@@ -1971,6 +1996,31 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
1971 ieee80211_scan_completed(wl->hw, true); 1996 ieee80211_scan_completed(wl->hw, true);
1972 } 1997 }
1973 1998
1999 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2000 /* disable active roles */
2001 ret = wl1271_ps_elp_wakeup(wl);
2002 if (ret < 0)
2003 goto deinit;
2004
2005 if (wl->bss_type == BSS_TYPE_STA_BSS) {
2006 ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
2007 if (ret < 0)
2008 goto deinit;
2009 }
2010
2011 ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
2012 if (ret < 0)
2013 goto deinit;
2014
2015 wl1271_ps_elp_sleep(wl);
2016 }
2017deinit:
2018 /* clear all hlids (except system_hlid) */
2019 wl->sta_hlid = WL12XX_INVALID_LINK_ID;
2020 wl->dev_hlid = WL12XX_INVALID_LINK_ID;
2021 wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
2022 wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
2023
1974 /* 2024 /*
1975 * this must be before the cancel_work calls below, so that the work 2025 * this must be before the cancel_work calls below, so that the work
1976 * functions don't perform further work. 2026 * functions don't perform further work.
@@ -2007,18 +2057,26 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2007 wl->psm_entry_retry = 0; 2057 wl->psm_entry_retry = 0;
2008 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 2058 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2009 wl->tx_blocks_available = 0; 2059 wl->tx_blocks_available = 0;
2060 wl->tx_allocated_blocks = 0;
2010 wl->tx_results_count = 0; 2061 wl->tx_results_count = 0;
2011 wl->tx_packets_count = 0; 2062 wl->tx_packets_count = 0;
2012 wl->time_offset = 0; 2063 wl->time_offset = 0;
2013 wl->session_counter = 0; 2064 wl->session_counter = 0;
2014 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 2065 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2015 wl->vif = NULL; 2066 wl->vif = NULL;
2016 wl->filters = 0;
2017 wl1271_free_ap_keys(wl); 2067 wl1271_free_ap_keys(wl);
2018 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map)); 2068 memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
2019 wl->ap_fw_ps_map = 0; 2069 wl->ap_fw_ps_map = 0;
2020 wl->ap_ps_map = 0; 2070 wl->ap_ps_map = 0;
2021 wl->sched_scanning = false; 2071 wl->sched_scanning = false;
2072 wl->role_id = WL12XX_INVALID_ROLE_ID;
2073 wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
2074 memset(wl->roles_map, 0, sizeof(wl->roles_map));
2075 memset(wl->links_map, 0, sizeof(wl->links_map));
2076 memset(wl->roc_map, 0, sizeof(wl->roc_map));
2077
2078 /* The system link is always allocated */
2079 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2022 2080
2023 /* 2081 /*
2024 * this is performed after the cancel_work calls and the associated 2082 * this is performed after the cancel_work calls and the associated
@@ -2027,9 +2085,11 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
2027 */ 2085 */
2028 wl->flags = 0; 2086 wl->flags = 0;
2029 2087
2088 wl->tx_blocks_freed = 0;
2089
2030 for (i = 0; i < NUM_TX_QUEUES; i++) { 2090 for (i = 0; i < NUM_TX_QUEUES; i++) {
2031 wl->tx_blocks_freed[i] = 0; 2091 wl->tx_pkts_freed[i] = 0;
2032 wl->tx_allocated_blocks[i] = 0; 2092 wl->tx_allocated_pkts[i] = 0;
2033 } 2093 }
2034 2094
2035 wl1271_debugfs_reset(wl); 2095 wl1271_debugfs_reset(wl);
@@ -2061,64 +2121,10 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2061 cancel_work_sync(&wl->recovery_work); 2121 cancel_work_sync(&wl->recovery_work);
2062} 2122}
2063 2123
2064void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
2065{
2066 wl1271_set_default_filters(wl);
2067
2068 /* combine requested filters with current filter config */
2069 filters = wl->filters | filters;
2070
2071 wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
2072
2073 if (filters & FIF_PROMISC_IN_BSS) {
2074 wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
2075 wl->rx_config &= ~CFG_UNI_FILTER_EN;
2076 wl->rx_config |= CFG_BSSID_FILTER_EN;
2077 }
2078 if (filters & FIF_BCN_PRBRESP_PROMISC) {
2079 wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
2080 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
2081 wl->rx_config &= ~CFG_SSID_FILTER_EN;
2082 }
2083 if (filters & FIF_OTHER_BSS) {
2084 wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
2085 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
2086 }
2087 if (filters & FIF_CONTROL) {
2088 wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
2089 wl->rx_filter |= CFG_RX_CTL_EN;
2090 }
2091 if (filters & FIF_FCSFAIL) {
2092 wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
2093 wl->rx_filter |= CFG_RX_FCS_ERROR;
2094 }
2095}
2096
2097static int wl1271_dummy_join(struct wl1271 *wl)
2098{
2099 int ret = 0;
2100 /* we need to use a dummy BSSID for now */
2101 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
2102 0xad, 0xbe, 0xef };
2103
2104 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
2105
2106 /* pass through frames from all BSS */
2107 wl1271_configure_filters(wl, FIF_OTHER_BSS);
2108
2109 ret = wl1271_cmd_join(wl, wl->set_bss_type);
2110 if (ret < 0)
2111 goto out;
2112
2113 set_bit(WL1271_FLAG_JOINED, &wl->flags);
2114
2115out:
2116 return ret;
2117}
2118
2119static int wl1271_join(struct wl1271 *wl, bool set_assoc) 2124static int wl1271_join(struct wl1271 *wl, bool set_assoc)
2120{ 2125{
2121 int ret; 2126 int ret;
2127 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
2122 2128
2123 /* 2129 /*
2124 * One of the side effects of the JOIN command is that is clears 2130 * One of the side effects of the JOIN command is that is clears
@@ -2135,12 +2141,13 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
2135 if (set_assoc) 2141 if (set_assoc)
2136 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 2142 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
2137 2143
2138 ret = wl1271_cmd_join(wl, wl->set_bss_type); 2144 if (is_ibss)
2145 ret = wl12xx_cmd_role_start_ibss(wl);
2146 else
2147 ret = wl12xx_cmd_role_start_sta(wl);
2139 if (ret < 0) 2148 if (ret < 0)
2140 goto out; 2149 goto out;
2141 2150
2142 set_bit(WL1271_FLAG_JOINED, &wl->flags);
2143
2144 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 2151 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
2145 goto out; 2152 goto out;
2146 2153
@@ -2176,20 +2183,16 @@ static int wl1271_unjoin(struct wl1271 *wl)
2176 int ret; 2183 int ret;
2177 2184
2178 /* to stop listening to a channel, we disconnect */ 2185 /* to stop listening to a channel, we disconnect */
2179 ret = wl1271_cmd_disconnect(wl); 2186 ret = wl12xx_cmd_role_stop_sta(wl);
2180 if (ret < 0) 2187 if (ret < 0)
2181 goto out; 2188 goto out;
2182 2189
2183 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
2184 memset(wl->bssid, 0, ETH_ALEN); 2190 memset(wl->bssid, 0, ETH_ALEN);
2185 2191
2186 /* reset TX security counters on a clean disconnect */ 2192 /* reset TX security counters on a clean disconnect */
2187 wl->tx_security_last_seq_lsb = 0; 2193 wl->tx_security_last_seq_lsb = 0;
2188 wl->tx_security_seq = 0; 2194 wl->tx_security_seq = 0;
2189 2195
2190 /* stop filtering packets based on bssid */
2191 wl1271_configure_filters(wl, FIF_OTHER_BSS);
2192
2193out: 2196out:
2194 return ret; 2197 return ret;
2195} 2198}
@@ -2202,13 +2205,29 @@ static void wl1271_set_band_rate(struct wl1271 *wl)
2202 wl->basic_rate_set = wl->conf.tx.basic_rate_5; 2205 wl->basic_rate_set = wl->conf.tx.basic_rate_5;
2203} 2206}
2204 2207
2208static bool wl12xx_is_roc(struct wl1271 *wl)
2209{
2210 u8 role_id;
2211
2212 role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
2213 if (role_id >= WL12XX_MAX_ROLES)
2214 return false;
2215
2216 return true;
2217}
2218
2205static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle) 2219static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
2206{ 2220{
2207 int ret; 2221 int ret;
2208 2222
2209 if (idle) { 2223 if (idle) {
2210 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { 2224 /* no need to croc if we weren't busy (e.g. during boot) */
2211 ret = wl1271_unjoin(wl); 2225 if (wl12xx_is_roc(wl)) {
2226 ret = wl12xx_croc(wl, wl->dev_role_id);
2227 if (ret < 0)
2228 goto out;
2229
2230 ret = wl12xx_cmd_role_stop_dev(wl);
2212 if (ret < 0) 2231 if (ret < 0)
2213 goto out; 2232 goto out;
2214 } 2233 }
@@ -2223,18 +2242,17 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
2223 goto out; 2242 goto out;
2224 set_bit(WL1271_FLAG_IDLE, &wl->flags); 2243 set_bit(WL1271_FLAG_IDLE, &wl->flags);
2225 } else { 2244 } else {
2226 /* increment the session counter */
2227 wl->session_counter++;
2228 if (wl->session_counter >= SESSION_COUNTER_MAX)
2229 wl->session_counter = 0;
2230
2231 /* The current firmware only supports sched_scan in idle */ 2245 /* The current firmware only supports sched_scan in idle */
2232 if (wl->sched_scanning) { 2246 if (wl->sched_scanning) {
2233 wl1271_scan_sched_scan_stop(wl); 2247 wl1271_scan_sched_scan_stop(wl);
2234 ieee80211_sched_scan_stopped(wl->hw); 2248 ieee80211_sched_scan_stopped(wl->hw);
2235 } 2249 }
2236 2250
2237 ret = wl1271_dummy_join(wl); 2251 ret = wl12xx_cmd_role_start_dev(wl);
2252 if (ret < 0)
2253 goto out;
2254
2255 ret = wl12xx_roc(wl, wl->dev_role_id);
2238 if (ret < 0) 2256 if (ret < 0)
2239 goto out; 2257 goto out;
2240 clear_bit(WL1271_FLAG_IDLE, &wl->flags); 2258 clear_bit(WL1271_FLAG_IDLE, &wl->flags);
@@ -2314,11 +2332,34 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2314 wl1271_warning("rate policy for channel " 2332 wl1271_warning("rate policy for channel "
2315 "failed %d", ret); 2333 "failed %d", ret);
2316 2334
2317 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) { 2335 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
2336 if (wl12xx_is_roc(wl)) {
2337 /* roaming */
2338 ret = wl12xx_croc(wl, wl->dev_role_id);
2339 if (ret < 0)
2340 goto out_sleep;
2341 }
2318 ret = wl1271_join(wl, false); 2342 ret = wl1271_join(wl, false);
2319 if (ret < 0) 2343 if (ret < 0)
2320 wl1271_warning("cmd join on channel " 2344 wl1271_warning("cmd join on channel "
2321 "failed %d", ret); 2345 "failed %d", ret);
2346 } else {
2347 /*
2348 * change the ROC channel. do it only if we are
2349 * not idle. otherwise, CROC will be called
2350 * anyway.
2351 */
2352 if (wl12xx_is_roc(wl) &&
2353 !(conf->flags & IEEE80211_CONF_IDLE)) {
2354 ret = wl12xx_croc(wl, wl->dev_role_id);
2355 if (ret < 0)
2356 goto out_sleep;
2357
2358 ret = wl12xx_roc(wl, wl->dev_role_id);
2359 if (ret < 0)
2360 wl1271_warning("roc failed %d",
2361 ret);
2362 }
2322 } 2363 }
2323 } 2364 }
2324 } 2365 }
@@ -2458,18 +2499,11 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2458 goto out_sleep; 2499 goto out_sleep;
2459 } 2500 }
2460 2501
2461 /* determine, whether supported filter values have changed */ 2502 /*
2462 if (changed == 0) 2503 * the fw doesn't provide an api to configure the filters. instead,
2463 goto out_sleep; 2504 * the filters configuration is based on the active roles / ROC
2464 2505 * state.
2465 /* configure filters */ 2506 */
2466 wl->filters = *total;
2467 wl1271_configure_filters(wl, 0);
2468
2469 /* apply configured filters */
2470 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
2471 if (ret < 0)
2472 goto out_sleep;
2473 2507
2474out_sleep: 2508out_sleep:
2475 wl1271_ps_elp_sleep(wl); 2509 wl1271_ps_elp_sleep(wl);
@@ -2541,14 +2575,19 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
2541 bool wep_key_added = false; 2575 bool wep_key_added = false;
2542 2576
2543 for (i = 0; i < MAX_NUM_KEYS; i++) { 2577 for (i = 0; i < MAX_NUM_KEYS; i++) {
2578 u8 hlid;
2544 if (wl->recorded_ap_keys[i] == NULL) 2579 if (wl->recorded_ap_keys[i] == NULL)
2545 break; 2580 break;
2546 2581
2547 key = wl->recorded_ap_keys[i]; 2582 key = wl->recorded_ap_keys[i];
2583 hlid = key->hlid;
2584 if (hlid == WL12XX_INVALID_LINK_ID)
2585 hlid = wl->ap_bcast_hlid;
2586
2548 ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, 2587 ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
2549 key->id, key->key_type, 2588 key->id, key->key_type,
2550 key->key_size, key->key, 2589 key->key_size, key->key,
2551 key->hlid, key->tx_seq_32, 2590 hlid, key->tx_seq_32,
2552 key->tx_seq_16); 2591 key->tx_seq_16);
2553 if (ret < 0) 2592 if (ret < 0)
2554 goto out; 2593 goto out;
@@ -2558,7 +2597,8 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
2558 } 2597 }
2559 2598
2560 if (wep_key_added) { 2599 if (wep_key_added) {
2561 ret = wl1271_cmd_set_ap_default_wep_key(wl, wl->default_key); 2600 ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
2601 wl->ap_bcast_hlid);
2562 if (ret < 0) 2602 if (ret < 0)
2563 goto out; 2603 goto out;
2564 } 2604 }
@@ -2583,7 +2623,7 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2583 wl_sta = (struct wl1271_station *)sta->drv_priv; 2623 wl_sta = (struct wl1271_station *)sta->drv_priv;
2584 hlid = wl_sta->hlid; 2624 hlid = wl_sta->hlid;
2585 } else { 2625 } else {
2586 hlid = WL1271_AP_BROADCAST_HLID; 2626 hlid = wl->ap_bcast_hlid;
2587 } 2627 }
2588 2628
2589 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { 2629 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
@@ -2627,6 +2667,11 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2627 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr)) 2667 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
2628 return 0; 2668 return 0;
2629 2669
2670 /* don't remove key if hlid was already deleted */
2671 if (action == KEY_REMOVE &&
2672 wl->sta_hlid == WL12XX_INVALID_LINK_ID)
2673 return 0;
2674
2630 ret = wl1271_cmd_set_sta_key(wl, action, 2675 ret = wl1271_cmd_set_sta_key(wl, action,
2631 id, key_type, key_size, 2676 id, key_type, key_size,
2632 key, addr, tx_seq_32, 2677 key, addr, tx_seq_32,
@@ -2636,8 +2681,9 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
2636 2681
2637 /* the default WEP key needs to be configured at least once */ 2682 /* the default WEP key needs to be configured at least once */
2638 if (key_type == KEY_WEP) { 2683 if (key_type == KEY_WEP) {
2639 ret = wl1271_cmd_set_sta_default_wep_key(wl, 2684 ret = wl12xx_cmd_set_default_wep_key(wl,
2640 wl->default_key); 2685 wl->default_key,
2686 wl->sta_hlid);
2641 if (ret < 0) 2687 if (ret < 0)
2642 return ret; 2688 return ret;
2643 } 2689 }
@@ -2779,10 +2825,20 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
2779 if (ret < 0) 2825 if (ret < 0)
2780 goto out; 2826 goto out;
2781 2827
2782 ret = wl1271_scan(hw->priv, ssid, len, req); 2828 /* cancel ROC before scanning */
2829 if (wl12xx_is_roc(wl)) {
2830 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
2831 /* don't allow scanning right now */
2832 ret = -EBUSY;
2833 goto out_sleep;
2834 }
2835 wl12xx_croc(wl, wl->dev_role_id);
2836 wl12xx_cmd_role_stop_dev(wl);
2837 }
2783 2838
2839 ret = wl1271_scan(hw->priv, ssid, len, req);
2840out_sleep:
2784 wl1271_ps_elp_sleep(wl); 2841 wl1271_ps_elp_sleep(wl);
2785
2786out: 2842out:
2787 mutex_unlock(&wl->mutex); 2843 mutex_unlock(&wl->mutex);
2788 2844
@@ -3094,20 +3150,20 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3094 if ((changed & BSS_CHANGED_BEACON_ENABLED)) { 3150 if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
3095 if (bss_conf->enable_beacon) { 3151 if (bss_conf->enable_beacon) {
3096 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { 3152 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
3097 ret = wl1271_cmd_start_bss(wl); 3153 ret = wl12xx_cmd_role_start_ap(wl);
3098 if (ret < 0) 3154 if (ret < 0)
3099 goto out; 3155 goto out;
3100 3156
3101 set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
3102 wl1271_debug(DEBUG_AP, "started AP");
3103
3104 ret = wl1271_ap_init_hwenc(wl); 3157 ret = wl1271_ap_init_hwenc(wl);
3105 if (ret < 0) 3158 if (ret < 0)
3106 goto out; 3159 goto out;
3160
3161 set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
3162 wl1271_debug(DEBUG_AP, "started AP");
3107 } 3163 }
3108 } else { 3164 } else {
3109 if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) { 3165 if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
3110 ret = wl1271_cmd_stop_bss(wl); 3166 ret = wl12xx_cmd_role_stop_ap(wl);
3111 if (ret < 0) 3167 if (ret < 0)
3112 goto out; 3168 goto out;
3113 3169
@@ -3120,6 +3176,18 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3120 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed); 3176 ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
3121 if (ret < 0) 3177 if (ret < 0)
3122 goto out; 3178 goto out;
3179
3180 /* Handle HT information change */
3181 if ((changed & BSS_CHANGED_HT) &&
3182 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3183 ret = wl1271_acx_set_ht_information(wl,
3184 bss_conf->ht_operation_mode);
3185 if (ret < 0) {
3186 wl1271_warning("Set ht information failed %d", ret);
3187 goto out;
3188 }
3189 }
3190
3123out: 3191out:
3124 return; 3192 return;
3125} 3193}
@@ -3132,6 +3200,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3132{ 3200{
3133 bool do_join = false, set_assoc = false; 3201 bool do_join = false, set_assoc = false;
3134 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS); 3202 bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
3203 bool ibss_joined = false;
3135 u32 sta_rate_set = 0; 3204 u32 sta_rate_set = 0;
3136 int ret; 3205 int ret;
3137 struct ieee80211_sta *sta; 3206 struct ieee80211_sta *sta;
@@ -3145,14 +3214,28 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3145 goto out; 3214 goto out;
3146 } 3215 }
3147 3216
3148 if ((changed & BSS_CHANGED_BEACON_INT) && is_ibss) 3217 if (changed & BSS_CHANGED_IBSS) {
3218 if (bss_conf->ibss_joined) {
3219 set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
3220 ibss_joined = true;
3221 } else {
3222 if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
3223 &wl->flags)) {
3224 wl1271_unjoin(wl);
3225 wl12xx_cmd_role_start_dev(wl);
3226 wl12xx_roc(wl, wl->dev_role_id);
3227 }
3228 }
3229 }
3230
3231 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
3149 do_join = true; 3232 do_join = true;
3150 3233
3151 /* Need to update the SSID (for filtering etc) */ 3234 /* Need to update the SSID (for filtering etc) */
3152 if ((changed & BSS_CHANGED_BEACON) && is_ibss) 3235 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
3153 do_join = true; 3236 do_join = true;
3154 3237
3155 if ((changed & BSS_CHANGED_BEACON_ENABLED) && is_ibss) { 3238 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
3156 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", 3239 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
3157 bss_conf->enable_beacon ? "enabled" : "disabled"); 3240 bss_conf->enable_beacon ? "enabled" : "disabled");
3158 3241
@@ -3192,17 +3275,17 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3192 if (ret < 0) 3275 if (ret < 0)
3193 goto out; 3276 goto out;
3194 3277
3195 /* filter out all packets not from this BSSID */
3196 wl1271_configure_filters(wl, 0);
3197
3198 /* Need to update the BSSID (for filtering etc) */ 3278 /* Need to update the BSSID (for filtering etc) */
3199 do_join = true; 3279 do_join = true;
3200 } 3280 }
3201 } 3281 }
3202 3282
3203 rcu_read_lock(); 3283 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
3204 sta = ieee80211_find_sta(vif, bss_conf->bssid); 3284 rcu_read_lock();
3205 if (sta) { 3285 sta = ieee80211_find_sta(vif, bss_conf->bssid);
3286 if (!sta)
3287 goto sta_not_found;
3288
3206 /* save the supp_rates of the ap */ 3289 /* save the supp_rates of the ap */
3207 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band]; 3290 sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
3208 if (sta->ht_cap.ht_supported) 3291 if (sta->ht_cap.ht_supported)
@@ -3210,38 +3293,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3210 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET); 3293 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
3211 sta_ht_cap = sta->ht_cap; 3294 sta_ht_cap = sta->ht_cap;
3212 sta_exists = true; 3295 sta_exists = true;
3213 }
3214 rcu_read_unlock();
3215 3296
3216 if (sta_exists) { 3297sta_not_found:
3217 /* handle new association with HT and HT information change */ 3298 rcu_read_unlock();
3218 if ((changed & BSS_CHANGED_HT) &&
3219 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3220 ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
3221 true);
3222 if (ret < 0) {
3223 wl1271_warning("Set ht cap true failed %d",
3224 ret);
3225 goto out;
3226 }
3227 ret = wl1271_acx_set_ht_information(wl,
3228 bss_conf->ht_operation_mode);
3229 if (ret < 0) {
3230 wl1271_warning("Set ht information failed %d",
3231 ret);
3232 goto out;
3233 }
3234 }
3235 /* handle new association without HT and disassociation */
3236 else if (changed & BSS_CHANGED_ASSOC) {
3237 ret = wl1271_acx_set_ht_capabilities(wl, &sta_ht_cap,
3238 false);
3239 if (ret < 0) {
3240 wl1271_warning("Set ht cap false failed %d",
3241 ret);
3242 goto out;
3243 }
3244 }
3245 } 3299 }
3246 3300
3247 if ((changed & BSS_CHANGED_ASSOC)) { 3301 if ((changed & BSS_CHANGED_ASSOC)) {
@@ -3309,7 +3363,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3309 bool was_assoc = 3363 bool was_assoc =
3310 !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED, 3364 !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
3311 &wl->flags); 3365 &wl->flags);
3312 clear_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags); 3366 bool was_ifup =
3367 !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
3368 &wl->flags);
3313 wl->aid = 0; 3369 wl->aid = 0;
3314 3370
3315 /* free probe-request template */ 3371 /* free probe-request template */
@@ -3336,8 +3392,32 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3336 3392
3337 /* restore the bssid filter and go to dummy bssid */ 3393 /* restore the bssid filter and go to dummy bssid */
3338 if (was_assoc) { 3394 if (was_assoc) {
3395 u32 conf_flags = wl->hw->conf.flags;
3396 /*
3397 * we might have to disable roc, if there was
3398 * no IF_OPER_UP notification.
3399 */
3400 if (!was_ifup) {
3401 ret = wl12xx_croc(wl, wl->role_id);
3402 if (ret < 0)
3403 goto out;
3404 }
3405 /*
3406 * (we also need to disable roc in case of
3407 * roaming on the same channel. until we will
3408 * have a better flow...)
3409 */
3410 if (test_bit(wl->dev_role_id, wl->roc_map)) {
3411 ret = wl12xx_croc(wl, wl->dev_role_id);
3412 if (ret < 0)
3413 goto out;
3414 }
3415
3339 wl1271_unjoin(wl); 3416 wl1271_unjoin(wl);
3340 wl1271_dummy_join(wl); 3417 if (!(conf_flags & IEEE80211_CONF_IDLE)) {
3418 wl12xx_cmd_role_start_dev(wl);
3419 wl12xx_roc(wl, wl->dev_role_id);
3420 }
3341 } 3421 }
3342 } 3422 }
3343 } 3423 }
@@ -3398,7 +3478,68 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3398 wl1271_warning("cmd join failed %d", ret); 3478 wl1271_warning("cmd join failed %d", ret);
3399 goto out; 3479 goto out;
3400 } 3480 }
3401 wl1271_check_operstate(wl, ieee80211_get_operstate(vif)); 3481
3482 /* ROC until connected (after EAPOL exchange) */
3483 if (!is_ibss) {
3484 ret = wl12xx_roc(wl, wl->role_id);
3485 if (ret < 0)
3486 goto out;
3487
3488 wl1271_check_operstate(wl,
3489 ieee80211_get_operstate(vif));
3490 }
3491 /*
3492 * stop device role if started (we might already be in
3493 * STA role). TODO: make it better.
3494 */
3495 if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
3496 ret = wl12xx_croc(wl, wl->dev_role_id);
3497 if (ret < 0)
3498 goto out;
3499
3500 ret = wl12xx_cmd_role_stop_dev(wl);
3501 if (ret < 0)
3502 goto out;
3503 }
3504 }
3505
3506 /* Handle new association with HT. Do this after join. */
3507 if (sta_exists) {
3508 if ((changed & BSS_CHANGED_HT) &&
3509 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3510 ret = wl1271_acx_set_ht_capabilities(wl,
3511 &sta_ht_cap,
3512 true,
3513 wl->sta_hlid);
3514 if (ret < 0) {
3515 wl1271_warning("Set ht cap true failed %d",
3516 ret);
3517 goto out;
3518 }
3519 }
3520 /* handle new association without HT and disassociation */
3521 else if (changed & BSS_CHANGED_ASSOC) {
3522 ret = wl1271_acx_set_ht_capabilities(wl,
3523 &sta_ht_cap,
3524 false,
3525 wl->sta_hlid);
3526 if (ret < 0) {
3527 wl1271_warning("Set ht cap false failed %d",
3528 ret);
3529 goto out;
3530 }
3531 }
3532 }
3533
3534 /* Handle HT information change. Done after join. */
3535 if ((changed & BSS_CHANGED_HT) &&
3536 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
3537 ret = wl1271_acx_set_ht_information(wl,
3538 bss_conf->ht_operation_mode);
3539 if (ret < 0) {
3540 wl1271_warning("Set ht information failed %d", ret);
3541 goto out;
3542 }
3402 } 3543 }
3403 3544
3404out: 3545out:
@@ -3568,7 +3709,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
3568 } 3709 }
3569 3710
3570 wl_sta = (struct wl1271_station *)sta->drv_priv; 3711 wl_sta = (struct wl1271_station *)sta->drv_priv;
3571 __set_bit(id, wl->ap_hlid_map); 3712 set_bit(id, wl->ap_hlid_map);
3572 wl_sta->hlid = WL1271_AP_STA_HLID_START + id; 3713 wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
3573 *hlid = wl_sta->hlid; 3714 *hlid = wl_sta->hlid;
3574 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); 3715 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
@@ -3582,19 +3723,14 @@ static void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
3582 if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) 3723 if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
3583 return; 3724 return;
3584 3725
3585 __clear_bit(id, wl->ap_hlid_map); 3726 clear_bit(id, wl->ap_hlid_map);
3586 memset(wl->links[hlid].addr, 0, ETH_ALEN); 3727 memset(wl->links[hlid].addr, 0, ETH_ALEN);
3728 wl->links[hlid].ba_bitmap = 0;
3587 wl1271_tx_reset_link_queues(wl, hlid); 3729 wl1271_tx_reset_link_queues(wl, hlid);
3588 __clear_bit(hlid, &wl->ap_ps_map); 3730 __clear_bit(hlid, &wl->ap_ps_map);
3589 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 3731 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
3590} 3732}
3591 3733
3592bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
3593{
3594 int id = hlid - WL1271_AP_STA_HLID_START;
3595 return test_bit(id, wl->ap_hlid_map);
3596}
3597
3598static int wl1271_op_sta_add(struct ieee80211_hw *hw, 3734static int wl1271_op_sta_add(struct ieee80211_hw *hw,
3599 struct ieee80211_vif *vif, 3735 struct ieee80211_vif *vif,
3600 struct ieee80211_sta *sta) 3736 struct ieee80211_sta *sta)
@@ -3621,7 +3757,15 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
3621 if (ret < 0) 3757 if (ret < 0)
3622 goto out_free_sta; 3758 goto out_free_sta;
3623 3759
3624 ret = wl1271_cmd_add_sta(wl, sta, hlid); 3760 ret = wl12xx_cmd_add_peer(wl, sta, hlid);
3761 if (ret < 0)
3762 goto out_sleep;
3763
3764 ret = wl12xx_cmd_set_peer_state(wl, hlid);
3765 if (ret < 0)
3766 goto out_sleep;
3767
3768 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, hlid);
3625 if (ret < 0) 3769 if (ret < 0)
3626 goto out_sleep; 3770 goto out_sleep;
3627 3771
@@ -3664,7 +3808,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
3664 if (ret < 0) 3808 if (ret < 0)
3665 goto out; 3809 goto out;
3666 3810
3667 ret = wl1271_cmd_remove_sta(wl, wl_sta->hlid); 3811 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
3668 if (ret < 0) 3812 if (ret < 0)
3669 goto out_sleep; 3813 goto out_sleep;
3670 3814
@@ -3686,6 +3830,14 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
3686{ 3830{
3687 struct wl1271 *wl = hw->priv; 3831 struct wl1271 *wl = hw->priv;
3688 int ret; 3832 int ret;
3833 u8 hlid, *ba_bitmap;
3834
3835 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
3836 tid);
3837
3838 /* sanity check - the fields in FW are only 8bits wide */
3839 if (WARN_ON(tid > 0xFF))
3840 return -ENOTSUPP;
3689 3841
3690 mutex_lock(&wl->mutex); 3842 mutex_lock(&wl->mutex);
3691 3843
@@ -3694,6 +3846,20 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
3694 goto out; 3846 goto out;
3695 } 3847 }
3696 3848
3849 if (wl->bss_type == BSS_TYPE_STA_BSS) {
3850 hlid = wl->sta_hlid;
3851 ba_bitmap = &wl->ba_rx_bitmap;
3852 } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
3853 struct wl1271_station *wl_sta;
3854
3855 wl_sta = (struct wl1271_station *)sta->drv_priv;
3856 hlid = wl_sta->hlid;
3857 ba_bitmap = &wl->links[hlid].ba_bitmap;
3858 } else {
3859 ret = -EINVAL;
3860 goto out;
3861 }
3862
3697 ret = wl1271_ps_elp_wakeup(wl); 3863 ret = wl1271_ps_elp_wakeup(wl);
3698 if (ret < 0) 3864 if (ret < 0)
3699 goto out; 3865 goto out;
@@ -3703,20 +3869,46 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
3703 3869
3704 switch (action) { 3870 switch (action) {
3705 case IEEE80211_AMPDU_RX_START: 3871 case IEEE80211_AMPDU_RX_START:
3706 if ((wl->ba_support) && (wl->ba_allowed)) { 3872 if (!wl->ba_support || !wl->ba_allowed) {
3707 ret = wl1271_acx_set_ba_receiver_session(wl, tid, *ssn,
3708 true);
3709 if (!ret)
3710 wl->ba_rx_bitmap |= BIT(tid);
3711 } else {
3712 ret = -ENOTSUPP; 3873 ret = -ENOTSUPP;
3874 break;
3875 }
3876
3877 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
3878 ret = -EBUSY;
3879 wl1271_error("exceeded max RX BA sessions");
3880 break;
3881 }
3882
3883 if (*ba_bitmap & BIT(tid)) {
3884 ret = -EINVAL;
3885 wl1271_error("cannot enable RX BA session on active "
3886 "tid: %d", tid);
3887 break;
3888 }
3889
3890 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
3891 hlid);
3892 if (!ret) {
3893 *ba_bitmap |= BIT(tid);
3894 wl->ba_rx_session_count++;
3713 } 3895 }
3714 break; 3896 break;
3715 3897
3716 case IEEE80211_AMPDU_RX_STOP: 3898 case IEEE80211_AMPDU_RX_STOP:
3717 ret = wl1271_acx_set_ba_receiver_session(wl, tid, 0, false); 3899 if (!(*ba_bitmap & BIT(tid))) {
3718 if (!ret) 3900 ret = -EINVAL;
3719 wl->ba_rx_bitmap &= ~BIT(tid); 3901 wl1271_error("no active RX BA session on tid: %d",
3902 tid);
3903 break;
3904 }
3905
3906 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
3907 hlid);
3908 if (!ret) {
3909 *ba_bitmap &= ~BIT(tid);
3910 wl->ba_rx_session_count--;
3911 }
3720 break; 3912 break;
3721 3913
3722 /* 3914 /*
@@ -4126,7 +4318,7 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
4126 return len; 4318 return len;
4127} 4319}
4128 4320
4129static DEVICE_ATTR(hw_pg_ver, S_IRUGO | S_IWUSR, 4321static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
4130 wl1271_sysfs_show_hw_pg_ver, NULL); 4322 wl1271_sysfs_show_hw_pg_ver, NULL);
4131 4323
4132static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, 4324static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
@@ -4288,7 +4480,7 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
4288 * should be the maximum length possible for a template, without 4480 * should be the maximum length possible for a template, without
4289 * the IEEE80211 header of the template 4481 * the IEEE80211 header of the template
4290 */ 4482 */
4291 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 4483 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_DFLT_SIZE -
4292 sizeof(struct ieee80211_header); 4484 sizeof(struct ieee80211_header);
4293 4485
4294 /* make sure all our channels fit in the scanned_ch bitmask */ 4486 /* make sure all our channels fit in the scanned_ch bitmask */
@@ -4387,8 +4579,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4387 wl->beacon_int = WL1271_DEFAULT_BEACON_INT; 4579 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
4388 wl->default_key = 0; 4580 wl->default_key = 0;
4389 wl->rx_counter = 0; 4581 wl->rx_counter = 0;
4390 wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
4391 wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
4392 wl->psm_entry_retry = 0; 4582 wl->psm_entry_retry = 0;
4393 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 4583 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
4394 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 4584 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
@@ -4401,7 +4591,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4401 wl->hw_pg_ver = -1; 4591 wl->hw_pg_ver = -1;
4402 wl->bss_type = MAX_BSS_TYPE; 4592 wl->bss_type = MAX_BSS_TYPE;
4403 wl->set_bss_type = MAX_BSS_TYPE; 4593 wl->set_bss_type = MAX_BSS_TYPE;
4404 wl->fw_bss_type = MAX_BSS_TYPE;
4405 wl->last_tx_hlid = 0; 4594 wl->last_tx_hlid = 0;
4406 wl->ap_ps_map = 0; 4595 wl->ap_ps_map = 0;
4407 wl->ap_fw_ps_map = 0; 4596 wl->ap_fw_ps_map = 0;
@@ -4410,12 +4599,22 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
4410 wl->sched_scanning = false; 4599 wl->sched_scanning = false;
4411 wl->tx_security_seq = 0; 4600 wl->tx_security_seq = 0;
4412 wl->tx_security_last_seq_lsb = 0; 4601 wl->tx_security_last_seq_lsb = 0;
4413 4602 wl->role_id = WL12XX_INVALID_ROLE_ID;
4603 wl->system_hlid = WL12XX_SYSTEM_HLID;
4604 wl->sta_hlid = WL12XX_INVALID_LINK_ID;
4605 wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
4606 wl->dev_hlid = WL12XX_INVALID_LINK_ID;
4607 wl->session_counter = 0;
4608 wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
4609 wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
4414 setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer, 4610 setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
4415 (unsigned long) wl); 4611 (unsigned long) wl);
4416 wl->fwlog_size = 0; 4612 wl->fwlog_size = 0;
4417 init_waitqueue_head(&wl->fwlog_waitq); 4613 init_waitqueue_head(&wl->fwlog_waitq);
4418 4614
4615 /* The system link is always allocated */
4616 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
4617
4419 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 4618 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
4420 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 4619 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
4421 wl->tx_frames[i] = NULL; 4620 wl->tx_frames[i] = NULL;
@@ -4522,6 +4721,10 @@ int wl1271_free_hw(struct wl1271 *wl)
4522 mutex_unlock(&wl->mutex); 4721 mutex_unlock(&wl->mutex);
4523 4722
4524 device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr); 4723 device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
4724
4725 device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
4726
4727 device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
4525 platform_device_unregister(wl->plat_dev); 4728 platform_device_unregister(wl->plat_dev);
4526 free_page((unsigned long)wl->fwlog); 4729 free_page((unsigned long)wl->fwlog);
4527 dev_kfree_skb(wl->dummy_packet); 4730 dev_kfree_skb(wl->dummy_packet);
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index 3548377ab9c2..4b720b1b9f65 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -226,8 +226,8 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
226 if (test_bit(hlid, &wl->ap_ps_map)) 226 if (test_bit(hlid, &wl->ap_ps_map))
227 return; 227 return;
228 228
229 wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d blks %d " 229 wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
230 "clean_queues %d", hlid, wl->links[hlid].allocated_blks, 230 "clean_queues %d", hlid, wl->links[hlid].allocated_pkts,
231 clean_queues); 231 clean_queues);
232 232
233 rcu_read_lock(); 233 rcu_read_lock();
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 440a4ee9cb42..3f570f397586 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -296,81 +296,6 @@
296 ===============================================*/ 296 ===============================================*/
297#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) 297#define REG_EVENT_MAILBOX_PTR (SCR_PAD1)
298 298
299
300/* Misc */
301
302#define REG_ENABLE_TX_RX (ENABLE)
303/*
304 * Rx configuration (filter) information element
305 * ---------------------------------------------
306 */
307#define REG_RX_CONFIG (RX_CFG)
308#define REG_RX_FILTER (RX_FILTER_CFG)
309
310
311#define RX_CFG_ENABLE_PHY_HEADER_PLCP 0x0002
312
313/* promiscuous - receives all valid frames */
314#define RX_CFG_PROMISCUOUS 0x0008
315
316/* receives frames from any BSSID */
317#define RX_CFG_BSSID 0x0020
318
319/* receives frames destined to any MAC address */
320#define RX_CFG_MAC 0x0010
321
322#define RX_CFG_ENABLE_ONLY_MY_DEST_MAC 0x0010
323#define RX_CFG_ENABLE_ANY_DEST_MAC 0x0000
324#define RX_CFG_ENABLE_ONLY_MY_BSSID 0x0020
325#define RX_CFG_ENABLE_ANY_BSSID 0x0000
326
327/* discards all broadcast frames */
328#define RX_CFG_DISABLE_BCAST 0x0200
329
330#define RX_CFG_ENABLE_ONLY_MY_SSID 0x0400
331#define RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR 0x0800
332#define RX_CFG_COPY_RX_STATUS 0x2000
333#define RX_CFG_TSF 0x10000
334
335#define RX_CONFIG_OPTION_ANY_DST_MY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
336 RX_CFG_ENABLE_ONLY_MY_BSSID)
337
338#define RX_CONFIG_OPTION_MY_DST_ANY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
339 | RX_CFG_ENABLE_ANY_BSSID)
340
341#define RX_CONFIG_OPTION_ANY_DST_ANY_BSS (RX_CFG_ENABLE_ANY_DEST_MAC | \
342 RX_CFG_ENABLE_ANY_BSSID)
343
344#define RX_CONFIG_OPTION_MY_DST_MY_BSS (RX_CFG_ENABLE_ONLY_MY_DEST_MAC\
345 | RX_CFG_ENABLE_ONLY_MY_BSSID)
346
347#define RX_CONFIG_OPTION_FOR_SCAN (RX_CFG_ENABLE_PHY_HEADER_PLCP \
348 | RX_CFG_ENABLE_RX_CMPLT_FCS_ERROR \
349 | RX_CFG_COPY_RX_STATUS | RX_CFG_TSF)
350
351#define RX_CONFIG_OPTION_FOR_MEASUREMENT (RX_CFG_ENABLE_ANY_DEST_MAC)
352
353#define RX_CONFIG_OPTION_FOR_JOIN (RX_CFG_ENABLE_ONLY_MY_BSSID | \
354 RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
355
356#define RX_CONFIG_OPTION_FOR_IBSS_JOIN (RX_CFG_ENABLE_ONLY_MY_SSID | \
357 RX_CFG_ENABLE_ONLY_MY_DEST_MAC)
358
359#define RX_FILTER_OPTION_DEF (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
360 | CFG_RX_CTL_EN | CFG_RX_BCN_EN\
361 | CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
362
363#define RX_FILTER_OPTION_FILTER_ALL 0
364
365#define RX_FILTER_OPTION_DEF_PRSP_BCN (CFG_RX_PRSP_EN | CFG_RX_MGMT_EN\
366 | CFG_RX_RCTS_ACK | CFG_RX_BCN_EN)
367
368#define RX_FILTER_OPTION_JOIN (CFG_RX_MGMT_EN | CFG_RX_DATA_EN\
369 | CFG_RX_BCN_EN | CFG_RX_AUTH_EN\
370 | CFG_RX_ASSOC_EN | CFG_RX_RCTS_ACK\
371 | CFG_RX_PRSP_EN)
372
373
374/*=============================================== 299/*===============================================
375 EEPROM Read/Write Request 32bit RW 300 EEPROM Read/Write Request 32bit RW
376 ------------------------------------------ 301 ------------------------------------------
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index 0450fb49dbb1..78d8410da1f4 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -30,20 +30,28 @@
30#include "rx.h" 30#include "rx.h"
31#include "io.h" 31#include "io.h"
32 32
33static u8 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status, 33static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
34 u32 drv_rx_counter) 34 u32 drv_rx_counter)
35{ 35{
36 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 36 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
37 RX_MEM_BLOCK_MASK; 37 RX_MEM_BLOCK_MASK;
38} 38}
39 39
40static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status, 40static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status,
41 u32 drv_rx_counter) 41 u32 drv_rx_counter)
42{ 42{
43 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 43 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
44 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; 44 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
45} 45}
46 46
47static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status,
48 u32 drv_rx_counter)
49{
50 /* Convert the value to bool */
51 return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
52 RX_BUF_UNALIGNED_PAYLOAD);
53}
54
47static void wl1271_rx_status(struct wl1271 *wl, 55static void wl1271_rx_status(struct wl1271 *wl,
48 struct wl1271_rx_descriptor *desc, 56 struct wl1271_rx_descriptor *desc,
49 struct ieee80211_rx_status *status, 57 struct ieee80211_rx_status *status,
@@ -89,7 +97,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
89 } 97 }
90} 98}
91 99
92static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length) 100static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
101 bool unaligned)
93{ 102{
94 struct wl1271_rx_descriptor *desc; 103 struct wl1271_rx_descriptor *desc;
95 struct sk_buff *skb; 104 struct sk_buff *skb;
@@ -97,6 +106,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
97 u8 *buf; 106 u8 *buf;
98 u8 beacon = 0; 107 u8 beacon = 0;
99 u8 is_data = 0; 108 u8 is_data = 0;
109 u8 reserved = unaligned ? NET_IP_ALIGN : 0;
100 110
101 /* 111 /*
102 * In PLT mode we seem to get frames and mac80211 warns about them, 112 * In PLT mode we seem to get frames and mac80211 warns about them,
@@ -131,17 +141,25 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
131 return -EINVAL; 141 return -EINVAL;
132 } 142 }
133 143
134 skb = __dev_alloc_skb(length, GFP_KERNEL); 144 /* skb length not included rx descriptor */
145 skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL);
135 if (!skb) { 146 if (!skb) {
136 wl1271_error("Couldn't allocate RX frame"); 147 wl1271_error("Couldn't allocate RX frame");
137 return -ENOMEM; 148 return -ENOMEM;
138 } 149 }
139 150
140 buf = skb_put(skb, length); 151 /* reserve the unaligned payload(if any) */
141 memcpy(buf, data, length); 152 skb_reserve(skb, reserved);
142 153
143 /* now we pull the descriptor out of the buffer */ 154 buf = skb_put(skb, length - sizeof(*desc));
144 skb_pull(skb, sizeof(*desc)); 155
156 /*
157 * Copy packets from aggregation buffer to the skbs without rx
158 * descriptor and with packet payload aligned care. In case of unaligned
159 * packets copy the packets in offset of 2 bytes guarantee IP header
160 * payload aligned to 4 bytes.
161 */
162 memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
145 163
146 hdr = (struct ieee80211_hdr *)skb->data; 164 hdr = (struct ieee80211_hdr *)skb->data;
147 if (ieee80211_is_beacon(hdr->frame_control)) 165 if (ieee80211_is_beacon(hdr->frame_control))
@@ -163,7 +181,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length)
163 return is_data; 181 return is_data;
164} 182}
165 183
166void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status) 184void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
167{ 185{
168 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; 186 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
169 u32 buf_size; 187 u32 buf_size;
@@ -175,12 +193,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
175 u32 pkt_offset; 193 u32 pkt_offset;
176 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 194 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
177 bool had_data = false; 195 bool had_data = false;
196 bool unaligned = false;
178 197
179 while (drv_rx_counter != fw_rx_counter) { 198 while (drv_rx_counter != fw_rx_counter) {
180 buf_size = 0; 199 buf_size = 0;
181 rx_counter = drv_rx_counter; 200 rx_counter = drv_rx_counter;
182 while (rx_counter != fw_rx_counter) { 201 while (rx_counter != fw_rx_counter) {
183 pkt_length = wl1271_rx_get_buf_size(status, rx_counter); 202 pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
184 if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE) 203 if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
185 break; 204 break;
186 buf_size += pkt_length; 205 buf_size += pkt_length;
@@ -199,7 +218,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
199 * For aggregated packets, only the first memory block 218 * For aggregated packets, only the first memory block
200 * should be retrieved. The FW takes care of the rest. 219 * should be retrieved. The FW takes care of the rest.
201 */ 220 */
202 mem_block = wl1271_rx_get_mem_block(status, 221 mem_block = wl12xx_rx_get_mem_block(status,
203 drv_rx_counter); 222 drv_rx_counter);
204 223
205 wl->rx_mem_pool_addr.addr = (mem_block << 8) + 224 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
@@ -220,8 +239,12 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
220 /* Split data into separate packets */ 239 /* Split data into separate packets */
221 pkt_offset = 0; 240 pkt_offset = 0;
222 while (pkt_offset < buf_size) { 241 while (pkt_offset < buf_size) {
223 pkt_length = wl1271_rx_get_buf_size(status, 242 pkt_length = wl12xx_rx_get_buf_size(status,
224 drv_rx_counter); 243 drv_rx_counter);
244
245 unaligned = wl12xx_rx_get_unaligned(status,
246 drv_rx_counter);
247
225 /* 248 /*
226 * the handle data call can only fail in memory-outage 249 * the handle data call can only fail in memory-outage
227 * conditions, in that case the received frame will just 250 * conditions, in that case the received frame will just
@@ -229,7 +252,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
229 */ 252 */
230 if (wl1271_rx_handle_data(wl, 253 if (wl1271_rx_handle_data(wl,
231 wl->aggr_buf + pkt_offset, 254 wl->aggr_buf + pkt_offset,
232 pkt_length) == 1) 255 pkt_length, unaligned) == 1)
233 had_data = true; 256 had_data = true;
234 257
235 wl->rx_counter++; 258 wl->rx_counter++;
@@ -260,14 +283,3 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
260 jiffies + msecs_to_jiffies(timeout)); 283 jiffies + msecs_to_jiffies(timeout));
261 } 284 }
262} 285}
263
264void wl1271_set_default_filters(struct wl1271 *wl)
265{
266 if (wl->bss_type == BSS_TYPE_AP_BSS) {
267 wl->rx_config = WL1271_DEFAULT_AP_RX_CONFIG;
268 wl->rx_filter = WL1271_DEFAULT_AP_RX_FILTER;
269 } else {
270 wl->rx_config = WL1271_DEFAULT_STA_RX_CONFIG;
271 wl->rx_filter = WL1271_DEFAULT_STA_RX_FILTER;
272 }
273}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/wl12xx/rx.h
index c88e3fa1d603..86ba6b1d0cdc 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -86,16 +86,18 @@
86 * Bits 3-5 - process_id tag (AP mode FW) 86 * Bits 3-5 - process_id tag (AP mode FW)
87 * Bits 6-7 - reserved 87 * Bits 6-7 - reserved
88 */ 88 */
89#define WL1271_RX_DESC_STATUS_MASK 0x07 89#define WL1271_RX_DESC_STATUS_MASK 0x03
90 90
91#define WL1271_RX_DESC_SUCCESS 0x00 91#define WL1271_RX_DESC_SUCCESS 0x00
92#define WL1271_RX_DESC_DECRYPT_FAIL 0x01 92#define WL1271_RX_DESC_DECRYPT_FAIL 0x01
93#define WL1271_RX_DESC_MIC_FAIL 0x02 93#define WL1271_RX_DESC_MIC_FAIL 0x02
94#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03 94#define WL1271_RX_DESC_DRIVER_RX_Q_FAIL 0x03
95 95
96#define RX_MEM_BLOCK_MASK 0xFF 96#define RX_MEM_BLOCK_MASK 0xFF
97#define RX_BUF_SIZE_MASK 0xFFF00 97#define RX_BUF_SIZE_MASK 0xFFF00
98#define RX_BUF_SIZE_SHIFT_DIV 6 98#define RX_BUF_SIZE_SHIFT_DIV 6
99/* If set, the start of IP payload is not 4 bytes aligned */
100#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
99 101
100enum { 102enum {
101 WL12XX_RX_CLASS_UNKNOWN, 103 WL12XX_RX_CLASS_UNKNOWN,
@@ -119,16 +121,12 @@ struct wl1271_rx_descriptor {
119 u8 snr; 121 u8 snr;
120 __le32 timestamp; 122 __le32 timestamp;
121 u8 packet_class; 123 u8 packet_class;
122 union { 124 u8 hlid;
123 u8 process_id; /* STA FW */
124 u8 hlid; /* AP FW */
125 } __packed;
126 u8 pad_len; 125 u8 pad_len;
127 u8 reserved; 126 u8 reserved;
128} __packed; 127} __packed;
129 128
130void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status); 129void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status);
131u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 130u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
132void wl1271_set_default_filters(struct wl1271 *wl);
133 131
134#endif 132#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index edfe01c321ca..7229eaa89018 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -33,6 +33,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
33{ 33{
34 struct delayed_work *dwork; 34 struct delayed_work *dwork;
35 struct wl1271 *wl; 35 struct wl1271 *wl;
36 int ret;
37 bool is_sta, is_ibss;
36 38
37 dwork = container_of(work, struct delayed_work, work); 39 dwork = container_of(work, struct delayed_work, work);
38 wl = container_of(dwork, struct wl1271, scan_complete_work); 40 wl = container_of(dwork, struct wl1271, scan_complete_work);
@@ -50,21 +52,34 @@ void wl1271_scan_complete_work(struct work_struct *work)
50 wl->scan.state = WL1271_SCAN_STATE_IDLE; 52 wl->scan.state = WL1271_SCAN_STATE_IDLE;
51 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 53 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
52 wl->scan.req = NULL; 54 wl->scan.req = NULL;
53 ieee80211_scan_completed(wl->hw, false);
54 55
55 /* restore hardware connection monitoring template */ 56 ret = wl1271_ps_elp_wakeup(wl);
57 if (ret < 0)
58 goto out;
59
56 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 60 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
57 if (wl1271_ps_elp_wakeup(wl) == 0) { 61 /* restore hardware connection monitoring template */
58 wl1271_cmd_build_ap_probe_req(wl, wl->probereq); 62 wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
59 wl1271_ps_elp_sleep(wl); 63 }
60 } 64
65 /* return to ROC if needed */
66 is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
67 is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
68 if ((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
69 (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) {
70 /* restore remain on channel */
71 wl12xx_cmd_role_start_dev(wl);
72 wl12xx_roc(wl, wl->dev_role_id);
61 } 73 }
74 wl1271_ps_elp_sleep(wl);
62 75
63 if (wl->scan.failed) { 76 if (wl->scan.failed) {
64 wl1271_info("Scan completed due to error."); 77 wl1271_info("Scan completed due to error.");
65 wl12xx_queue_recovery_work(wl); 78 wl12xx_queue_recovery_work(wl);
66 } 79 }
67 80
81 ieee80211_scan_completed(wl->hw, false);
82
68out: 83out:
69 mutex_unlock(&wl->mutex); 84 mutex_unlock(&wl->mutex);
70 85
@@ -156,6 +171,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
156 if (passive || wl->scan.req->n_ssids == 0) 171 if (passive || wl->scan.req->n_ssids == 0)
157 scan_options |= WL1271_SCAN_OPT_PASSIVE; 172 scan_options |= WL1271_SCAN_OPT_PASSIVE;
158 173
174 if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
175 ret = -EINVAL;
176 goto out;
177 }
178 cmd->params.role_id = wl->role_id;
159 cmd->params.scan_options = cpu_to_le16(scan_options); 179 cmd->params.scan_options = cpu_to_le16(scan_options);
160 180
161 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req, 181 cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -167,10 +187,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
167 } 187 }
168 188
169 cmd->params.tx_rate = cpu_to_le32(basic_rate); 189 cmd->params.tx_rate = cpu_to_le32(basic_rate);
170 cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
171 cmd->params.rx_filter_options =
172 cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
173
174 cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs; 190 cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
175 cmd->params.tx_rate = cpu_to_le32(basic_rate); 191 cmd->params.tx_rate = cpu_to_le32(basic_rate);
176 cmd->params.tid_trigger = 0; 192 cmd->params.tid_trigger = 0;
@@ -186,6 +202,8 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
186 memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len); 202 memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
187 } 203 }
188 204
205 memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
206
189 ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len, 207 ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
190 wl->scan.req->ie, wl->scan.req->ie_len, 208 wl->scan.req->ie, wl->scan.req->ie_len,
191 band); 209 band);
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 0b2a2987439d..92115156522f 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -46,7 +46,10 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl);
46#define WL1271_SCAN_CURRENT_TX_PWR 0 46#define WL1271_SCAN_CURRENT_TX_PWR 0
47#define WL1271_SCAN_OPT_ACTIVE 0 47#define WL1271_SCAN_OPT_ACTIVE 0
48#define WL1271_SCAN_OPT_PASSIVE 1 48#define WL1271_SCAN_OPT_PASSIVE 1
49#define WL1271_SCAN_OPT_TRIGGERED_SCAN 2
49#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 50#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
51/* scan even if we fail to enter psm */
52#define WL1271_SCAN_OPT_FORCE 8
50#define WL1271_SCAN_BAND_2_4_GHZ 0 53#define WL1271_SCAN_BAND_2_4_GHZ 0
51#define WL1271_SCAN_BAND_5_GHZ 1 54#define WL1271_SCAN_BAND_5_GHZ 1
52 55
@@ -62,27 +65,27 @@ enum {
62}; 65};
63 66
64struct basic_scan_params { 67struct basic_scan_params {
65 __le32 rx_config_options;
66 __le32 rx_filter_options;
67 /* Scan option flags (WL1271_SCAN_OPT_*) */ 68 /* Scan option flags (WL1271_SCAN_OPT_*) */
68 __le16 scan_options; 69 __le16 scan_options;
70 u8 role_id;
69 /* Number of scan channels in the list (maximum 30) */ 71 /* Number of scan channels in the list (maximum 30) */
70 u8 n_ch; 72 u8 n_ch;
71 /* This field indicates the number of probe requests to send 73 /* This field indicates the number of probe requests to send
72 per channel for an active scan */ 74 per channel for an active scan */
73 u8 n_probe_reqs; 75 u8 n_probe_reqs;
74 /* Rate bit field for sending the probes */
75 __le32 tx_rate;
76 u8 tid_trigger; 76 u8 tid_trigger;
77 u8 ssid_len; 77 u8 ssid_len;
78 /* in order to align */ 78 u8 use_ssid_list;
79 u8 padding1[2]; 79
80 /* Rate bit field for sending the probes */
81 __le32 tx_rate;
82
80 u8 ssid[IEEE80211_MAX_SSID_LEN]; 83 u8 ssid[IEEE80211_MAX_SSID_LEN];
81 /* Band to scan */ 84 /* Band to scan */
82 u8 band; 85 u8 band;
83 u8 use_ssid_list; 86
84 u8 scan_tag; 87 u8 scan_tag;
85 u8 padding2; 88 u8 padding2[2];
86} __packed; 89} __packed;
87 90
88struct basic_scan_channel_params { 91struct basic_scan_channel_params {
@@ -105,6 +108,10 @@ struct wl1271_cmd_scan {
105 108
106 struct basic_scan_params params; 109 struct basic_scan_params params;
107 struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS]; 110 struct basic_scan_channel_params channels[WL1271_SCAN_MAX_CHANNELS];
111
112 /* src mac address */
113 u8 addr[ETH_ALEN];
114 u8 padding[2];
108} __packed; 115} __packed;
109 116
110struct wl1271_cmd_trigger_scan_to { 117struct wl1271_cmd_trigger_scan_to {
@@ -184,7 +191,7 @@ struct wl1271_cmd_sched_scan_config {
184} __packed; 191} __packed;
185 192
186 193
187#define SCHED_SCAN_MAX_SSIDS 8 194#define SCHED_SCAN_MAX_SSIDS 16
188 195
189enum { 196enum {
190 SCAN_SSID_TYPE_PUBLIC = 0, 197 SCAN_SSID_TYPE_PUBLIC = 0,
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 5cf18c2c23f0..ac2e5661397c 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -412,7 +412,5 @@ module_exit(wl1271_exit);
412MODULE_LICENSE("GPL"); 412MODULE_LICENSE("GPL");
413MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 413MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
414MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 414MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
415MODULE_FIRMWARE(WL1271_FW_NAME); 415MODULE_FIRMWARE(WL127X_FW_NAME);
416MODULE_FIRMWARE(WL128X_FW_NAME); 416MODULE_FIRMWARE(WL128X_FW_NAME);
417MODULE_FIRMWARE(WL127X_AP_FW_NAME);
418MODULE_FIRMWARE(WL128X_AP_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
index f28915392877..c3610492852e 100644
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ b/drivers/net/wireless/wl12xx/sdio_test.c
@@ -193,7 +193,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
193 ret = request_firmware(&fw, WL128X_FW_NAME, 193 ret = request_firmware(&fw, WL128X_FW_NAME,
194 wl1271_wl_to_dev(wl)); 194 wl1271_wl_to_dev(wl));
195 else 195 else
196 ret = request_firmware(&fw, WL1271_FW_NAME, 196 ret = request_firmware(&fw, WL127X_FW_NAME,
197 wl1271_wl_to_dev(wl)); 197 wl1271_wl_to_dev(wl));
198 198
199 if (ret < 0) { 199 if (ret < 0) {
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index e0b3736d7e19..0f9718677860 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -486,8 +486,6 @@ module_exit(wl1271_exit);
486MODULE_LICENSE("GPL"); 486MODULE_LICENSE("GPL");
487MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 487MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
488MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 488MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
489MODULE_FIRMWARE(WL1271_FW_NAME); 489MODULE_FIRMWARE(WL127X_FW_NAME);
490MODULE_FIRMWARE(WL128X_FW_NAME); 490MODULE_FIRMWARE(WL128X_FW_NAME);
491MODULE_FIRMWARE(WL127X_AP_FW_NAME);
492MODULE_FIRMWARE(WL128X_AP_FW_NAME);
493MODULE_ALIAS("spi:wl1271"); 491MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index 48fde96ce0d4..0f1578577b1a 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -37,9 +37,10 @@ static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
37 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 37 bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
38 38
39 if (is_ap) 39 if (is_ap)
40 ret = wl1271_cmd_set_ap_default_wep_key(wl, id); 40 ret = wl12xx_cmd_set_default_wep_key(wl, id,
41 wl->ap_bcast_hlid);
41 else 42 else
42 ret = wl1271_cmd_set_sta_default_wep_key(wl, id); 43 ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
43 44
44 if (ret < 0) 45 if (ret < 0)
45 return ret; 46 return ret;
@@ -77,6 +78,7 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
77 struct sk_buff *skb) 78 struct sk_buff *skb)
78{ 79{
79 struct ieee80211_hdr *hdr; 80 struct ieee80211_hdr *hdr;
81 int ret;
80 82
81 hdr = (struct ieee80211_hdr *)(skb->data + 83 hdr = (struct ieee80211_hdr *)(skb->data +
82 sizeof(struct wl1271_tx_hw_descr)); 84 sizeof(struct wl1271_tx_hw_descr));
@@ -90,9 +92,19 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
90 if (!ieee80211_is_auth(hdr->frame_control)) 92 if (!ieee80211_is_auth(hdr->frame_control))
91 return 0; 93 return 0;
92 94
93 wl1271_configure_filters(wl, FIF_OTHER_BSS); 95 if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
96 goto out;
94 97
95 return wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter); 98 wl1271_debug(DEBUG_CMD, "starting device role for roaming");
99 ret = wl12xx_cmd_role_start_dev(wl);
100 if (ret < 0)
101 goto out;
102
103 ret = wl12xx_roc(wl, wl->dev_role_id);
104 if (ret < 0)
105 goto out;
106out:
107 return 0;
96} 108}
97 109
98static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl, 110static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
@@ -114,24 +126,29 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
114static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid) 126static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
115{ 127{
116 bool fw_ps; 128 bool fw_ps;
117 u8 tx_blks; 129 u8 tx_pkts;
118 130
119 /* only regulate station links */ 131 /* only regulate station links */
120 if (hlid < WL1271_AP_STA_HLID_START) 132 if (hlid < WL1271_AP_STA_HLID_START)
121 return; 133 return;
122 134
123 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map); 135 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
124 tx_blks = wl->links[hlid].allocated_blks; 136 tx_pkts = wl->links[hlid].allocated_pkts;
125 137
126 /* 138 /*
127 * if in FW PS and there is enough data in FW we can put the link 139 * if in FW PS and there is enough data in FW we can put the link
128 * into high-level PS and clean out its TX queues. 140 * into high-level PS and clean out its TX queues.
129 */ 141 */
130 if (fw_ps && tx_blks >= WL1271_PS_STA_MAX_BLOCKS) 142 if (fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
131 wl1271_ps_link_start(wl, hlid, true); 143 wl1271_ps_link_start(wl, hlid, true);
132} 144}
133 145
134u8 wl1271_tx_get_hlid(struct sk_buff *skb) 146static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
147{
148 return wl->dummy_packet == skb;
149}
150
151u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
135{ 152{
136 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb); 153 struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
137 154
@@ -144,14 +161,32 @@ u8 wl1271_tx_get_hlid(struct sk_buff *skb)
144 } else { 161 } else {
145 struct ieee80211_hdr *hdr; 162 struct ieee80211_hdr *hdr;
146 163
164 if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
165 return wl->system_hlid;
166
147 hdr = (struct ieee80211_hdr *)skb->data; 167 hdr = (struct ieee80211_hdr *)skb->data;
148 if (ieee80211_is_mgmt(hdr->frame_control)) 168 if (ieee80211_is_mgmt(hdr->frame_control))
149 return WL1271_AP_GLOBAL_HLID; 169 return wl->ap_global_hlid;
150 else 170 else
151 return WL1271_AP_BROADCAST_HLID; 171 return wl->ap_bcast_hlid;
152 } 172 }
153} 173}
154 174
175static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
176{
177 if (wl12xx_is_dummy_packet(wl, skb))
178 return wl->system_hlid;
179
180 if (wl->bss_type == BSS_TYPE_AP_BSS)
181 return wl12xx_tx_get_hlid_ap(wl, skb);
182
183 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
184 test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))
185 return wl->sta_hlid;
186 else
187 return wl->dev_hlid;
188}
189
155static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, 190static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
156 unsigned int packet_length) 191 unsigned int packet_length)
157{ 192{
@@ -169,12 +204,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
169 u32 len; 204 u32 len;
170 u32 total_blocks; 205 u32 total_blocks;
171 int id, ret = -EBUSY, ac; 206 int id, ret = -EBUSY, ac;
172 u32 spare_blocks;
173 207
174 if (unlikely(wl->quirks & WL12XX_QUIRK_USE_2_SPARE_BLOCKS)) 208 /* we use 1 spare block */
175 spare_blocks = 2; 209 u32 spare_blocks = 1;
176 else
177 spare_blocks = 1;
178 210
179 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 211 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
180 return -EAGAIN; 212 return -EAGAIN;
@@ -206,12 +238,14 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
206 desc->id = id; 238 desc->id = id;
207 239
208 wl->tx_blocks_available -= total_blocks; 240 wl->tx_blocks_available -= total_blocks;
241 wl->tx_allocated_blocks += total_blocks;
209 242
210 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 243 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
211 wl->tx_allocated_blocks[ac] += total_blocks; 244 wl->tx_allocated_pkts[ac]++;
212 245
213 if (wl->bss_type == BSS_TYPE_AP_BSS) 246 if (wl->bss_type == BSS_TYPE_AP_BSS &&
214 wl->links[hlid].allocated_blks += total_blocks; 247 hlid >= WL1271_AP_STA_HLID_START)
248 wl->links[hlid].allocated_pkts++;
215 249
216 ret = 0; 250 ret = 0;
217 251
@@ -225,11 +259,6 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
225 return ret; 259 return ret;
226} 260}
227 261
228static bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
229{
230 return wl->dummy_packet == skb;
231}
232
233static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 262static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
234 u32 extra, struct ieee80211_tx_info *control, 263 u32 extra, struct ieee80211_tx_info *control,
235 u8 hlid) 264 u8 hlid)
@@ -280,9 +309,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
280 wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 309 wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
281 } 310 }
282 311
283 if (wl->bss_type != BSS_TYPE_AP_BSS) { 312 desc->hlid = hlid;
284 desc->aid = hlid;
285 313
314 if (wl->bss_type != BSS_TYPE_AP_BSS) {
286 /* if the packets are destined for AP (have a STA entry) 315 /* if the packets are destined for AP (have a STA entry)
287 send them with AP rate policies, otherwise use default 316 send them with AP rate policies, otherwise use default
288 basic rates */ 317 basic rates */
@@ -291,18 +320,12 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
291 else 320 else
292 rate_idx = ACX_TX_BASIC_RATE; 321 rate_idx = ACX_TX_BASIC_RATE;
293 } else { 322 } else {
294 desc->hlid = hlid; 323 if (hlid == wl->ap_global_hlid)
295 switch (hlid) {
296 case WL1271_AP_GLOBAL_HLID:
297 rate_idx = ACX_TX_AP_MODE_MGMT_RATE; 324 rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
298 break; 325 else if (hlid == wl->ap_bcast_hlid)
299 case WL1271_AP_BROADCAST_HLID:
300 rate_idx = ACX_TX_AP_MODE_BCST_RATE; 326 rate_idx = ACX_TX_AP_MODE_BCST_RATE;
301 break; 327 else
302 default:
303 rate_idx = ac; 328 rate_idx = ac;
304 break;
305 }
306 } 329 }
307 330
308 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; 331 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
@@ -376,10 +399,11 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
376 } 399 }
377 } 400 }
378 401
379 if (wl->bss_type == BSS_TYPE_AP_BSS) 402 hlid = wl1271_tx_get_hlid(wl, skb);
380 hlid = wl1271_tx_get_hlid(skb); 403 if (hlid == WL12XX_INVALID_LINK_ID) {
381 else 404 wl1271_error("invalid hlid. dropping skb 0x%p", skb);
382 hlid = TX_HW_DEFAULT_AID; 405 return -EINVAL;
406 }
383 407
384 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid); 408 ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
385 if (ret < 0) 409 if (ret < 0)
@@ -462,20 +486,24 @@ void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
462static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl, 486static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
463 struct sk_buff_head *queues) 487 struct sk_buff_head *queues)
464{ 488{
465 int i, q = -1; 489 int i, q = -1, ac;
466 u32 min_blks = 0xffffffff; 490 u32 min_pkts = 0xffffffff;
467 491
468 /* 492 /*
469 * Find a non-empty ac where: 493 * Find a non-empty ac where:
470 * 1. There are packets to transmit 494 * 1. There are packets to transmit
471 * 2. The FW has the least allocated blocks 495 * 2. The FW has the least allocated blocks
496 *
497 * We prioritize the ACs according to VO>VI>BE>BK
472 */ 498 */
473 for (i = 0; i < NUM_TX_QUEUES; i++) 499 for (i = 0; i < NUM_TX_QUEUES; i++) {
474 if (!skb_queue_empty(&queues[i]) && 500 ac = wl1271_tx_get_queue(i);
475 (wl->tx_allocated_blocks[i] < min_blks)) { 501 if (!skb_queue_empty(&queues[ac]) &&
476 q = i; 502 (wl->tx_allocated_pkts[ac] < min_pkts)) {
477 min_blks = wl->tx_allocated_blocks[q]; 503 q = ac;
504 min_pkts = wl->tx_allocated_pkts[q];
478 } 505 }
506 }
479 507
480 if (q == -1) 508 if (q == -1)
481 return NULL; 509 return NULL;
@@ -579,7 +607,7 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
579 if (wl12xx_is_dummy_packet(wl, skb)) { 607 if (wl12xx_is_dummy_packet(wl, skb)) {
580 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 608 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
581 } else if (wl->bss_type == BSS_TYPE_AP_BSS) { 609 } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
582 u8 hlid = wl1271_tx_get_hlid(skb); 610 u8 hlid = wl1271_tx_get_hlid(wl, skb);
583 skb_queue_head(&wl->links[hlid].tx_queue[q], skb); 611 skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
584 612
585 /* make sure we dequeue the same packet next time */ 613 /* make sure we dequeue the same packet next time */
@@ -826,10 +854,14 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
826 total[i] = 0; 854 total[i] = 0;
827 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) { 855 while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
828 wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb); 856 wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
829 info = IEEE80211_SKB_CB(skb); 857
830 info->status.rates[0].idx = -1; 858 if (!wl12xx_is_dummy_packet(wl, skb)) {
831 info->status.rates[0].count = 0; 859 info = IEEE80211_SKB_CB(skb);
832 ieee80211_tx_status_ni(wl->hw, skb); 860 info->status.rates[0].idx = -1;
861 info->status.rates[0].count = 0;
862 ieee80211_tx_status_ni(wl->hw, skb);
863 }
864
833 total[i]++; 865 total[i]++;
834 } 866 }
835 } 867 }
@@ -853,8 +885,8 @@ void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
853 if (wl->bss_type == BSS_TYPE_AP_BSS) { 885 if (wl->bss_type == BSS_TYPE_AP_BSS) {
854 for (i = 0; i < AP_MAX_LINKS; i++) { 886 for (i = 0; i < AP_MAX_LINKS; i++) {
855 wl1271_tx_reset_link_queues(wl, i); 887 wl1271_tx_reset_link_queues(wl, i);
856 wl->links[i].allocated_blks = 0; 888 wl->links[i].allocated_pkts = 0;
857 wl->links[i].prev_freed_blks = 0; 889 wl->links[i].prev_freed_pkts = 0;
858 } 890 }
859 891
860 wl->last_tx_hlid = 0; 892 wl->last_tx_hlid = 0;
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index 5d719b5a3d1d..7da35c0e411b 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -29,9 +29,6 @@
29 29
30#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 30#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
31#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000 31#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
32/* The chipset reference driver states, that the "aid" value 1
33 * is for infra-BSS, but is still always used */
34#define TX_HW_DEFAULT_AID 1
35 32
36#define TX_HW_ATTR_SAVE_RETRIES BIT(0) 33#define TX_HW_ATTR_SAVE_RETRIES BIT(0)
37#define TX_HW_ATTR_HEADER_PAD BIT(1) 34#define TX_HW_ATTR_HEADER_PAD BIT(1)
@@ -116,12 +113,8 @@ struct wl1271_tx_hw_descr {
116 u8 id; 113 u8 id;
117 /* The packet TID value (as User-Priority) */ 114 /* The packet TID value (as User-Priority) */
118 u8 tid; 115 u8 tid;
119 union { 116 /* host link ID (HLID) */
120 /* STA - Identifier of the remote STA in IBSS, 1 in infra-BSS */ 117 u8 hlid;
121 u8 aid;
122 /* AP - host link ID (HLID) */
123 u8 hlid;
124 } __packed;
125 u8 reserved; 118 u8 reserved;
126} __packed; 119} __packed;
127 120
@@ -133,7 +126,8 @@ enum wl1271_tx_hw_res_status {
133 TX_TIMEOUT = 4, 126 TX_TIMEOUT = 4,
134 TX_KEY_NOT_FOUND = 5, 127 TX_KEY_NOT_FOUND = 5,
135 TX_PEER_NOT_FOUND = 6, 128 TX_PEER_NOT_FOUND = 6,
136 TX_SESSION_MISMATCH = 7 129 TX_SESSION_MISMATCH = 7,
130 TX_LINK_NOT_VALID = 8,
137}; 131};
138 132
139struct wl1271_tx_hw_res_descr { 133struct wl1271_tx_hw_res_descr {
@@ -216,7 +210,7 @@ void wl1271_tx_flush(struct wl1271 *wl);
216u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 210u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
217u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 211u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
218u32 wl1271_tx_min_rate_get(struct wl1271 *wl); 212u32 wl1271_tx_min_rate_get(struct wl1271 *wl);
219u8 wl1271_tx_get_hlid(struct sk_buff *skb); 213u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
220void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid); 214void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
221void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 215void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
222 216
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 0bc29356ebe4..61a7c2163ea2 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -112,28 +112,8 @@ extern u32 wl12xx_debug_level;
112 true); \ 112 true); \
113 } while (0) 113 } while (0)
114 114
115#define WL1271_DEFAULT_STA_RX_CONFIG (CFG_UNI_FILTER_EN | \ 115#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
116 CFG_BSSID_FILTER_EN | \ 116#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
117 CFG_MC_FILTER_EN)
118
119#define WL1271_DEFAULT_STA_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
120 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
121 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
122 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
123
124#define WL1271_DEFAULT_AP_RX_CONFIG 0
125
126#define WL1271_DEFAULT_AP_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PREQ_EN | \
127 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
128 CFG_RX_CTL_EN | CFG_RX_AUTH_EN | \
129 CFG_RX_ASSOC_EN)
130
131
132
133#define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin"
134#define WL128X_FW_NAME "ti-connectivity/wl128x-fw.bin"
135#define WL127X_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin"
136#define WL128X_AP_FW_NAME "ti-connectivity/wl128x-fw-ap.bin"
137 117
138/* 118/*
139 * wl127x and wl128x are using the same NVS file name. However, the 119 * wl127x and wl128x are using the same NVS file name. However, the
@@ -157,25 +137,34 @@ extern u32 wl12xx_debug_level;
157#define WL1271_DEFAULT_BEACON_INT 100 137#define WL1271_DEFAULT_BEACON_INT 100
158#define WL1271_DEFAULT_DTIM_PERIOD 1 138#define WL1271_DEFAULT_DTIM_PERIOD 1
159 139
160#define WL1271_AP_GLOBAL_HLID 0 140#define WL12XX_MAX_ROLES 4
161#define WL1271_AP_BROADCAST_HLID 1 141#define WL12XX_MAX_LINKS 8
162#define WL1271_AP_STA_HLID_START 2 142#define WL12XX_INVALID_ROLE_ID 0xff
143#define WL12XX_INVALID_LINK_ID 0xff
144
145/* Defined by FW as 0. Will not be freed or allocated. */
146#define WL12XX_SYSTEM_HLID 0
147
148/*
149 * TODO: we currently don't support multirole. remove
150 * this constant from the code when we do.
151 */
152#define WL1271_AP_STA_HLID_START 3
163 153
164/* 154/*
165 * When in AP-mode, we allow (at least) this number of mem-blocks 155 * When in AP-mode, we allow (at least) this number of packets
166 * to be transmitted to FW for a STA in PS-mode. Only when packets are 156 * to be transmitted to FW for a STA in PS-mode. Only when packets are
167 * present in the FW buffers it will wake the sleeping STA. We want to put 157 * present in the FW buffers it will wake the sleeping STA. We want to put
168 * enough packets for the driver to transmit all of its buffered data before 158 * enough packets for the driver to transmit all of its buffered data before
169 * the STA goes to sleep again. But we don't want to take too much mem-blocks 159 * the STA goes to sleep again. But we don't want to take too much memory
170 * as it might hurt the throughput of active STAs. 160 * as it might hurt the throughput of active STAs.
171 * The number of blocks (18) is enough for 2 large packets.
172 */ 161 */
173#define WL1271_PS_STA_MAX_BLOCKS (2 * 9) 162#define WL1271_PS_STA_MAX_PACKETS 2
174 163
175#define WL1271_AP_BSS_INDEX 0 164#define WL1271_AP_BSS_INDEX 0
176#define WL1271_AP_DEF_BEACON_EXP 20 165#define WL1271_AP_DEF_BEACON_EXP 20
177 166
178#define ACX_TX_DESCRIPTORS 32 167#define ACX_TX_DESCRIPTORS 16
179 168
180#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 169#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
181 170
@@ -247,26 +236,22 @@ struct wl1271_stats {
247 236
248#define AP_MAX_STATIONS 5 237#define AP_MAX_STATIONS 5
249 238
250/* Broadcast and Global links + links to stations */ 239/* Broadcast and Global links + system link + links to stations */
251#define AP_MAX_LINKS (AP_MAX_STATIONS + 2) 240/*
241 * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
242 * the places that use this.
243 */
244#define AP_MAX_LINKS (AP_MAX_STATIONS + 3)
252 245
253/* FW status registers common for AP/STA */ 246/* FW status registers */
254struct wl1271_fw_common_status { 247struct wl12xx_fw_status {
255 __le32 intr; 248 __le32 intr;
256 u8 fw_rx_counter; 249 u8 fw_rx_counter;
257 u8 drv_rx_counter; 250 u8 drv_rx_counter;
258 u8 reserved; 251 u8 reserved;
259 u8 tx_results_counter; 252 u8 tx_results_counter;
260 __le32 rx_pkt_descs[NUM_RX_PKT_DESC]; 253 __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
261 __le32 tx_released_blks[NUM_TX_QUEUES];
262 __le32 fw_localtime; 254 __le32 fw_localtime;
263} __packed;
264
265/* FW status registers for AP */
266struct wl1271_fw_ap_status {
267 struct wl1271_fw_common_status common;
268
269 /* Next fields valid only in AP FW */
270 255
271 /* 256 /*
272 * A bitmap (where each bit represents a single HLID) 257 * A bitmap (where each bit represents a single HLID)
@@ -274,29 +259,29 @@ struct wl1271_fw_ap_status {
274 */ 259 */
275 __le32 link_ps_bitmap; 260 __le32 link_ps_bitmap;
276 261
277 /* Number of freed MBs per HLID */ 262 /*
278 u8 tx_lnk_free_blks[AP_MAX_LINKS]; 263 * A bitmap (where each bit represents a single HLID) to indicate
279 u8 padding_1[1]; 264 * if the station is in Fast mode
280} __packed; 265 */
266 __le32 link_fast_bitmap;
281 267
282/* FW status registers for STA */ 268 /* Cumulative counter of total released mem blocks since FW-reset */
283struct wl1271_fw_sta_status { 269 __le32 total_released_blks;
284 struct wl1271_fw_common_status common;
285 270
286 u8 tx_total; 271 /* Size (in Memory Blocks) of TX pool */
287 u8 reserved1; 272 __le32 tx_total;
288 __le16 reserved2;
289 __le32 log_start_addr;
290} __packed;
291 273
292struct wl1271_fw_full_status { 274 /* Cumulative counter of released packets per AC */
293 union { 275 u8 tx_released_pkts[NUM_TX_QUEUES];
294 struct wl1271_fw_common_status common; 276
295 struct wl1271_fw_sta_status sta; 277 /* Cumulative counter of freed packets per HLID */
296 struct wl1271_fw_ap_status ap; 278 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
297 };
298} __packed;
299 279
280 /* Cumulative counter of released Voice memory blocks */
281 u8 tx_voice_released_blks;
282 u8 padding_1[7];
283 __le32 log_start_addr;
284} __packed;
300 285
301struct wl1271_rx_mem_pool_addr { 286struct wl1271_rx_mem_pool_addr {
302 u32 addr; 287 u32 addr;
@@ -342,7 +327,7 @@ struct wl1271_ap_key {
342 327
343enum wl12xx_flags { 328enum wl12xx_flags {
344 WL1271_FLAG_STA_ASSOCIATED, 329 WL1271_FLAG_STA_ASSOCIATED,
345 WL1271_FLAG_JOINED, 330 WL1271_FLAG_IBSS_JOINED,
346 WL1271_FLAG_GPIO_POWER, 331 WL1271_FLAG_GPIO_POWER,
347 WL1271_FLAG_TX_QUEUE_STOPPED, 332 WL1271_FLAG_TX_QUEUE_STOPPED,
348 WL1271_FLAG_TX_PENDING, 333 WL1271_FLAG_TX_PENDING,
@@ -369,11 +354,14 @@ struct wl1271_link {
369 /* AP-mode - TX queue per AC in link */ 354 /* AP-mode - TX queue per AC in link */
370 struct sk_buff_head tx_queue[NUM_TX_QUEUES]; 355 struct sk_buff_head tx_queue[NUM_TX_QUEUES];
371 356
372 /* accounting for allocated / available TX blocks in FW */ 357 /* accounting for allocated / freed packets in FW */
373 u8 allocated_blks; 358 u8 allocated_pkts;
374 u8 prev_freed_blks; 359 u8 prev_freed_pkts;
375 360
376 u8 addr[ETH_ALEN]; 361 u8 addr[ETH_ALEN];
362
363 /* bitmap of TIDs where RX BA sessions are active for this link */
364 u8 ba_bitmap;
377}; 365};
378 366
379struct wl1271 { 367struct wl1271 {
@@ -405,7 +393,6 @@ struct wl1271 {
405 393
406 u8 *fw; 394 u8 *fw;
407 size_t fw_len; 395 size_t fw_len;
408 u8 fw_bss_type;
409 void *nvs; 396 void *nvs;
410 size_t nvs_len; 397 size_t nvs_len;
411 398
@@ -418,15 +405,30 @@ struct wl1271 {
418 u8 ssid[IEEE80211_MAX_SSID_LEN + 1]; 405 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
419 u8 ssid_len; 406 u8 ssid_len;
420 int channel; 407 int channel;
408 u8 role_id;
409 u8 dev_role_id;
410 u8 system_hlid;
411 u8 sta_hlid;
412 u8 dev_hlid;
413 u8 ap_global_hlid;
414 u8 ap_bcast_hlid;
415
416 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
417 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
418 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
421 419
422 struct wl1271_acx_mem_map *target_mem_map; 420 struct wl1271_acx_mem_map *target_mem_map;
423 421
424 /* Accounting for allocated / available TX blocks on HW */ 422 /* Accounting for allocated / available TX blocks on HW */
425 u32 tx_blocks_freed[NUM_TX_QUEUES]; 423 u32 tx_blocks_freed;
426 u32 tx_blocks_available; 424 u32 tx_blocks_available;
427 u32 tx_allocated_blocks[NUM_TX_QUEUES]; 425 u32 tx_allocated_blocks;
428 u32 tx_results_count; 426 u32 tx_results_count;
429 427
428 /* Accounting for allocated / available Tx packets in HW */
429 u32 tx_pkts_freed[NUM_TX_QUEUES];
430 u32 tx_allocated_pkts[NUM_TX_QUEUES];
431
430 /* Transmitted TX packets counter for chipset interface */ 432 /* Transmitted TX packets counter for chipset interface */
431 u32 tx_packets_count; 433 u32 tx_packets_count;
432 434
@@ -535,10 +537,6 @@ struct wl1271 {
535 struct work_struct rx_streaming_disable_work; 537 struct work_struct rx_streaming_disable_work;
536 struct timer_list rx_streaming_timer; 538 struct timer_list rx_streaming_timer;
537 539
538 unsigned int filters;
539 unsigned int rx_config;
540 unsigned int rx_filter;
541
542 struct completion *elp_compl; 540 struct completion *elp_compl;
543 struct completion *ps_compl; 541 struct completion *ps_compl;
544 struct delayed_work elp_work; 542 struct delayed_work elp_work;
@@ -562,7 +560,7 @@ struct wl1271 {
562 u32 buffer_cmd; 560 u32 buffer_cmd;
563 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 561 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
564 562
565 struct wl1271_fw_full_status *fw_status; 563 struct wl12xx_fw_status *fw_status;
566 struct wl1271_tx_hw_res_if *tx_res_if; 564 struct wl1271_tx_hw_res_if *tx_res_if;
567 565
568 struct ieee80211_vif *vif; 566 struct ieee80211_vif *vif;
@@ -622,6 +620,9 @@ struct wl1271 {
622 620
623 /* Platform limitations */ 621 /* Platform limitations */
624 unsigned int platform_quirks; 622 unsigned int platform_quirks;
623
624 /* number of currently active RX BA sessions */
625 int ba_rx_session_count;
625}; 626};
626 627
627struct wl1271_station { 628struct wl1271_station {
@@ -659,12 +660,6 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
659/* Each RX/TX transaction requires an end-of-transaction transfer */ 660/* Each RX/TX transaction requires an end-of-transaction transfer */
660#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0) 661#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
661 662
662/*
663 * Older firmwares use 2 spare TX blocks
664 * (for STA < 6.1.3.50.58 or for AP < 6.2.0.0.47)
665 */
666#define WL12XX_QUIRK_USE_2_SPARE_BLOCKS BIT(1)
667
668/* WL128X requires aggregated packets to be aligned to the SDIO block size */ 663/* WL128X requires aggregated packets to be aligned to the SDIO block size */
669#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2) 664#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
670 665
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f334ea081722..f7971d3b0898 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -105,18 +105,6 @@ struct wl12xx_ie_country {
105 105
106/* Templates */ 106/* Templates */
107 107
108struct wl12xx_beacon_template {
109 struct ieee80211_header header;
110 __le32 time_stamp[2];
111 __le16 beacon_interval;
112 __le16 capability;
113 struct wl12xx_ie_ssid ssid;
114 struct wl12xx_ie_rates rates;
115 struct wl12xx_ie_rates ext_rates;
116 struct wl12xx_ie_ds_params ds_params;
117 struct wl12xx_ie_country country;
118} __packed;
119
120struct wl12xx_null_data_template { 108struct wl12xx_null_data_template {
121 struct ieee80211_header header; 109 struct ieee80211_header header;
122} __packed; 110} __packed;
@@ -146,19 +134,6 @@ struct wl12xx_arp_rsp_template {
146 __be32 target_ip; 134 __be32 target_ip;
147} __packed; 135} __packed;
148 136
149
150struct wl12xx_probe_resp_template {
151 struct ieee80211_header header;
152 __le32 time_stamp[2];
153 __le16 beacon_interval;
154 __le16 capability;
155 struct wl12xx_ie_ssid ssid;
156 struct wl12xx_ie_rates rates;
157 struct wl12xx_ie_rates ext_rates;
158 struct wl12xx_ie_ds_params ds_params;
159 struct wl12xx_ie_country country;
160} __packed;
161
162struct wl12xx_disconn_template { 137struct wl12xx_disconn_template {
163 struct ieee80211_header header; 138 struct ieee80211_header header;
164 __le16 disconn_reason; 139 __le16 disconn_reason;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 88112ca59c8e..eb2659aefd97 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1792,6 +1792,7 @@ struct wiphy_wowlan_support {
1792 * @debugfsdir: debugfs directory used for this wiphy, will be renamed 1792 * @debugfsdir: debugfs directory used for this wiphy, will be renamed
1793 * automatically on wiphy renames 1793 * automatically on wiphy renames
1794 * @dev: (virtual) struct device for this wiphy 1794 * @dev: (virtual) struct device for this wiphy
1795 * @registered: helps synchronize suspend/resume with wiphy unregister
1795 * @wext: wireless extension handlers 1796 * @wext: wireless extension handlers
1796 * @priv: driver private data (sized according to wiphy_new() parameter) 1797 * @priv: driver private data (sized according to wiphy_new() parameter)
1797 * @interface_modes: bitmask of interfaces types valid for this wiphy, 1798 * @interface_modes: bitmask of interfaces types valid for this wiphy,
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 186e02f7cc32..267ed45ef6a2 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -195,20 +195,12 @@ static ssize_t uapsd_queues_write(struct file *file,
195 size_t count, loff_t *ppos) 195 size_t count, loff_t *ppos)
196{ 196{
197 struct ieee80211_local *local = file->private_data; 197 struct ieee80211_local *local = file->private_data;
198 unsigned long val; 198 u8 val;
199 char buf[10];
200 size_t len;
201 int ret; 199 int ret;
202 200
203 len = min(count, sizeof(buf) - 1); 201 ret = kstrtou8_from_user(user_buf, count, 0, &val);
204 if (copy_from_user(buf, user_buf, len))
205 return -EFAULT;
206 buf[len] = '\0';
207
208 ret = strict_strtoul(buf, 0, &val);
209
210 if (ret) 202 if (ret)
211 return -EINVAL; 203 return ret;
212 204
213 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) 205 if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
214 return -ERANGE; 206 return -ERANGE;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7fde32159fdc..f97d17cb073c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -307,14 +307,14 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
307 307
308 while (num_skbs--) { 308 while (num_skbs--) {
309 skb = __skb_dequeue(&failq); 309 skb = __skb_dequeue(&failq);
310 if (copy) 310 if (copy) {
311 cp_skb = skb_copy(skb, GFP_ATOMIC); 311 cp_skb = skb_copy(skb, GFP_ATOMIC);
312 if (cp_skb)
313 __skb_queue_tail(&failq, cp_skb);
314 }
312 315
313 prepare_for_gate(skb, gate_mpath->dst, gate_mpath); 316 prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
314 __skb_queue_tail(&gateq, skb); 317 __skb_queue_tail(&gateq, skb);
315
316 if (copy && cp_skb)
317 __skb_queue_tail(&failq, cp_skb);
318 } 318 }
319 319
320 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags); 320 spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);