aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKalle Valo <kvalo@codeaurora.org>2016-03-07 08:48:56 -0500
committerKalle Valo <kvalo@codeaurora.org>2016-03-07 08:48:56 -0500
commit739596b09b01dc972a27d12c4058f7ee2c3c6e40 (patch)
treeabb03f52ad867c4a333542a58171c5344e122e3c
parent0ea6f0c582c2675285ad094df0137f4f0de47869 (diff)
parent53f09e742b0fdf14a2a2bfd2062ee96c9b3eedf0 (diff)
Merge tag 'iwlwifi-next-for-kalle-2016-03-02' of https://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
* add support for thermal device / cooling device (Chaya Rachel) * fixes for 9000 devices data path (Sara Sharon) * improvements in scheduled scan w/o profiles (Luca) * new firmware support (-21.ucode) * add MSIX support for 9000 devices (Haim Dreyfuss) * cleanup in PCIe initialization * enable MU-MIMO and take care of firmware restart(Sara Sharon) ===> This needs mac80211-next * add support for large SKBs in mvm to reach A-MSDU ===> This needs mac80211-next * add support for filtering frames from a BA session (Sara Sharon) ===> This needs mac80211-next * start implementing the new Rx path for 9000 devices (Sara Sharon) * enable the new RRM feature flag (Beni Lev) * fix U-APSD enablement on P2P Client (Avri Altman) * fix beacon abort enablement (Avri Altman) * forbid beacon storing with WoWLAN (Matti Gottlieb) * support unified uSniffer / regular firmware image (Golan Ben-Ami) * fix a race between debugfs hooks and iface up (Chaya Rachel Ivgi) * fixes for runtime PM (Luca) * add a new module paramater to disable VHT (Andrei Otcheretianski) * build infrastructure for Dynamic Queue Allocation (Liad Kaufman)
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-7000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-9000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h126
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c146
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c461
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c277
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c10
35 files changed, 2052 insertions, 214 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index fa41a5e1c890..fc475ce59b47 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -73,8 +73,8 @@
73/* Highest firmware API version supported */ 73/* Highest firmware API version supported */
74#define IWL7260_UCODE_API_MAX 17 74#define IWL7260_UCODE_API_MAX 17
75#define IWL7265_UCODE_API_MAX 17 75#define IWL7265_UCODE_API_MAX 17
76#define IWL7265D_UCODE_API_MAX 20 76#define IWL7265D_UCODE_API_MAX 21
77#define IWL3168_UCODE_API_MAX 20 77#define IWL3168_UCODE_API_MAX 21
78 78
79/* Oldest version we won't warn about */ 79/* Oldest version we won't warn about */
80#define IWL7260_UCODE_API_OK 13 80#define IWL7260_UCODE_API_OK 13
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index bce9b3420a13..97be104d1203 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -70,8 +70,8 @@
70#include "iwl-agn-hw.h" 70#include "iwl-agn-hw.h"
71 71
72/* Highest firmware API version supported */ 72/* Highest firmware API version supported */
73#define IWL8000_UCODE_API_MAX 20 73#define IWL8000_UCODE_API_MAX 21
74#define IWL8265_UCODE_API_MAX 20 74#define IWL8265_UCODE_API_MAX 21
75 75
76/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
77#define IWL8000_UCODE_API_OK 13 77#define IWL8000_UCODE_API_OK 13
@@ -217,6 +217,7 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
217 .nvm_ver = IWL8000_NVM_VERSION, 217 .nvm_ver = IWL8000_NVM_VERSION,
218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
219 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, 219 .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
220 .vht_mu_mimo_supported = true,
220}; 221};
221 222
222const struct iwl_cfg iwl4165_2ac_cfg = { 223const struct iwl_cfg iwl4165_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 4b93404f46a7..8e32a57dda0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -55,7 +55,7 @@
55#include "iwl-agn-hw.h" 55#include "iwl-agn-hw.h"
56 56
57/* Highest firmware API version supported */ 57/* Highest firmware API version supported */
58#define IWL9000_UCODE_API_MAX 20 58#define IWL9000_UCODE_API_MAX 21
59 59
60/* Oldest version we won't warn about */ 60/* Oldest version we won't warn about */
61#define IWL9000_UCODE_API_OK 13 61#define IWL9000_UCODE_API_OK 13
@@ -139,7 +139,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
139 .smem_len = IWL9000_SMEM_LEN, \ 139 .smem_len = IWL9000_SMEM_LEN, \
140 .thermal_params = &iwl9000_tt_params, \ 140 .thermal_params = &iwl9000_tt_params, \
141 .apmg_not_supported = true, \ 141 .apmg_not_supported = true, \
142 .mq_rx_supported = true 142 .mq_rx_supported = true, \
143 .vht_mu_mimo_supported = true
143 144
144const struct iwl_cfg iwl9260_2ac_cfg = { 145const struct iwl_cfg iwl9260_2ac_cfg = {
145 .name = "Intel(R) Dual Band Wireless AC 9260", 146 .name = "Intel(R) Dual Band Wireless AC 9260",
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index dad5570d6cc8..4f2b57e8bbc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -312,6 +312,7 @@ struct iwl_pwr_tx_backoff {
312 * @smem_offset: offset from which the SMEM begins 312 * @smem_offset: offset from which the SMEM begins
313 * @smem_len: the length of SMEM 313 * @smem_len: the length of SMEM
314 * @mq_rx_supported: multi-queue rx support 314 * @mq_rx_supported: multi-queue rx support
315 * @vht_mu_mimo_supported: VHT MU-MIMO support
315 * 316 *
316 * We enable the driver to be backward compatible wrt. hardware features. 317 * We enable the driver to be backward compatible wrt. hardware features.
317 * API differences in uCode shouldn't be handled here but through TLVs 318 * API differences in uCode shouldn't be handled here but through TLVs
@@ -364,6 +365,7 @@ struct iwl_cfg {
364 const struct iwl_tt_params *thermal_params; 365 const struct iwl_tt_params *thermal_params;
365 bool apmg_not_supported; 366 bool apmg_not_supported;
366 bool mq_rx_supported; 367 bool mq_rx_supported;
368 bool vht_mu_mimo_supported;
367}; 369};
368 370
369/* 371/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 163b21bc20cb..a79c4f61a851 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -549,4 +550,52 @@ enum dtd_diode_reg {
549 DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ 550 DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */
550}; 551};
551 552
553/*****************************************************************************
554 * MSIX related registers *
555 *****************************************************************************/
556
557#define CSR_MSIX_BASE (0x2000)
558#define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800)
559#define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804)
560#define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808)
561#define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C)
562#define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810)
563#define CSR_MSIX_RX_IVAR_AD_REG (CSR_MSIX_BASE + 0x880)
564#define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890)
565#define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000)
566#define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause))
567#define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause))
568
569#define MSIX_FH_INT_CAUSES_Q(q) (q)
570
571/*
572 * Causes for the FH register interrupts
573 */
574enum msix_fh_int_causes {
575 MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
576 MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
577 MSIX_FH_INT_CAUSES_S2D = BIT(19),
578 MSIX_FH_INT_CAUSES_FH_ERR = BIT(21),
579};
580
581/*
582 * Causes for the HW register interrupts
583 */
584enum msix_hw_int_causes {
585 MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
586 MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
587 MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
588 MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
589 MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
590 MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25),
591 MSIX_HW_INT_CAUSES_REG_SCD = BIT(26),
592 MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27),
593 MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29),
594 MSIX_HW_INT_CAUSES_REG_HAP = BIT(30),
595};
596
597#define MSIX_MIN_INTERRUPT_VECTORS 2
598#define MSIX_AUTO_CLEAR_CAUSE 0
599#define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7)
600
552#endif /* !__iwl_csr_h__ */ 601#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 22786d7dc00a..2a0703fcec56 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -73,12 +73,12 @@ TRACE_EVENT(iwlwifi_dev_rx,
73 TP_ARGS(dev, trans, pkt, len), 73 TP_ARGS(dev, trans, pkt, len),
74 TP_STRUCT__entry( 74 TP_STRUCT__entry(
75 DEV_ENTRY 75 DEV_ENTRY
76 __field(u8, cmd) 76 __field(u16, cmd)
77 __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) 77 __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
78 ), 78 ),
79 TP_fast_assign( 79 TP_fast_assign(
80 DEV_ASSIGN; 80 DEV_ASSIGN;
81 __entry->cmd = pkt->hdr.cmd; 81 __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
82 memcpy(__get_dynamic_array(rxbuf), pkt, 82 memcpy(__get_dynamic_array(rxbuf), pkt,
83 iwl_rx_trace_len(trans, pkt, len)); 83 iwl_rx_trace_len(trans, pkt, len));
84 ), 84 ),
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ab4c2a0470b2..184c0fef37c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1033,7 +1033,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
1033 } 1033 }
1034 } 1034 }
1035 1035
1036 if (usniffer_req && !*usniffer_images) { 1036 if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
1037 usniffer_req && !*usniffer_images) {
1037 IWL_ERR(drv, 1038 IWL_ERR(drv,
1038 "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); 1039 "user selected to work with usniffer but usniffer image isn't available in ucode package\n");
1039 return -EINVAL; 1040 return -EINVAL;
@@ -1718,3 +1719,7 @@ MODULE_PARM_DESC(fw_monitor,
1718module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay, 1719module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay,
1719 uint, S_IRUGO); 1720 uint, S_IRUGO);
1720MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); 1721MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
1722
1723module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
1724 S_IRUGO);
1725MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 4ab6682ea53e..8af818b10e71 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -6,7 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2015 Intel Deutschland GmbH 9 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
32 * BSD LICENSE 32 * BSD LICENSE
33 * 33 *
34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2015 Intel Deutschland GmbH 35 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
36 * All rights reserved. 36 * All rights reserved.
37 * 37 *
38 * Redistribution and use in source and binary forms, with or without 38 * Redistribution and use in source and binary forms, with or without
@@ -368,20 +368,24 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
368#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) 368#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
369#define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) 369#define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
370#define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) 370#define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
371#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ 371#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
372#define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 372#define RFH_RXF_DMA_MIN_RB_SIZE_POS 24
373#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) 373#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
374#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ 374#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */
375#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ 375#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
376#define RFH_DMA_EN_ENABLE_VAL BIT(31) 376#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/
377#define RFH_DMA_EN_ENABLE_VAL BIT(31)
377 378
378#define RFH_RXF_RXQ_ACTIVE 0xA0980C 379#define RFH_RXF_RXQ_ACTIVE 0xA0980C
379 380
380#define RFH_GEN_CFG 0xA09800 381#define RFH_GEN_CFG 0xA09800
382#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
383#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
384#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) /* 0 - 64B, 1- 128B */
381#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00 385#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
382#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) 386#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8
383#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) 387
384#define DEFAULT_RXQ_NUM 8 388#define DEFAULT_RXQ_NUM 0
385 389
386/* end of 9000 rx series registers */ 390/* end of 9000 rx series registers */
387 391
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index e2dbc67a367b..5f69bf5e04c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -318,6 +318,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
318 * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon 318 * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
319 * from AP and will send it upon d0i3 exit. 319 * from AP and will send it upon d0i3 exit.
320 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 320 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
321 * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
322 * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
323 * thresholds reporting
324 * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
325 * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
326 * regular image.
321 * 327 *
322 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used 328 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
323 */ 329 */
@@ -351,6 +357,10 @@ enum iwl_ucode_tlv_capa {
351 IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, 357 IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71,
352 IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, 358 IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72,
353 IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, 359 IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73,
360 IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74,
361 IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
362 IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
363 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
354 364
355 NUM_IWL_UCODE_TLV_CAPA 365 NUM_IWL_UCODE_TLV_CAPA
356#ifdef __CHECKER__ 366#ifdef __CHECKER__
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index b88ecc7892a9..d1a5dd1602f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -115,6 +115,7 @@ enum iwl_amsdu_size {
115 * entering D0i3 (in msecs) 115 * entering D0i3 (in msecs)
116 * @lar_disable: disable LAR (regulatory), default = 0 116 * @lar_disable: disable LAR (regulatory), default = 0
117 * @fw_monitor: allow to use firmware monitor 117 * @fw_monitor: allow to use firmware monitor
118 * @disable_11ac: disable VHT capabilities, default = false.
118 */ 119 */
119struct iwl_mod_params { 120struct iwl_mod_params {
120 int sw_crypto; 121 int sw_crypto;
@@ -135,6 +136,7 @@ struct iwl_mod_params {
135 unsigned int d0i3_entry_delay; 136 unsigned int d0i3_entry_delay;
136 bool lar_disable; 137 bool lar_disable;
137 bool fw_monitor; 138 bool fw_monitor;
139 bool disable_11ac;
138}; 140};
139 141
140#endif /* #__iwl_modparams_h__ */ 142#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 50f4cc60cf3e..348135792f3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -366,6 +366,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
366 max_ampdu_exponent << 366 max_ampdu_exponent <<
367 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 367 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
368 368
369 if (cfg->vht_mu_mimo_supported)
370 vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
371
369 if (cfg->ht_params->ldpc) 372 if (cfg->ht_params->ldpc)
370 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; 373 vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
371 374
@@ -449,7 +452,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
449 IEEE80211_BAND_5GHZ); 452 IEEE80211_BAND_5GHZ);
450 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, 453 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
451 tx_chains, rx_chains); 454 tx_chains, rx_chains);
452 if (data->sku_cap_11ac_enable) 455 if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
453 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, 456 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
454 tx_chains, rx_chains); 457 tx_chains, rx_chains);
455 458
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 5bde23a472b4..c46e596e12b1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -404,4 +404,6 @@ enum {
404 LMPM_PAGE_PASS_NOTIF_POS = BIT(20), 404 LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
405}; 405};
406 406
407#define UREG_CHICK (0xA05C00)
408#define UREG_CHICK_MSIX_ENABLE BIT(25)
407#endif /* __iwl_prph_h__ */ 409#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 0ca0f13b69b0..91d74b3f666b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -836,6 +836,7 @@ struct iwl_trans {
836 836
837 enum iwl_plat_pm_mode system_pm_mode; 837 enum iwl_plat_pm_mode system_pm_mode;
838 enum iwl_plat_pm_mode runtime_pm_mode; 838 enum iwl_plat_pm_mode runtime_pm_mode;
839 bool suspending;
839 840
840 /* pointer to trans specific struct */ 841 /* pointer to trans specific struct */
841 /*Ensure that this pointer will always be aligned to sizeof pointer */ 842 /*Ensure that this pointer will always be aligned to sizeof pointer */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 5214482a0403..c1a313149eed 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -816,8 +816,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
816{ 816{
817 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 817 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
818 818
819 iwl_trans_stop_device(mvm->trans); 819 iwl_mvm_stop_device(mvm);
820
821 /* 820 /*
822 * Set the HW restart bit -- this is mostly true as we're 821 * Set the HW restart bit -- this is mostly true as we're
823 * going to load new firmware and reprogram that, though 822 * going to load new firmware and reprogram that, though
@@ -856,8 +855,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
856 wowlan_config_cmd->is_11n_connection = 855 wowlan_config_cmd->is_11n_connection =
857 ap_sta->ht_cap.ht_supported; 856 ap_sta->ht_cap.ht_supported;
858 wowlan_config_cmd->flags = ENABLE_L3_FILTERING | 857 wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
859 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | 858 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
860 ENABLE_STORE_BEACON;
861 859
862 /* Query the last used seqno and set it */ 860 /* Query the last used seqno and set it */
863 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); 861 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index c529e5355803..56e6b0b8b9cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -64,6 +64,7 @@
64 * 64 *
65 *****************************************************************************/ 65 *****************************************************************************/
66#include <linux/vmalloc.h> 66#include <linux/vmalloc.h>
67#include <linux/ieee80211.h>
67 68
68#include "mvm.h" 69#include "mvm.h"
69#include "fw-dbg.h" 70#include "fw-dbg.h"
@@ -976,7 +977,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
976 memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, 977 memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
977 ARRAY_SIZE(cmd.indirection_table) % nbytes); 978 ARRAY_SIZE(cmd.indirection_table) % nbytes);
978 979
979 memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); 980 memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
980 981
981 mutex_lock(&mvm->mutex); 982 mutex_lock(&mvm->mutex);
982 ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 983 ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
@@ -1080,6 +1081,22 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
1080 return count; 1081 return count;
1081} 1082}
1082 1083
1084static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
1085 char *buf, size_t count,
1086 loff_t *ppos)
1087{
1088 unsigned int max_amsdu_len;
1089 int ret;
1090
1091 ret = kstrtouint(buf, 0, &max_amsdu_len);
1092
1093 if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
1094 return -EINVAL;
1095 mvm->max_amsdu_len = max_amsdu_len;
1096
1097 return count;
1098}
1099
1083#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) 1100#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
1084#ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1101#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1085static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, 1102static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
@@ -1497,7 +1514,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
1497MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); 1514MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
1498MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); 1515MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
1499MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); 1516MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
1500MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16); 1517MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
1518MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
1519 (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
1501 1520
1502#ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1521#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1503MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); 1522MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1540,6 +1559,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
1540 MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); 1559 MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1541 MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); 1560 MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
1542 MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); 1561 MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
1562 MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR);
1543 MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); 1563 MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
1544 MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); 1564 MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
1545 MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); 1565 MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index df939f51d9b9..eb9b87038e1f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -391,4 +391,56 @@ struct iwl_rss_config_cmd {
391 u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; 391 u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
392} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ 392} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
393 393
394#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
395#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
396#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
397
398/**
399 * struct iwl_rxq_sync_cmd - RXQ notification trigger
400 *
401 * @flags: flags of the notification. bit 0:3 are the sender queue
402 * @rxq_mask: rx queues to send the notification on
403 * @count: number of bytes in payload, should be DWORD aligned
404 * @payload: data to send to rx queues
405 */
406struct iwl_rxq_sync_cmd {
407 __le32 flags;
408 __le32 rxq_mask;
409 __le32 count;
410 u8 payload[];
411} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
412
413/**
414 * struct iwl_rxq_sync_notification - Notification triggered by RXQ
415 * sync command
416 *
417 * @count: number of bytes in payload
418 * @payload: data to send to rx queues
419 */
420struct iwl_rxq_sync_notification {
421 __le32 count;
422 u8 payload[];
423} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
424
425/**
426* Internal message identifier
427*
428* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
429*/
430enum iwl_mvm_rxq_notif_type {
431 IWL_MVM_RXQ_NOTIF_DEL_BA,
432};
433
434/**
435* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
436* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
437*
438* @type: value from &iwl_mvm_rxq_notif_type
439* @data: payload
440*/
441struct iwl_mvm_internal_rxq_notif {
442 u32 type;
443 u8 data[];
444} __packed;
445
394#endif /* __fw_api_rx_h__ */ 446#endif /* __fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index f332497e29d1..4a0fc47c81f2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -119,6 +119,8 @@ enum {
119 SCAN_ABORT_UMAC = 0xe, 119 SCAN_ABORT_UMAC = 0xe,
120 SCAN_COMPLETE_UMAC = 0xf, 120 SCAN_COMPLETE_UMAC = 0xf,
121 121
122 BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
123
122 /* station table */ 124 /* station table */
123 ADD_STA_KEY = 0x17, 125 ADD_STA_KEY = 0x17,
124 ADD_STA = 0x18, 126 ADD_STA = 0x18,
@@ -279,9 +281,19 @@ enum {
279 */ 281 */
280enum iwl_phy_ops_subcmd_ids { 282enum iwl_phy_ops_subcmd_ids {
281 CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, 283 CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
284 CTDP_CONFIG_CMD = 0x03,
285 TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
286 CT_KILL_NOTIFICATION = 0xFE,
282 DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, 287 DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
283}; 288};
284 289
290enum iwl_data_path_subcmd_ids {
291 UPDATE_MU_GROUPS_CMD = 0x1,
292 TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
293 MU_GROUP_MGMT_NOTIF = 0xFE,
294 RX_QUEUES_NOTIFICATION = 0xFF,
295};
296
285enum iwl_prot_offload_subcmd_ids { 297enum iwl_prot_offload_subcmd_ids {
286 STORED_BEACON_NTF = 0xFF, 298 STORED_BEACON_NTF = 0xFF,
287}; 299};
@@ -291,6 +303,7 @@ enum {
291 LEGACY_GROUP = 0x0, 303 LEGACY_GROUP = 0x0,
292 LONG_GROUP = 0x1, 304 LONG_GROUP = 0x1,
293 PHY_OPS_GROUP = 0x4, 305 PHY_OPS_GROUP = 0x4,
306 DATA_PATH_GROUP = 0x5,
294 PROT_OFFLOAD_GROUP = 0xb, 307 PROT_OFFLOAD_GROUP = 0xb,
295}; 308};
296 309
@@ -1278,6 +1291,26 @@ struct iwl_fw_bcast_filter {
1278 struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; 1291 struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
1279} __packed; /* BCAST_FILTER_S_VER_1 */ 1292} __packed; /* BCAST_FILTER_S_VER_1 */
1280 1293
1294#define BA_WINDOW_STREAMS_MAX 16
1295#define BA_WINDOW_STATUS_TID_MSK 0x000F
1296#define BA_WINDOW_STATUS_STA_ID_POS 4
1297#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0
1298#define BA_WINDOW_STATUS_VALID_MSK BIT(9)
1299
1300/**
1301 * struct iwl_ba_window_status_notif - reordering window's status notification
1302 * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
1303 * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
1304 * @start_seq_num: the start sequence number of the bitmap
1305 * @mpdu_rx_count: the number of received MPDUs since entering D0i3
1306 */
1307struct iwl_ba_window_status_notif {
1308 __le64 bitmap[BA_WINDOW_STREAMS_MAX];
1309 __le16 ra_tid[BA_WINDOW_STREAMS_MAX];
1310 __le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
1311 __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
1312} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
1313
1281/** 1314/**
1282 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. 1315 * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
1283 * @default_discard: default action for this mac (discard (1) / pass (0)). 1316 * @default_discard: default action for this mac (discard (1) / pass (0)).
@@ -1675,15 +1708,77 @@ struct iwl_ext_dts_measurement_cmd {
1675} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ 1708} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
1676 1709
1677/** 1710/**
1678 * iwl_dts_measurement_notif - notification received with the measurements 1711 * struct iwl_dts_measurement_notif_v1 - measurements notification
1679 * 1712 *
1680 * @temp: the measured temperature 1713 * @temp: the measured temperature
1681 * @voltage: the measured voltage 1714 * @voltage: the measured voltage
1682 */ 1715 */
1683struct iwl_dts_measurement_notif { 1716struct iwl_dts_measurement_notif_v1 {
1684 __le32 temp; 1717 __le32 temp;
1685 __le32 voltage; 1718 __le32 voltage;
1686} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ 1719} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
1720
1721/**
1722 * struct iwl_dts_measurement_notif_v2 - measurements notification
1723 *
1724 * @temp: the measured temperature
1725 * @voltage: the measured voltage
1726 * @threshold_idx: the trip index that was crossed
1727 */
1728struct iwl_dts_measurement_notif_v2 {
1729 __le32 temp;
1730 __le32 voltage;
1731 __le32 threshold_idx;
1732} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
1733
1734/**
1735 * struct ct_kill_notif - CT-kill entry notification
1736 *
1737 * @temperature: the current temperature in celsius
1738 * @reserved: reserved
1739 */
1740struct ct_kill_notif {
1741 __le16 temperature;
1742 __le16 reserved;
1743} __packed; /* GRP_PHY_CT_KILL_NTF */
1744
1745/**
1746* enum ctdp_cmd_operation - CTDP command operations
1747* @CTDP_CMD_OPERATION_START: update the current budget
1748* @CTDP_CMD_OPERATION_STOP: stop ctdp
1749* @CTDP_CMD_OPERATION_REPORT: get the avgerage budget
1750*/
1751enum iwl_mvm_ctdp_cmd_operation {
1752 CTDP_CMD_OPERATION_START = 0x1,
1753 CTDP_CMD_OPERATION_STOP = 0x2,
1754 CTDP_CMD_OPERATION_REPORT = 0x4,
1755};/* CTDP_CMD_OPERATION_TYPE_E */
1756
1757/**
1758 * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
1759 *
1760 * @operation: see &enum iwl_mvm_ctdp_cmd_operation
1761 * @budget: the budget in milliwatt
1762 * @window_size: defined in API but not used
1763 */
1764struct iwl_mvm_ctdp_cmd {
1765 __le32 operation;
1766 __le32 budget;
1767 __le32 window_size;
1768} __packed;
1769
1770#define IWL_MAX_DTS_TRIPS 8
1771
1772/**
1773 * struct iwl_temp_report_ths_cmd - set temperature thresholds
1774 *
1775 * @num_temps: number of temperature thresholds passed
1776 * @thresholds: array with the thresholds to be configured
1777 */
1778struct temp_report_ths_cmd {
1779 __le32 num_temps;
1780 __le16 thresholds[IWL_MAX_DTS_TRIPS];
1781} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
1687 1782
1688/*********************************** 1783/***********************************
1689 * TDLS API 1784 * TDLS API
@@ -1858,6 +1953,31 @@ struct iwl_shared_mem_cfg {
1858 __le32 page_buff_size; 1953 __le32 page_buff_size;
1859} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ 1954} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
1860 1955
1956/**
1957 * VHT MU-MIMO group configuration
1958 *
1959 * @membership_status: a bitmap of MU groups
1960 * @user_position:the position of station in a group. If the station is in the
1961 * group then bits (group * 2) is the position -1
1962 */
1963struct iwl_mu_group_mgmt_cmd {
1964 __le32 reserved;
1965 __le32 membership_status[2];
1966 __le32 user_position[4];
1967} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
1968
1969/**
1970 * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
1971 *
1972 * @membership_status: a bitmap of MU groups
1973 * @user_position: the position of station in a group. If the station is in the
1974 * group then bits (group * 2) is the position -1
1975 */
1976struct iwl_mu_group_mgmt_notif {
1977 __le32 membership_status[2];
1978 __le32 user_position[4];
1979} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
1980
1861#define MAX_STORED_BEACON_SIZE 600 1981#define MAX_STORED_BEACON_SIZE 600
1862 1982
1863/** 1983/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 070e2af05ca2..594cd0dc7df9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -121,12 +121,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
121 121
122 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) 122 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
123 cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; 123 cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
124 memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); 124 memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
125 125
126 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 126 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
127} 127}
128 128
129static void iwl_free_fw_paging(struct iwl_mvm *mvm) 129void iwl_free_fw_paging(struct iwl_mvm *mvm)
130{ 130{
131 int i; 131 int i;
132 132
@@ -146,6 +146,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
146 get_order(mvm->fw_paging_db[i].fw_paging_size)); 146 get_order(mvm->fw_paging_db[i].fw_paging_size));
147 } 147 }
148 kfree(mvm->trans->paging_download_buf); 148 kfree(mvm->trans->paging_download_buf);
149 mvm->trans->paging_download_buf = NULL;
150
149 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); 151 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
150} 152}
151 153
@@ -537,7 +539,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
537 struct iwl_sf_region st_fwrd_space; 539 struct iwl_sf_region st_fwrd_space;
538 540
539 if (ucode_type == IWL_UCODE_REGULAR && 541 if (ucode_type == IWL_UCODE_REGULAR &&
540 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE)) 542 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
543 !(fw_has_capa(&mvm->fw->ucode_capa,
544 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
541 fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); 545 fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
542 else 546 else
543 fw = iwl_get_ucode_image(mvm, ucode_type); 547 fw = iwl_get_ucode_image(mvm, ucode_type);
@@ -952,8 +956,26 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
952 goto error; 956 goto error;
953 } 957 }
954 958
959#ifdef CONFIG_THERMAL
960 if (iwl_mvm_is_tt_in_fw(mvm)) {
961 /* in order to give the responsibility of ct-kill and
962 * TX backoff to FW we need to send empty temperature reporting
963 * cmd during init time
964 */
965 iwl_mvm_send_temp_report_ths_cmd(mvm);
966 } else {
967 /* Initialize tx backoffs to the minimal possible */
968 iwl_mvm_tt_tx_backoff(mvm, 0);
969 }
970
971 /* TODO: read the budget from BIOS / Platform NVM */
972 if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
973 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
974 mvm->cooling_dev.cur_state);
975#else
955 /* Initialize tx backoffs to the minimal possible */ 976 /* Initialize tx backoffs to the minimal possible */
956 iwl_mvm_tt_tx_backoff(mvm, 0); 977 iwl_mvm_tt_tx_backoff(mvm, 0);
978#endif
957 979
958 WARN_ON(iwl_mvm_config_ltr(mvm)); 980 WARN_ON(iwl_mvm_config_ltr(mvm));
959 981
@@ -989,7 +1011,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
989 IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); 1011 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
990 return 0; 1012 return 0;
991 error: 1013 error:
992 iwl_trans_stop_device(mvm->trans); 1014 iwl_mvm_stop_device(mvm);
993 return ret; 1015 return ret;
994} 1016}
995 1017
@@ -1033,7 +1055,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1033 1055
1034 return 0; 1056 return 0;
1035 error: 1057 error:
1036 iwl_trans_stop_device(mvm->trans); 1058 iwl_mvm_stop_device(mvm);
1037 return ret; 1059 return ret;
1038} 1060}
1039 1061
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 535134d639e0..e885db3464b0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -1484,6 +1484,8 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
1484 /* update rx_status according to the notification's metadata */ 1484 /* update rx_status according to the notification's metadata */
1485 memset(&rx_status, 0, sizeof(rx_status)); 1485 memset(&rx_status, 0, sizeof(rx_status));
1486 rx_status.mactime = le64_to_cpu(sb->tsf); 1486 rx_status.mactime = le64_to_cpu(sb->tsf);
1487 /* TSF as indicated by the firmware is at INA time */
1488 rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
1487 rx_status.device_timestamp = le32_to_cpu(sb->system_time); 1489 rx_status.device_timestamp = le32_to_cpu(sb->system_time);
1488 rx_status.band = 1490 rx_status.band =
1489 (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 1491 (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 53156810185d..ec6b07282e7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -69,7 +70,6 @@
69#include <linux/etherdevice.h> 70#include <linux/etherdevice.h>
70#include <linux/ip.h> 71#include <linux/ip.h>
71#include <linux/if_arp.h> 72#include <linux/if_arp.h>
72#include <linux/devcoredump.h>
73#include <linux/time.h> 73#include <linux/time.h>
74#include <net/mac80211.h> 74#include <net/mac80211.h>
75#include <net/ieee80211_radiotap.h> 75#include <net/ieee80211_radiotap.h>
@@ -85,7 +85,6 @@
85#include "testmode.h" 85#include "testmode.h"
86#include "iwl-fw-error-dump.h" 86#include "iwl-fw-error-dump.h"
87#include "iwl-prph.h" 87#include "iwl-prph.h"
88#include "iwl-csr.h"
89#include "iwl-nvm-parse.h" 88#include "iwl-nvm-parse.h"
90#include "fw-dbg.h" 89#include "fw-dbg.h"
91 90
@@ -611,6 +610,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
611 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 610 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
612 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; 611 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
613 612
613 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_RRM);
614
614 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 615 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
615 616
616#ifdef CONFIG_PM_SLEEP 617#ifdef CONFIG_PM_SLEEP
@@ -847,6 +848,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
847 u16 tid = params->tid; 848 u16 tid = params->tid;
848 u16 *ssn = &params->ssn; 849 u16 *ssn = &params->ssn;
849 u8 buf_size = params->buf_size; 850 u8 buf_size = params->buf_size;
851 bool amsdu = params->amsdu;
850 852
851 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 853 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
852 sta->addr, tid, action); 854 sta->addr, tid, action);
@@ -907,7 +909,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
907 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); 909 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
908 break; 910 break;
909 case IEEE80211_AMPDU_TX_OPERATIONAL: 911 case IEEE80211_AMPDU_TX_OPERATIONAL:
910 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); 912 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
913 buf_size, amsdu);
911 break; 914 break;
912 default: 915 default:
913 WARN_ON_ONCE(1); 916 WARN_ON_ONCE(1);
@@ -969,7 +972,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
969 */ 972 */
970 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); 973 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
971 974
972 iwl_trans_stop_device(mvm->trans); 975 iwl_mvm_stop_device(mvm);
973 976
974 mvm->scan_status = 0; 977 mvm->scan_status = 0;
975 mvm->ps_disabled = false; 978 mvm->ps_disabled = false;
@@ -1138,7 +1141,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1138 */ 1141 */
1139 flush_work(&mvm->roc_done_wk); 1142 flush_work(&mvm->roc_done_wk);
1140 1143
1141 iwl_trans_stop_device(mvm->trans); 1144 iwl_mvm_stop_device(mvm);
1142 1145
1143 iwl_mvm_async_handlers_purge(mvm); 1146 iwl_mvm_async_handlers_purge(mvm);
1144 /* async_handlers_list is empty and will stay empty: HW is stopped */ 1147 /* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -1169,8 +1172,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1169 mvm->scan_uid_status[i] = 0; 1172 mvm->scan_uid_status[i] = 0;
1170 } 1173 }
1171 } 1174 }
1172
1173 mvm->ucode_loaded = false;
1174} 1175}
1175 1176
1176static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) 1177static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
@@ -1762,6 +1763,50 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1762} 1763}
1763#endif 1764#endif
1764 1765
1766static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1767 struct ieee80211_vif *vif)
1768{
1769 struct iwl_mu_group_mgmt_cmd cmd = {};
1770
1771 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1772 WLAN_MEMBERSHIP_LEN);
1773 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1774 WLAN_USER_POSITION_LEN);
1775
1776 return iwl_mvm_send_cmd_pdu(mvm,
1777 WIDE_ID(DATA_PATH_GROUP,
1778 UPDATE_MU_GROUPS_CMD),
1779 0, sizeof(cmd), &cmd);
1780}
1781
1782static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1783 struct ieee80211_vif *vif)
1784{
1785 if (vif->mu_mimo_owner) {
1786 struct iwl_mu_group_mgmt_notif *notif = _data;
1787
1788 /*
1789 * MU-MIMO Group Id action frame is little endian. We treat
1790 * the data received from firmware as if it came from the
1791 * action frame, so no conversion is needed.
1792 */
1793 ieee80211_update_mu_groups(vif,
1794 (u8 *)&notif->membership_status,
1795 (u8 *)&notif->user_position);
1796 }
1797}
1798
1799void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1800 struct iwl_rx_cmd_buffer *rxb)
1801{
1802 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1803 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1804
1805 ieee80211_iterate_active_interfaces_atomic(
1806 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1807 iwl_mvm_mu_mimo_iface_iterator, notif);
1808}
1809
1765static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 1810static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1766 struct ieee80211_vif *vif, 1811 struct ieee80211_vif *vif,
1767 struct ieee80211_bss_conf *bss_conf, 1812 struct ieee80211_bss_conf *bss_conf,
@@ -1870,6 +1915,18 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1870 vif->addr); 1915 vif->addr);
1871 } 1916 }
1872 1917
1918 /*
1919 * The firmware tracks the MU-MIMO group on its own.
1920 * However, on HW restart we should restore this data.
1921 */
1922 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1923 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1924 ret = iwl_mvm_update_mu_groups(mvm, vif);
1925 if (ret)
1926 IWL_ERR(mvm,
1927 "failed to update VHT MU_MIMO groups\n");
1928 }
1929
1873 iwl_mvm_recalc_multicast(mvm); 1930 iwl_mvm_recalc_multicast(mvm);
1874 iwl_mvm_configure_bcast_filter(mvm); 1931 iwl_mvm_configure_bcast_filter(mvm);
1875 1932
@@ -1896,7 +1953,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1896 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 1953 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
1897 } 1954 }
1898 1955
1899 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { 1956 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
1957 /*
1958 * Send power command on every beacon change,
1959 * because we may have not enabled beacon abort yet.
1960 */
1961 BSS_CHANGED_BEACON_INFO)) {
1900 ret = iwl_mvm_power_update_mac(mvm); 1962 ret = iwl_mvm_power_update_mac(mvm);
1901 if (ret) 1963 if (ret)
1902 IWL_ERR(mvm, "failed to update power mode\n"); 1964 IWL_ERR(mvm, "failed to update power mode\n");
@@ -2083,7 +2145,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2083 bss_conf->txpower); 2145 bss_conf->txpower);
2084 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2146 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2085 } 2147 }
2086
2087} 2148}
2088 2149
2089static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2150static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2276,6 +2337,11 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2276 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 2337 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2277 return; 2338 return;
2278 2339
2340 if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) {
2341 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2342 return;
2343 }
2344
2279 if (iwlwifi_mod_params.uapsd_disable) { 2345 if (iwlwifi_mod_params.uapsd_disable) {
2280 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2346 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2281 return; 2347 return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ebe37bb0ce4c..ab410b4659f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * 34 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 Intel Deutschland GmbH
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -71,6 +73,10 @@
71#include <linux/leds.h> 73#include <linux/leds.h>
72#include <linux/in6.h> 74#include <linux/in6.h>
73 75
76#ifdef CONFIG_THERMAL
77#include <linux/thermal.h>
78#endif
79
74#include "iwl-op-mode.h" 80#include "iwl-op-mode.h"
75#include "iwl-trans.h" 81#include "iwl-trans.h"
76#include "iwl-notif-wait.h" 82#include "iwl-notif-wait.h"
@@ -487,6 +493,12 @@ enum iwl_mvm_scan_type {
487 IWL_SCAN_TYPE_FRAGMENTED, 493 IWL_SCAN_TYPE_FRAGMENTED,
488}; 494};
489 495
496enum iwl_mvm_sched_scan_pass_all_states {
497 SCHED_SCAN_PASS_ALL_DISABLED,
498 SCHED_SCAN_PASS_ALL_ENABLED,
499 SCHED_SCAN_PASS_ALL_FOUND,
500};
501
490/** 502/**
491 * struct iwl_nvm_section - describes an NVM section in memory. 503 * struct iwl_nvm_section - describes an NVM section in memory.
492 * 504 *
@@ -517,6 +529,30 @@ struct iwl_mvm_tt_mgmt {
517 bool throttle; 529 bool throttle;
518}; 530};
519 531
532#ifdef CONFIG_THERMAL
533/**
534 *struct iwl_mvm_thermal_device - thermal zone related data
535 * @temp_trips: temperature thresholds for report
536 * @fw_trips_index: keep indexes to original array - temp_trips
537 * @tzone: thermal zone device data
538*/
539struct iwl_mvm_thermal_device {
540 s16 temp_trips[IWL_MAX_DTS_TRIPS];
541 u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
542 struct thermal_zone_device *tzone;
543};
544
545/*
546 * iwl_mvm_cooling_device
547 * @cur_state: current state in milliwatts
548 * @cdev: struct thermal cooling device
549 */
550struct iwl_mvm_cooling_device {
551 u32 cur_state;
552 struct thermal_cooling_device *cdev;
553};
554#endif
555
520#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 556#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
521 557
522struct iwl_mvm_frame_stats { 558struct iwl_mvm_frame_stats {
@@ -657,6 +693,7 @@ struct iwl_mvm {
657 void *scan_cmd; 693 void *scan_cmd;
658 struct iwl_mcast_filter_cmd *mcast_filter_cmd; 694 struct iwl_mcast_filter_cmd *mcast_filter_cmd;
659 enum iwl_mvm_scan_type scan_type; 695 enum iwl_mvm_scan_type scan_type;
696 enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
660 697
661 /* max number of simultaneous scans the FW supports */ 698 /* max number of simultaneous scans the FW supports */
662 unsigned int max_scans; 699 unsigned int max_scans;
@@ -797,6 +834,11 @@ struct iwl_mvm {
797 834
798 /* Thermal Throttling and CTkill */ 835 /* Thermal Throttling and CTkill */
799 struct iwl_mvm_tt_mgmt thermal_throttle; 836 struct iwl_mvm_tt_mgmt thermal_throttle;
837#ifdef CONFIG_THERMAL
838 struct iwl_mvm_thermal_device tz_device;
839 struct iwl_mvm_cooling_device cooling_dev;
840#endif
841
800 s32 temperature; /* Celsius */ 842 s32 temperature; /* Celsius */
801 /* 843 /*
802 * Debug option to set the NIC temperature. This option makes the 844 * Debug option to set the NIC temperature. This option makes the
@@ -819,6 +861,7 @@ struct iwl_mvm {
819 861
820 /* Indicate if device power save is allowed */ 862 /* Indicate if device power save is allowed */
821 u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ 863 u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
864 unsigned int max_amsdu_len; /* used for debugfs only */
822 865
823 struct ieee80211_vif __rcu *csa_vif; 866 struct ieee80211_vif __rcu *csa_vif;
824 struct ieee80211_vif __rcu *csa_tx_blocked_vif; 867 struct ieee80211_vif __rcu *csa_tx_blocked_vif;
@@ -943,8 +986,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
943 986
944static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) 987static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
945{ 988{
946 return fw_has_capa(&mvm->fw->ucode_capa, 989 /* Make sure DQA isn't allowed in driver until feature is complete */
947 IWL_UCODE_TLV_CAPA_DQA_SUPPORT); 990 return false && fw_has_capa(&mvm->fw->ucode_capa,
991 IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
948} 992}
949 993
950static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) 994static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
@@ -1028,6 +1072,28 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
1028 IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); 1072 IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
1029} 1073}
1030 1074
1075static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
1076{
1077#ifdef CONFIG_THERMAL
1078 /* these two TLV are redundant since the responsibility to CT-kill by
1079 * FW happens only after we send at least one command of
1080 * temperature THs report.
1081 */
1082 return fw_has_capa(&mvm->fw->ucode_capa,
1083 IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
1084 fw_has_capa(&mvm->fw->ucode_capa,
1085 IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
1086#else /* CONFIG_THERMAL */
1087 return false;
1088#endif /* CONFIG_THERMAL */
1089}
1090
1091static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
1092{
1093 return fw_has_capa(&mvm->fw->ucode_capa,
1094 IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
1095}
1096
1031extern const u8 iwl_mvm_ac_to_tx_fifo[]; 1097extern const u8 iwl_mvm_ac_to_tx_fifo[];
1032 1098
1033struct iwl_rate_info { 1099struct iwl_rate_info {
@@ -1160,6 +1226,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
1160 struct iwl_rx_cmd_buffer *rxb, int queue); 1226 struct iwl_rx_cmd_buffer *rxb, int queue);
1161void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, 1227void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
1162 struct iwl_rx_cmd_buffer *rxb, int queue); 1228 struct iwl_rx_cmd_buffer *rxb, int queue);
1229int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
1230 const u8 *data, u32 count);
1231void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
1232 int queue);
1163void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); 1233void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1164void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); 1234void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1165void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, 1235void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
@@ -1203,6 +1273,10 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
1203 struct iwl_rx_cmd_buffer *rxb); 1273 struct iwl_rx_cmd_buffer *rxb);
1204void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, 1274void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
1205 struct iwl_rx_cmd_buffer *rxb); 1275 struct iwl_rx_cmd_buffer *rxb);
1276void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1277 struct iwl_rx_cmd_buffer *rxb);
1278void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
1279 struct iwl_rx_cmd_buffer *rxb);
1206void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, 1280void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
1207 struct ieee80211_vif *vif); 1281 struct ieee80211_vif *vif);
1208unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, 1282unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1244,6 +1318,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1244void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, 1318void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1245 struct iwl_rx_cmd_buffer *rxb); 1319 struct iwl_rx_cmd_buffer *rxb);
1246 1320
1321/* Paging */
1322void iwl_free_fw_paging(struct iwl_mvm *mvm);
1323
1247/* MVM debugfs */ 1324/* MVM debugfs */
1248#ifdef CONFIG_IWLWIFI_DEBUGFS 1325#ifdef CONFIG_IWLWIFI_DEBUGFS
1249int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); 1326int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -1476,32 +1553,30 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
1476 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); 1553 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
1477} 1554}
1478 1555
1479static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, 1556static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1480 int mac80211_queue, int fifo,
1481 int sta_id, int tid, int frame_limit,
1482 u16 ssn, unsigned int wdg_timeout)
1483{ 1557{
1484 struct iwl_trans_txq_scd_cfg cfg = { 1558 mvm->ucode_loaded = false;
1485 .fifo = fifo, 1559 iwl_trans_stop_device(mvm->trans);
1486 .sta_id = sta_id,
1487 .tid = tid,
1488 .frame_limit = frame_limit,
1489 .aggregate = true,
1490 };
1491
1492 iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
1493} 1560}
1494 1561
1562/* Stop/start all mac queues in a given bitmap */
1563void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
1564void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
1565
1495/* Thermal management and CT-kill */ 1566/* Thermal management and CT-kill */
1496void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); 1567void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
1497void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); 1568void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
1498void iwl_mvm_temp_notif(struct iwl_mvm *mvm, 1569void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
1499 struct iwl_rx_cmd_buffer *rxb); 1570 struct iwl_rx_cmd_buffer *rxb);
1500void iwl_mvm_tt_handler(struct iwl_mvm *mvm); 1571void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
1501void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); 1572void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
1502void iwl_mvm_tt_exit(struct iwl_mvm *mvm); 1573void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
1503void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); 1574void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
1504int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); 1575int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
1576void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
1577int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
1578int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm);
1579int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
1505 1580
1506/* Location Aware Regulatory */ 1581/* Location Aware Regulatory */
1507struct iwl_mcc_update_resp * 1582struct iwl_mcc_update_resp *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 09a94a5efb61..699a80863e86 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -236,6 +236,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
236 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, 236 RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
237 iwl_mvm_rx_ant_coupling_notif, true), 237 iwl_mvm_rx_ant_coupling_notif, true),
238 238
239 RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
240 iwl_mvm_window_status_notif, false),
241
239 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), 242 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
240 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), 243 RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true),
241 244
@@ -263,6 +266,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
263 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), 266 RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
264 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, 267 RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
265 iwl_mvm_temp_notif, true), 268 iwl_mvm_temp_notif, true),
269 RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
270 iwl_mvm_ct_kill_notif, false),
266 271
267 RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, 272 RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
268 true), 273 true),
@@ -270,6 +275,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
270 RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), 275 RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
271 RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, 276 RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
272 iwl_mvm_rx_stored_beacon_notif, false), 277 iwl_mvm_rx_stored_beacon_notif, false),
278 RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
279 iwl_mvm_mu_mimo_grp_notif, false),
273 280
274}; 281};
275#undef RX_HANDLER 282#undef RX_HANDLER
@@ -292,6 +299,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
292 HCMD_NAME(SCAN_COMPLETE_UMAC), 299 HCMD_NAME(SCAN_COMPLETE_UMAC),
293 HCMD_NAME(TOF_CMD), 300 HCMD_NAME(TOF_CMD),
294 HCMD_NAME(TOF_NOTIFICATION), 301 HCMD_NAME(TOF_NOTIFICATION),
302 HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
295 HCMD_NAME(ADD_STA_KEY), 303 HCMD_NAME(ADD_STA_KEY),
296 HCMD_NAME(ADD_STA), 304 HCMD_NAME(ADD_STA),
297 HCMD_NAME(REMOVE_STA), 305 HCMD_NAME(REMOVE_STA),
@@ -387,12 +395,25 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
387 */ 395 */
388static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { 396static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
389 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), 397 HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
398 HCMD_NAME(CTDP_CONFIG_CMD),
399 HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
400 HCMD_NAME(CT_KILL_NOTIFICATION),
390 HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), 401 HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
391}; 402};
392 403
393/* Please keep this array *SORTED* by hex value. 404/* Please keep this array *SORTED* by hex value.
394 * Access is done through binary search 405 * Access is done through binary search
395 */ 406 */
407static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
408 HCMD_NAME(UPDATE_MU_GROUPS_CMD),
409 HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
410 HCMD_NAME(MU_GROUP_MGMT_NOTIF),
411 HCMD_NAME(RX_QUEUES_NOTIFICATION),
412};
413
414/* Please keep this array *SORTED* by hex value.
415 * Access is done through binary search
416 */
396static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { 417static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
397 HCMD_NAME(STORED_BEACON_NTF), 418 HCMD_NAME(STORED_BEACON_NTF),
398}; 419};
@@ -401,6 +422,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
401 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), 422 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
402 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), 423 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
403 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), 424 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
425 [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
404 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), 426 [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
405}; 427};
406 428
@@ -474,8 +496,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
474 496
475 if (iwl_mvm_has_new_rx_api(mvm)) { 497 if (iwl_mvm_has_new_rx_api(mvm)) {
476 op_mode->ops = &iwl_mvm_ops_mq; 498 op_mode->ops = &iwl_mvm_ops_mq;
499 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
477 } else { 500 } else {
478 op_mode->ops = &iwl_mvm_ops; 501 op_mode->ops = &iwl_mvm_ops;
502 trans->rx_mpdu_cmd_hdr_size =
503 sizeof(struct iwl_rx_mpdu_res_start);
479 504
480 if (WARN_ON(trans->num_rx_queues > 1)) 505 if (WARN_ON(trans->num_rx_queues > 1))
481 goto out_free; 506 goto out_free;
@@ -567,7 +592,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
567 iwl_trans_configure(mvm->trans, &trans_cfg); 592 iwl_trans_configure(mvm->trans, &trans_cfg);
568 593
569 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; 594 trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
570 trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
571 trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; 595 trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
572 trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; 596 trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
573 memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, 597 memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
@@ -588,7 +612,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
588 mvm->cfg->name, mvm->trans->hw_rev); 612 mvm->cfg->name, mvm->trans->hw_rev);
589 613
590 min_backoff = calc_min_backoff(trans, cfg); 614 min_backoff = calc_min_backoff(trans, cfg);
591 iwl_mvm_tt_initialize(mvm, min_backoff); 615 iwl_mvm_thermal_initialize(mvm, min_backoff);
592 616
593 if (iwlwifi_mod_params.nvm_file) 617 if (iwlwifi_mod_params.nvm_file)
594 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; 618 mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
@@ -619,7 +643,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
619 iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); 643 iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
620 err = iwl_run_init_mvm_ucode(mvm, true); 644 err = iwl_run_init_mvm_ucode(mvm, true);
621 if (!err || !iwlmvm_mod_params.init_dbg) 645 if (!err || !iwlmvm_mod_params.init_dbg)
622 iwl_trans_stop_device(trans); 646 iwl_mvm_stop_device(mvm);
623 iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); 647 iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
624 mutex_unlock(&mvm->mutex); 648 mutex_unlock(&mvm->mutex);
625 /* returns 0 if successful, 1 if success but in rfkill */ 649 /* returns 0 if successful, 1 if success but in rfkill */
@@ -648,19 +672,22 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
648 672
649 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); 673 memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
650 674
651 /* rpm starts with a taken reference, we can release it now */ 675 /* The transport always starts with a taken reference, we can
652 iwl_trans_unref(mvm->trans); 676 * release it now if d0i3 is supported */
677 if (iwl_mvm_is_d0i3_supported(mvm))
678 iwl_trans_unref(mvm->trans);
653 679
654 iwl_mvm_tof_init(mvm); 680 iwl_mvm_tof_init(mvm);
655 681
656 /* init RSS hash key */ 682 /* init RSS hash key */
657 get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key)); 683 get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
658 684
659 return op_mode; 685 return op_mode;
660 686
661 out_unregister: 687 out_unregister:
662 ieee80211_unregister_hw(mvm->hw); 688 ieee80211_unregister_hw(mvm->hw);
663 iwl_mvm_leds_exit(mvm); 689 iwl_mvm_leds_exit(mvm);
690 iwl_mvm_thermal_exit(mvm);
664 out_free: 691 out_free:
665 flush_delayed_work(&mvm->fw_dump_wk); 692 flush_delayed_work(&mvm->fw_dump_wk);
666 iwl_phy_db_free(mvm->phy_db); 693 iwl_phy_db_free(mvm->phy_db);
@@ -676,9 +703,16 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
676 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 703 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
677 int i; 704 int i;
678 705
706 /* If d0i3 is supported, we have released the reference that
707 * the transport started with, so we should take it back now
708 * that we are leaving.
709 */
710 if (iwl_mvm_is_d0i3_supported(mvm))
711 iwl_trans_ref(mvm->trans);
712
679 iwl_mvm_leds_exit(mvm); 713 iwl_mvm_leds_exit(mvm);
680 714
681 iwl_mvm_tt_exit(mvm); 715 iwl_mvm_thermal_exit(mvm);
682 716
683 ieee80211_unregister_hw(mvm->hw); 717 ieee80211_unregister_hw(mvm->hw);
684 718
@@ -699,6 +733,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
699 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 733 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
700 kfree(mvm->nvm_sections[i].data); 734 kfree(mvm->nvm_sections[i].data);
701 735
736 iwl_free_fw_paging(mvm);
737
702 iwl_mvm_tof_clean(mvm); 738 iwl_mvm_tof_clean(mvm);
703 739
704 ieee80211_free_hw(mvm->hw); 740 ieee80211_free_hw(mvm->hw);
@@ -856,28 +892,24 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
856 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); 892 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
857 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) 893 else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
858 iwl_mvm_rx_phy_cmd_mq(mvm, rxb); 894 iwl_mvm_rx_phy_cmd_mq(mvm, rxb);
895 else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP &&
896 pkt->hdr.cmd == RX_QUEUES_NOTIFICATION))
897 iwl_mvm_rx_queue_notif(mvm, rxb, 0);
859 else 898 else
860 iwl_mvm_rx_common(mvm, rxb, pkt); 899 iwl_mvm_rx_common(mvm, rxb, pkt);
861} 900}
862 901
863static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) 902void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
864{ 903{
865 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
866 unsigned long mq;
867 int q; 904 int q;
868 905
869 spin_lock_bh(&mvm->queue_info_lock);
870 mq = mvm->queue_info[queue].hw_queue_to_mac80211;
871 spin_unlock_bh(&mvm->queue_info_lock);
872
873 if (WARN_ON_ONCE(!mq)) 906 if (WARN_ON_ONCE(!mq))
874 return; 907 return;
875 908
876 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { 909 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
877 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { 910 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
878 IWL_DEBUG_TX_QUEUES(mvm, 911 IWL_DEBUG_TX_QUEUES(mvm,
879 "queue %d (mac80211 %d) already stopped\n", 912 "mac80211 %d already stopped\n", q);
880 queue, q);
881 continue; 913 continue;
882 } 914 }
883 915
@@ -897,24 +929,29 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
897 iwl_trans_block_txq_ptrs(mvm->trans, false); 929 iwl_trans_block_txq_ptrs(mvm->trans, false);
898} 930}
899 931
900static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) 932static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
901{ 933{
902 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 934 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
903 unsigned long mq; 935 unsigned long mq;
904 int q;
905 936
906 spin_lock_bh(&mvm->queue_info_lock); 937 spin_lock_bh(&mvm->queue_info_lock);
907 mq = mvm->queue_info[queue].hw_queue_to_mac80211; 938 mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
908 spin_unlock_bh(&mvm->queue_info_lock); 939 spin_unlock_bh(&mvm->queue_info_lock);
909 940
941 iwl_mvm_stop_mac_queues(mvm, mq);
942}
943
944void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
945{
946 int q;
947
910 if (WARN_ON_ONCE(!mq)) 948 if (WARN_ON_ONCE(!mq))
911 return; 949 return;
912 950
913 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { 951 for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
914 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { 952 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
915 IWL_DEBUG_TX_QUEUES(mvm, 953 IWL_DEBUG_TX_QUEUES(mvm,
916 "queue %d (mac80211 %d) still stopped\n", 954 "mac80211 %d still stopped\n", q);
917 queue, q);
918 continue; 955 continue;
919 } 956 }
920 957
@@ -922,6 +959,18 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
922 } 959 }
923} 960}
924 961
962static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
963{
964 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
965 unsigned long mq;
966
967 spin_lock_bh(&mvm->queue_info_lock);
968 mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
969 spin_unlock_bh(&mvm->queue_info_lock);
970
971 iwl_mvm_start_mac_queues(mvm, mq);
972}
973
925void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) 974void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
926{ 975{
927 if (state) 976 if (state)
@@ -1528,6 +1577,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
1528 1577
1529 if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) 1578 if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
1530 iwl_mvm_rx_frame_release(mvm, rxb, queue); 1579 iwl_mvm_rx_frame_release(mvm, rxb, queue);
1580 else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
1581 pkt->hdr.group_id == DATA_PATH_GROUP))
1582 iwl_mvm_rx_queue_notif(mvm, rxb, queue);
1531 else 1583 else
1532 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); 1584 iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
1533} 1585}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 6e7e78a37879..61d0a8cd13f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -556,6 +556,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate)
556 if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX)) 556 if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
557 rate_str = legacy_rates[rate->index]; 557 rate_str = legacy_rates[rate->index];
558 else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) && 558 else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
559 (rate->index >= IWL_RATE_MCS_0_INDEX) &&
559 (rate->index <= IWL_RATE_MCS_9_INDEX)) 560 (rate->index <= IWL_RATE_MCS_9_INDEX))
560 rate_str = ht_vht_rates[rate->index]; 561 rate_str = ht_vht_rates[rate->index];
561 else 562 else
@@ -1672,6 +1673,20 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1672 } 1673 }
1673} 1674}
1674 1675
1676static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
1677 struct iwl_scale_tbl_info *tbl,
1678 enum rs_action scale_action)
1679{
1680 struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta);
1681
1682 if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
1683 tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
1684 scale_action == RS_ACTION_DOWNSCALE)
1685 sta_priv->tlc_amsdu = false;
1686 else
1687 sta_priv->tlc_amsdu = true;
1688}
1689
1675/* 1690/*
1676 * setup rate table in uCode 1691 * setup rate table in uCode
1677 */ 1692 */
@@ -2415,6 +2430,7 @@ lq_update:
2415 tbl->rate.index = index; 2430 tbl->rate.index = index;
2416 if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK) 2431 if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
2417 rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action); 2432 rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
2433 rs_set_amsdu_len(mvm, sta, tbl, scale_action);
2418 rs_update_rate_tbl(mvm, sta, lq_sta, tbl); 2434 rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
2419 } 2435 }
2420 2436
@@ -3098,6 +3114,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
3098 sband = hw->wiphy->bands[band]; 3114 sband = hw->wiphy->bands[band];
3099 3115
3100 lq_sta->lq.sta_id = sta_priv->sta_id; 3116 lq_sta->lq.sta_id = sta_priv->sta_id;
3117 sta_priv->tlc_amsdu = false;
3101 3118
3102 for (j = 0; j < LQ_SIZE; j++) 3119 for (j = 0; j < LQ_SIZE; j++)
3103 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); 3120 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
@@ -3657,10 +3674,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3657 ssize_t ret; 3674 ssize_t ret;
3658 3675
3659 struct iwl_lq_sta *lq_sta = file->private_data; 3676 struct iwl_lq_sta *lq_sta = file->private_data;
3677 struct iwl_mvm_sta *mvmsta =
3678 container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
3660 struct iwl_mvm *mvm; 3679 struct iwl_mvm *mvm;
3661 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 3680 struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
3662 struct rs_rate *rate = &tbl->rate; 3681 struct rs_rate *rate = &tbl->rate;
3663 u32 ss_params; 3682 u32 ss_params;
3683
3664 mvm = lq_sta->pers.drv; 3684 mvm = lq_sta->pers.drv;
3665 buff = kmalloc(2048, GFP_KERNEL); 3685 buff = kmalloc(2048, GFP_KERNEL);
3666 if (!buff) 3686 if (!buff)
@@ -3686,10 +3706,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3686 (is_ht20(rate)) ? "20MHz" : 3706 (is_ht20(rate)) ? "20MHz" :
3687 (is_ht40(rate)) ? "40MHz" : 3707 (is_ht40(rate)) ? "40MHz" :
3688 (is_ht80(rate)) ? "80Mhz" : "BAD BW"); 3708 (is_ht80(rate)) ? "80Mhz" : "BAD BW");
3689 desc += sprintf(buff + desc, " %s %s %s\n", 3709 desc += sprintf(buff + desc, " %s %s %s %s\n",
3690 (rate->sgi) ? "SGI" : "NGI", 3710 (rate->sgi) ? "SGI" : "NGI",
3691 (rate->ldpc) ? "LDPC" : "BCC", 3711 (rate->ldpc) ? "LDPC" : "BCC",
3692 (lq_sta->is_agg) ? "AGG on" : ""); 3712 (lq_sta->is_agg) ? "AGG on" : "",
3713 (mvmsta->tlc_amsdu) ? "AMSDU on" : "");
3693 } 3714 }
3694 desc += sprintf(buff+desc, "last tx rate=0x%X\n", 3715 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
3695 lq_sta->last_rate_n_flags); 3716 lq_sta->last_rate_n_flags);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 145ec68ce6f9..485cfc1a4daa 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -322,11 +323,9 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
322 rx_status->freq = 323 rx_status->freq =
323 ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), 324 ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
324 rx_status->band); 325 rx_status->band);
325 /* 326
326 * TSF as indicated by the fw is at INA time, but mac80211 expects the 327 /* TSF as indicated by the firmware is at INA time */
327 * TSF at the beginning of the MPDU. 328 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
328 */
329 /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/
330 329
331 iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); 330 iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
332 331
@@ -448,6 +447,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
448 iwl_mvm_update_frame_stats(mvm, rate_n_flags, 447 iwl_mvm_update_frame_stats(mvm, rate_n_flags,
449 rx_status->flag & RX_FLAG_AMPDU_DETAILS); 448 rx_status->flag & RX_FLAG_AMPDU_DETAILS);
450#endif 449#endif
450
451 if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
452 ieee80211_is_probe_resp(hdr->frame_control)) &&
453 mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
454 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
455
451 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, 456 iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
452 crypt_len, rxb); 457 crypt_len, rxb);
453} 458}
@@ -622,3 +627,51 @@ void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
622{ 627{
623 iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); 628 iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
624} 629}
630
631void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
632 struct iwl_rx_cmd_buffer *rxb)
633{
634 struct iwl_rx_packet *pkt = rxb_addr(rxb);
635 struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
636 int i;
637 u32 pkt_len = iwl_rx_packet_payload_len(pkt);
638
639 if (WARN_ONCE(pkt_len != sizeof(*notif),
640 "Received window status notification of wrong size (%u)\n",
641 pkt_len))
642 return;
643
644 rcu_read_lock();
645 for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
646 struct ieee80211_sta *sta;
647 u8 sta_id, tid;
648 u64 bitmap;
649 u32 ssn;
650 u16 ratid;
651 u16 received_mpdu;
652
653 ratid = le16_to_cpu(notif->ra_tid[i]);
654 /* check that this TID is valid */
655 if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
656 continue;
657
658 received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
659 if (received_mpdu == 0)
660 continue;
661
662 tid = ratid & BA_WINDOW_STATUS_TID_MSK;
663 /* get the station */
664 sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
665 >> BA_WINDOW_STATUS_STA_ID_POS;
666 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
667 if (IS_ERR_OR_NULL(sta))
668 continue;
669 bitmap = le64_to_cpu(notif->bitmap[i]);
670 ssn = le32_to_cpu(notif->start_seq_num[i]);
671
672 /* update mac80211 with the bitmap for the reordering buffer */
673 ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
674 received_mpdu);
675 }
676 rcu_read_unlock();
677}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 615dea143d4e..cd6ca374e5d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -7,7 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
29 * 29 *
30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 30 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 31 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
32 * Copyright(c) 2015 Intel Deutschland GmbH 32 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
33 * All rights reserved. 33 * All rights reserved.
34 * 34 *
35 * Redistribution and use in source and binary forms, with or without 35 * Redistribution and use in source and binary forms, with or without
@@ -156,7 +156,14 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
156 u16 len, u8 crypt_len, 156 u16 len, u8 crypt_len,
157 struct iwl_rx_cmd_buffer *rxb) 157 struct iwl_rx_cmd_buffer *rxb)
158{ 158{
159 unsigned int hdrlen, fraglen; 159 struct iwl_rx_packet *pkt = rxb_addr(rxb);
160 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
161 unsigned int headlen, fraglen, pad_len = 0;
162 unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
163
164 if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
165 pad_len = 2;
166 len -= pad_len;
160 167
161 /* If frame is small enough to fit in skb->head, pull it completely. 168 /* If frame is small enough to fit in skb->head, pull it completely.
162 * If not, only pull ieee80211_hdr (including crypto if present, and 169 * If not, only pull ieee80211_hdr (including crypto if present, and
@@ -170,14 +177,23 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
170 * If the latter changes (there are efforts in the standards group 177 * If the latter changes (there are efforts in the standards group
171 * to do so) we should revisit this and ieee80211_data_to_8023(). 178 * to do so) we should revisit this and ieee80211_data_to_8023().
172 */ 179 */
173 hdrlen = (len <= skb_tailroom(skb)) ? len : 180 headlen = (len <= skb_tailroom(skb)) ? len :
174 sizeof(*hdr) + crypt_len + 8; 181 hdrlen + crypt_len + 8;
175 182
183 /* The firmware may align the packet to DWORD.
184 * The padding is inserted after the IV.
185 * After copying the header + IV skip the padding if
186 * present before copying packet data.
187 */
188 hdrlen += crypt_len;
176 memcpy(skb_put(skb, hdrlen), hdr, hdrlen); 189 memcpy(skb_put(skb, hdrlen), hdr, hdrlen);
177 fraglen = len - hdrlen; 190 memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len,
191 headlen - hdrlen);
192
193 fraglen = len - headlen;
178 194
179 if (fraglen) { 195 if (fraglen) {
180 int offset = (void *)hdr + hdrlen - 196 int offset = (void *)hdr + headlen + pad_len -
181 rxb_addr(rxb) + rxb_offset(rxb); 197 rxb_addr(rxb) + rxb_offset(rxb);
182 198
183 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, 199 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
@@ -285,6 +301,114 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
285 skb->ip_summed = CHECKSUM_UNNECESSARY; 301 skb->ip_summed = CHECKSUM_UNNECESSARY;
286} 302}
287 303
304/*
305 * returns true if a packet outside BA session is a duplicate and
306 * should be dropped
307 */
308static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
309 struct ieee80211_rx_status *rx_status,
310 struct ieee80211_hdr *hdr,
311 struct iwl_rx_mpdu_desc *desc)
312{
313 struct iwl_mvm_sta *mvm_sta;
314 struct iwl_mvm_rxq_dup_data *dup_data;
315 u8 baid, tid, sub_frame_idx;
316
317 if (WARN_ON(IS_ERR_OR_NULL(sta)))
318 return false;
319
320 baid = (le32_to_cpu(desc->reorder_data) &
321 IWL_RX_MPDU_REORDER_BAID_MASK) >>
322 IWL_RX_MPDU_REORDER_BAID_SHIFT;
323
324 if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
325 return false;
326
327 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
328 dup_data = &mvm_sta->dup_data[queue];
329
330 /*
331 * Drop duplicate 802.11 retransmissions
332 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
333 */
334 if (ieee80211_is_ctl(hdr->frame_control) ||
335 ieee80211_is_qos_nullfunc(hdr->frame_control) ||
336 is_multicast_ether_addr(hdr->addr1)) {
337 rx_status->flag |= RX_FLAG_DUP_VALIDATED;
338 return false;
339 }
340
341 if (ieee80211_is_data_qos(hdr->frame_control))
342 /* frame has qos control */
343 tid = *ieee80211_get_qos_ctl(hdr) &
344 IEEE80211_QOS_CTL_TID_MASK;
345 else
346 tid = IWL_MAX_TID_COUNT;
347
348 /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
349 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
350
351 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
352 dup_data->last_seq[tid] == hdr->seq_ctrl &&
353 dup_data->last_sub_frame[tid] >= sub_frame_idx))
354 return true;
355
356 dup_data->last_seq[tid] = hdr->seq_ctrl;
357 dup_data->last_sub_frame[tid] = sub_frame_idx;
358
359 rx_status->flag |= RX_FLAG_DUP_VALIDATED;
360
361 return false;
362}
363
364int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
365 const u8 *data, u32 count)
366{
367 struct iwl_rxq_sync_cmd *cmd;
368 u32 data_size = sizeof(*cmd) + count;
369 int ret;
370
371 /* should be DWORD aligned */
372 if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
373 return -EINVAL;
374
375 cmd = kzalloc(data_size, GFP_KERNEL);
376 if (!cmd)
377 return -ENOMEM;
378
379 cmd->rxq_mask = cpu_to_le32(rxq_mask);
380 cmd->count = cpu_to_le32(count);
381 cmd->flags = 0;
382 memcpy(cmd->payload, data, count);
383
384 ret = iwl_mvm_send_cmd_pdu(mvm,
385 WIDE_ID(DATA_PATH_GROUP,
386 TRIGGER_RX_QUEUES_NOTIF_CMD),
387 0, data_size, cmd);
388
389 kfree(cmd);
390 return ret;
391}
392
393void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
394 int queue)
395{
396 struct iwl_rx_packet *pkt = rxb_addr(rxb);
397 struct iwl_rxq_sync_notification *notif;
398 struct iwl_mvm_internal_rxq_notif *internal_notif;
399
400 notif = (void *)pkt->data;
401 internal_notif = (void *)notif->payload;
402
403 switch (internal_notif->type) {
404 case IWL_MVM_RXQ_NOTIF_DEL_BA:
405 /* TODO */
406 break;
407 default:
408 WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
409 }
410}
411
288void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, 412void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
289 struct iwl_rx_cmd_buffer *rxb, int queue) 413 struct iwl_rx_cmd_buffer *rxb, int queue)
290{ 414{
@@ -332,6 +456,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
332 rx_status->freq = ieee80211_channel_to_frequency(desc->channel, 456 rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
333 rx_status->band); 457 rx_status->band);
334 iwl_mvm_get_signal_strength(mvm, desc, rx_status); 458 iwl_mvm_get_signal_strength(mvm, desc, rx_status);
459 /* TSF as indicated by the firmware is at INA time */
460 rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
335 461
336 rcu_read_lock(); 462 rcu_read_lock();
337 463
@@ -387,6 +513,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
387 513
388 if (ieee80211_is_data(hdr->frame_control)) 514 if (ieee80211_is_data(hdr->frame_control))
389 iwl_mvm_rx_csum(sta, skb, desc); 515 iwl_mvm_rx_csum(sta, skb, desc);
516
517 if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) {
518 kfree_skb(skb);
519 rcu_read_unlock();
520 return;
521 }
390 } 522 }
391 523
392 /* 524 /*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index aa6d8074f63a..09eb72c4ae43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * 34 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 Intel Deutschland GmbH
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -297,6 +299,12 @@ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
297 iwl_mvm_dump_channel_list(notif->results, 299 iwl_mvm_dump_channel_list(notif->results,
298 notif->scanned_channels, buf, 300 notif->scanned_channels, buf,
299 sizeof(buf))); 301 sizeof(buf)));
302
303 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
304 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
305 ieee80211_sched_scan_results(mvm->hw);
306 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
307 }
300} 308}
301 309
302void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, 310void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
@@ -380,6 +388,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
380 388
381 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; 389 mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
382 ieee80211_sched_scan_stopped(mvm->hw); 390 ieee80211_sched_scan_stopped(mvm->hw);
391 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
383 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { 392 } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
384 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", 393 IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
385 aborted ? "aborted" : "completed", 394 aborted ? "aborted" : "completed",
@@ -533,10 +542,13 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
533 IWL_DEBUG_SCAN(mvm, 542 IWL_DEBUG_SCAN(mvm,
534 "Sending scheduled scan with filtering, n_match_sets %d\n", 543 "Sending scheduled scan with filtering, n_match_sets %d\n",
535 req->n_match_sets); 544 req->n_match_sets);
545 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
536 return false; 546 return false;
537 } 547 }
538 548
539 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); 549 IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
550
551 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
540 return true; 552 return true;
541} 553}
542 554
@@ -788,6 +800,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
788 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; 800 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
789#endif 801#endif
790 802
803 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
804 flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
805
791 if (iwl_mvm_is_regular_scan(params) && 806 if (iwl_mvm_is_regular_scan(params) &&
792 vif->type != NL80211_IFTYPE_P2P_DEVICE && 807 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
793 params->type != IWL_SCAN_TYPE_FRAGMENTED) 808 params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1074,6 +1089,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1074 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; 1089 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1075#endif 1090#endif
1076 1091
1092 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
1093 flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1094
1077 if (iwl_mvm_is_regular_scan(params) && 1095 if (iwl_mvm_is_regular_scan(params) &&
1078 vif->type != NL80211_IFTYPE_P2P_DEVICE && 1096 vif->type != NL80211_IFTYPE_P2P_DEVICE &&
1079 params->type != IWL_SCAN_TYPE_FRAGMENTED) 1097 params->type != IWL_SCAN_TYPE_FRAGMENTED)
@@ -1301,10 +1319,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1301 return -EBUSY; 1319 return -EBUSY;
1302 } 1320 }
1303 1321
1304 /* we don't support "match all" in the firmware */
1305 if (!req->n_match_sets)
1306 return -EOPNOTSUPP;
1307
1308 ret = iwl_mvm_check_running_scans(mvm, type); 1322 ret = iwl_mvm_check_running_scans(mvm, type);
1309 if (ret) 1323 if (ret)
1310 return ret; 1324 return ret;
@@ -1400,6 +1414,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1400 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); 1414 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1401 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { 1415 } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
1402 ieee80211_sched_scan_stopped(mvm->hw); 1416 ieee80211_sched_scan_stopped(mvm->hw);
1417 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
1403 } 1418 }
1404 1419
1405 mvm->scan_status &= ~mvm->scan_uid_status[uid]; 1420 mvm->scan_status &= ~mvm->scan_uid_status[uid];
@@ -1434,6 +1449,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1434 iwl_mvm_dump_channel_list(notif->results, 1449 iwl_mvm_dump_channel_list(notif->results,
1435 notif->scanned_channels, buf, 1450 notif->scanned_channels, buf,
1436 sizeof(buf))); 1451 sizeof(buf)));
1452
1453 if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
1454 IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
1455 ieee80211_sched_scan_results(mvm->hw);
1456 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
1457 }
1437} 1458}
1438 1459
1439static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) 1460static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
@@ -1528,6 +1549,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
1528 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); 1549 uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
1529 if (uid >= 0 && !mvm->restart_fw) { 1550 if (uid >= 0 && !mvm->restart_fw) {
1530 ieee80211_sched_scan_stopped(mvm->hw); 1551 ieee80211_sched_scan_stopped(mvm->hw);
1552 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
1531 mvm->scan_uid_status[uid] = 0; 1553 mvm->scan_uid_status[uid] = 0;
1532 } 1554 }
1533 1555
@@ -1549,8 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
1549 * restart_hw, so do not report if FW is about to be 1571 * restart_hw, so do not report if FW is about to be
1550 * restarted. 1572 * restarted.
1551 */ 1573 */
1552 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw) 1574 if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
1575 !mvm->restart_fw) {
1553 ieee80211_sched_scan_stopped(mvm->hw); 1576 ieee80211_sched_scan_stopped(mvm->hw);
1577 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
1578 }
1554 } 1579 }
1555} 1580}
1556 1581
@@ -1586,6 +1611,7 @@ out:
1586 ieee80211_scan_completed(mvm->hw, true); 1611 ieee80211_scan_completed(mvm->hw, true);
1587 } else if (notify) { 1612 } else if (notify) {
1588 ieee80211_sched_scan_stopped(mvm->hw); 1613 ieee80211_sched_scan_stopped(mvm->hw);
1614 mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
1589 } 1615 }
1590 1616
1591 return ret; 1617 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 4854e79cbda8..ef99942d7169 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -280,6 +280,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
280{ 280{
281 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 281 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
282 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 282 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
283 struct iwl_mvm_rxq_dup_data *dup_data;
283 int i, ret, sta_id; 284 int i, ret, sta_id;
284 285
285 lockdep_assert_held(&mvm->mutex); 286 lockdep_assert_held(&mvm->mutex);
@@ -327,6 +328,16 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
327 } 328 }
328 mvm_sta->agg_tids = 0; 329 mvm_sta->agg_tids = 0;
329 330
331 if (iwl_mvm_has_new_rx_api(mvm) &&
332 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
333 dup_data = kcalloc(mvm->trans->num_rx_queues,
334 sizeof(*dup_data),
335 GFP_KERNEL);
336 if (!dup_data)
337 return -ENOMEM;
338 mvm_sta->dup_data = dup_data;
339 }
340
330 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); 341 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
331 if (ret) 342 if (ret)
332 goto err; 343 goto err;
@@ -508,6 +519,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
508 519
509 lockdep_assert_held(&mvm->mutex); 520 lockdep_assert_held(&mvm->mutex);
510 521
522 if (iwl_mvm_has_new_rx_api(mvm))
523 kfree(mvm_sta->dup_data);
524
511 if (vif->type == NL80211_IFTYPE_STATION && 525 if (vif->type == NL80211_IFTYPE_STATION &&
512 mvmvif->ap_sta_id == mvm_sta->sta_id) { 526 mvmvif->ap_sta_id == mvm_sta->sta_id) {
513 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); 527 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -1031,15 +1045,23 @@ release_locks:
1031} 1045}
1032 1046
1033int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1047int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1034 struct ieee80211_sta *sta, u16 tid, u8 buf_size) 1048 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
1049 bool amsdu)
1035{ 1050{
1036 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 1051 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1037 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 1052 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1038 unsigned int wdg_timeout = 1053 unsigned int wdg_timeout =
1039 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); 1054 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
1040 int queue, fifo, ret; 1055 int queue, ret;
1041 u16 ssn; 1056 u16 ssn;
1042 1057
1058 struct iwl_trans_txq_scd_cfg cfg = {
1059 .sta_id = mvmsta->sta_id,
1060 .tid = tid,
1061 .frame_limit = buf_size,
1062 .aggregate = true,
1063 };
1064
1043 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) 1065 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
1044 != IWL_MAX_TID_COUNT); 1066 != IWL_MAX_TID_COUNT);
1045 1067
@@ -1051,13 +1073,13 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1051 tid_data->state = IWL_AGG_ON; 1073 tid_data->state = IWL_AGG_ON;
1052 mvmsta->agg_tids |= BIT(tid); 1074 mvmsta->agg_tids |= BIT(tid);
1053 tid_data->ssn = 0xffff; 1075 tid_data->ssn = 0xffff;
1076 tid_data->amsdu_in_ampdu_allowed = amsdu;
1054 spin_unlock_bh(&mvmsta->lock); 1077 spin_unlock_bh(&mvmsta->lock);
1055 1078
1056 fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; 1079 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
1057 1080
1058 iwl_mvm_enable_agg_txq(mvm, queue, 1081 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
1059 vif->hw_queue[tid_to_mac80211_ac[tid]], fifo, 1082 ssn, &cfg, wdg_timeout);
1060 mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout);
1061 1083
1062 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 1084 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
1063 if (ret) 1085 if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index e3b9446ee995..1a8f69a41405 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -258,8 +258,7 @@ enum iwl_mvm_agg_state {
258 * This is basically (last acked packet++). 258 * This is basically (last acked packet++).
259 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the 259 * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
260 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 260 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
261 * @reduced_tpc: Reduced tx power. Holds the data between the 261 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
262 * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
263 * @state: state of the BA agreement establishment / tear down. 262 * @state: state of the BA agreement establishment / tear down.
264 * @txq_id: Tx queue used by the BA session 263 * @txq_id: Tx queue used by the BA session
265 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or 264 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
@@ -273,7 +272,7 @@ struct iwl_mvm_tid_data {
273 u16 next_reclaimed; 272 u16 next_reclaimed;
274 /* The rest is Tx AGG related */ 273 /* The rest is Tx AGG related */
275 u32 rate_n_flags; 274 u32 rate_n_flags;
276 u8 reduced_tpc; 275 bool amsdu_in_ampdu_allowed;
277 enum iwl_mvm_agg_state state; 276 enum iwl_mvm_agg_state state;
278 u16 txq_id; 277 u16 txq_id;
279 u16 ssn; 278 u16 ssn;
@@ -294,6 +293,16 @@ struct iwl_mvm_key_pn {
294}; 293};
295 294
296/** 295/**
296 * struct iwl_mvm_rxq_dup_data - per station per rx queue data
297 * @last_seq: last sequence per tid for duplicate packet detection
298 * @last_sub_frame: last subframe packet
299 */
300struct iwl_mvm_rxq_dup_data {
301 __le16 last_seq[IWL_MAX_TID_COUNT + 1];
302 u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
303} ____cacheline_aligned_in_smp;
304
305/**
297 * struct iwl_mvm_sta - representation of a station in the driver 306 * struct iwl_mvm_sta - representation of a station in the driver
298 * @sta_id: the index of the station in the fw (will be replaced by id_n_color) 307 * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
299 * @tfd_queue_msk: the tfd queues used by the station 308 * @tfd_queue_msk: the tfd queues used by the station
@@ -311,6 +320,7 @@ struct iwl_mvm_key_pn {
311 * @tx_protection: reference counter for controlling the Tx protection. 320 * @tx_protection: reference counter for controlling the Tx protection.
312 * @tt_tx_protection: is thermal throttling enable Tx protection? 321 * @tt_tx_protection: is thermal throttling enable Tx protection?
313 * @disable_tx: is tx to this STA disabled? 322 * @disable_tx: is tx to this STA disabled?
323 * @tlc_amsdu: true if A-MSDU is allowed
314 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) 324 * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
315 * @sleep_tx_count: the number of frames that we told the firmware to let out 325 * @sleep_tx_count: the number of frames that we told the firmware to let out
316 * even when that station is asleep. This is useful in case the queue 326 * even when that station is asleep. This is useful in case the queue
@@ -318,6 +328,7 @@ struct iwl_mvm_key_pn {
318 * we are sending frames from an AMPDU queue and there was a hole in 328 * we are sending frames from an AMPDU queue and there was a hole in
319 * the BA window. To be used for UAPSD only. 329 * the BA window. To be used for UAPSD only.
320 * @ptk_pn: per-queue PTK PN data structures 330 * @ptk_pn: per-queue PTK PN data structures
331 * @dup_data: per queue duplicate packet detection data
321 * 332 *
322 * When mac80211 creates a station it reserves some space (hw->sta_data_size) 333 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
323 * in the structure for use by driver. This structure is placed in that 334 * in the structure for use by driver. This structure is placed in that
@@ -337,14 +348,15 @@ struct iwl_mvm_sta {
337 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; 348 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
338 struct iwl_lq_sta lq_sta; 349 struct iwl_lq_sta lq_sta;
339 struct ieee80211_vif *vif; 350 struct ieee80211_vif *vif;
340
341 struct iwl_mvm_key_pn __rcu *ptk_pn[4]; 351 struct iwl_mvm_key_pn __rcu *ptk_pn[4];
352 struct iwl_mvm_rxq_dup_data *dup_data;
342 353
343 /* Temporary, until the new TLC will control the Tx protection */ 354 /* Temporary, until the new TLC will control the Tx protection */
344 s8 tx_protection; 355 s8 tx_protection;
345 bool tt_tx_protection; 356 bool tt_tx_protection;
346 357
347 bool disable_tx; 358 bool disable_tx;
359 bool tlc_amsdu;
348 u8 agg_tids; 360 u8 agg_tids;
349 u8 sleep_tx_count; 361 u8 sleep_tx_count;
350}; 362};
@@ -405,7 +417,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
405int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 417int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
406 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 418 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
407int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 419int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
408 struct ieee80211_sta *sta, u16 tid, u8 buf_size); 420 struct ieee80211_sta *sta, u16 tid, u8 buf_size,
421 bool amsdu);
409int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 422int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
410 struct ieee80211_sta *sta, u16 tid); 423 struct ieee80211_sta *sta, u16 tid);
411int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 424int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 758d05a8c6aa..999bcb898be8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -33,7 +34,7 @@
33 * 34 *
34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36 * Copyright(c) 2015 Intel Deutschland GmbH 37 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
37 * All rights reserved. 38 * All rights reserved.
38 * 39 *
39 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -64,6 +65,8 @@
64 * 65 *
65 *****************************************************************************/ 66 *****************************************************************************/
66 67
68#include <linux/sort.h>
69
67#include "mvm.h" 70#include "mvm.h"
68 71
69#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ 72#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ
@@ -79,8 +82,10 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
79 IWL_ERR(mvm, "Enter CT Kill\n"); 82 IWL_ERR(mvm, "Enter CT Kill\n");
80 iwl_mvm_set_hw_ctkill_state(mvm, true); 83 iwl_mvm_set_hw_ctkill_state(mvm, true);
81 84
82 tt->throttle = false; 85 if (!iwl_mvm_is_tt_in_fw(mvm)) {
83 tt->dynamic_smps = false; 86 tt->throttle = false;
87 tt->dynamic_smps = false;
88 }
84 89
85 /* Don't schedule an exit work if we're in test mode, since 90 /* Don't schedule an exit work if we're in test mode, since
86 * the temperature will not change unless we manually set it 91 * the temperature will not change unless we manually set it
@@ -116,18 +121,21 @@ void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
116static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, 121static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
117 struct iwl_rx_packet *pkt) 122 struct iwl_rx_packet *pkt)
118{ 123{
119 struct iwl_dts_measurement_notif *notif; 124 struct iwl_dts_measurement_notif_v1 *notif_v1;
120 int len = iwl_rx_packet_payload_len(pkt); 125 int len = iwl_rx_packet_payload_len(pkt);
121 int temp; 126 int temp;
122 127
123 if (WARN_ON_ONCE(len < sizeof(*notif))) { 128 /* we can use notif_v1 only, because v2 only adds an additional
129 * parameter, which is not used in this function.
130 */
131 if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
124 IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); 132 IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
125 return -EINVAL; 133 return -EINVAL;
126 } 134 }
127 135
128 notif = (void *)pkt->data; 136 notif_v1 = (void *)pkt->data;
129 137
130 temp = le32_to_cpu(notif->temp); 138 temp = le32_to_cpu(notif_v1->temp);
131 139
132 /* shouldn't be negative, but since it's s32, make sure it isn't */ 140 /* shouldn't be negative, but since it's s32, make sure it isn't */
133 if (WARN_ON_ONCE(temp < 0)) 141 if (WARN_ON_ONCE(temp < 0))
@@ -158,17 +166,74 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
158void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 166void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
159{ 167{
160 struct iwl_rx_packet *pkt = rxb_addr(rxb); 168 struct iwl_rx_packet *pkt = rxb_addr(rxb);
169 struct iwl_dts_measurement_notif_v2 *notif_v2;
170 int len = iwl_rx_packet_payload_len(pkt);
161 int temp; 171 int temp;
172 u32 ths_crossed;
162 173
163 /* the notification is handled synchronously in ctkill, so skip here */ 174 /* the notification is handled synchronously in ctkill, so skip here */
164 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) 175 if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
165 return; 176 return;
166 177
167 temp = iwl_mvm_temp_notif_parse(mvm, pkt); 178 temp = iwl_mvm_temp_notif_parse(mvm, pkt);
168 if (temp < 0) 179
180 if (!iwl_mvm_is_tt_in_fw(mvm)) {
181 if (temp >= 0)
182 iwl_mvm_tt_temp_changed(mvm, temp);
169 return; 183 return;
184 }
185
186 if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
187 IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
188 return;
189 }
190
191 notif_v2 = (void *)pkt->data;
192 ths_crossed = le32_to_cpu(notif_v2->threshold_idx);
170 193
171 iwl_mvm_tt_temp_changed(mvm, temp); 194 /* 0xFF in ths_crossed means the notification is not related
195 * to a trip, so we can ignore it here.
196 */
197 if (ths_crossed == 0xFF)
198 return;
199
200 IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n",
201 temp, ths_crossed);
202
203#ifdef CONFIG_THERMAL
204 if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
205 return;
206
207 /*
208 * We are now handling a temperature notification from the firmware
209 * in ASYNC and hold the mutex. thermal_notify_framework will call
210 * us back through get_temp() which ought to send a SYNC command to
211 * the firmware and hence to take the mutex.
212 * Avoid the deadlock by unlocking the mutex here.
213 */
214 mutex_unlock(&mvm->mutex);
215 thermal_notify_framework(mvm->tz_device.tzone,
216 mvm->tz_device.fw_trips_index[ths_crossed]);
217 mutex_lock(&mvm->mutex);
218#endif /* CONFIG_THERMAL */
219}
220
221void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
222{
223 struct iwl_rx_packet *pkt = rxb_addr(rxb);
224 struct ct_kill_notif *notif;
225 int len = iwl_rx_packet_payload_len(pkt);
226
227 if (WARN_ON_ONCE(len != sizeof(*notif))) {
228 IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
229 return;
230 }
231
232 notif = (struct ct_kill_notif *)pkt->data;
233 IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
234 notif->temperature);
235
236 iwl_mvm_enter_ctkill(mvm);
172} 237}
173 238
174static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) 239static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -236,6 +301,12 @@ static void check_exit_ctkill(struct work_struct *work)
236 tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); 301 tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
237 mvm = container_of(tt, struct iwl_mvm, thermal_throttle); 302 mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
238 303
304 if (iwl_mvm_is_tt_in_fw(mvm)) {
305 iwl_mvm_exit_ctkill(mvm);
306
307 return;
308 }
309
239 duration = tt->params.ct_kill_duration; 310 duration = tt->params.ct_kill_duration;
240 311
241 mutex_lock(&mvm->mutex); 312 mutex_lock(&mvm->mutex);
@@ -435,7 +506,365 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = {
435 .support_tx_backoff = true, 506 .support_tx_backoff = true,
436}; 507};
437 508
438void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) 509#ifdef CONFIG_THERMAL
510static int compare_temps(const void *a, const void *b)
511{
512 return ((s16)le16_to_cpu(*(__le16 *)a) -
513 (s16)le16_to_cpu(*(__le16 *)b));
514}
515
516int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
517{
518 struct temp_report_ths_cmd cmd = {0};
519 int ret, i, j, idx = 0;
520
521 lockdep_assert_held(&mvm->mutex);
522
523 /* The driver holds array of temperature trips that are unsorted
524 * and uncompressed, the FW should get it compressed and sorted
525 */
526
527 /* compress temp_trips to cmd array, remove uninitialized values*/
528 for (i = 0; i < IWL_MAX_DTS_TRIPS; i++)
529 if (mvm->tz_device.temp_trips[i] != S16_MIN) {
530 cmd.thresholds[idx++] =
531 cpu_to_le16(mvm->tz_device.temp_trips[i]);
532 }
533 cmd.num_temps = cpu_to_le32(idx);
534
535 if (!idx)
536 goto send;
537
538 /*sort cmd array*/
539 sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
540
541 /* we should save the indexes of trips because we sort
542 * and compress the orginal array
543 */
544 for (i = 0; i < idx; i++) {
545 for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
546 if (le16_to_cpu(cmd.thresholds[i]) ==
547 mvm->tz_device.temp_trips[j])
548 mvm->tz_device.fw_trips_index[i] = j;
549 }
550 }
551
552send:
553 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
554 TEMP_REPORTING_THRESHOLDS_CMD),
555 0, sizeof(cmd), &cmd);
556 if (ret)
557 IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
558 ret);
559
560 return ret;
561}
562
563static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
564 int *temperature)
565{
566 struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
567 int ret;
568 int temp;
569
570 mutex_lock(&mvm->mutex);
571
572 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
573 ret = -EIO;
574 goto out;
575 }
576
577 ret = iwl_mvm_get_temp(mvm, &temp);
578 if (ret)
579 goto out;
580
581 *temperature = temp * 1000;
582
583out:
584 mutex_unlock(&mvm->mutex);
585 return ret;
586}
587
588static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device,
589 int trip, int *temp)
590{
591 struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
592
593 if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
594 return -EINVAL;
595
596 *temp = mvm->tz_device.temp_trips[trip] * 1000;
597
598 return 0;
599}
600
601static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device,
602 int trip, enum thermal_trip_type *type)
603{
604 if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
605 return -EINVAL;
606
607 *type = THERMAL_TRIP_PASSIVE;
608
609 return 0;
610}
611
612static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
613 int trip, int temp)
614{
615 struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
616 struct iwl_mvm_thermal_device *tzone;
617 int i, ret;
618 s16 temperature;
619
620 mutex_lock(&mvm->mutex);
621
622 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
623 ret = -EIO;
624 goto out;
625 }
626
627 if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) {
628 ret = -EINVAL;
629 goto out;
630 }
631
632 if ((temp / 1000) > S16_MAX) {
633 ret = -EINVAL;
634 goto out;
635 }
636
637 temperature = (s16)(temp / 1000);
638 tzone = &mvm->tz_device;
639
640 if (!tzone) {
641 ret = -EIO;
642 goto out;
643 }
644
645 /* no updates*/
646 if (tzone->temp_trips[trip] == temperature) {
647 ret = 0;
648 goto out;
649 }
650
651 /* already existing temperature */
652 for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
653 if (tzone->temp_trips[i] == temperature) {
654 ret = -EINVAL;
655 goto out;
656 }
657 }
658
659 tzone->temp_trips[trip] = temperature;
660
661 ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
662out:
663 mutex_unlock(&mvm->mutex);
664 return ret;
665}
666
667static struct thermal_zone_device_ops tzone_ops = {
668 .get_temp = iwl_mvm_tzone_get_temp,
669 .get_trip_temp = iwl_mvm_tzone_get_trip_temp,
670 .get_trip_type = iwl_mvm_tzone_get_trip_type,
671 .set_trip_temp = iwl_mvm_tzone_set_trip_temp,
672};
673
674/* make all trips writable */
675#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
676
677static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
678{
679 int i;
680 char name[] = "iwlwifi";
681
682 if (!iwl_mvm_is_tt_in_fw(mvm)) {
683 mvm->tz_device.tzone = NULL;
684
685 return;
686 }
687
688 BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
689
690 mvm->tz_device.tzone = thermal_zone_device_register(name,
691 IWL_MAX_DTS_TRIPS,
692 IWL_WRITABLE_TRIPS_MSK,
693 mvm, &tzone_ops,
694 NULL, 0, 0);
695 if (IS_ERR(mvm->tz_device.tzone)) {
696 IWL_DEBUG_TEMP(mvm,
697 "Failed to register to thermal zone (err = %ld)\n",
698 PTR_ERR(mvm->tz_device.tzone));
699 return;
700 }
701
702 /* 0 is a valid temperature,
703 * so initialize the array with S16_MIN which invalid temperature
704 */
705 for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++)
706 mvm->tz_device.temp_trips[i] = S16_MIN;
707}
708
709static const u32 iwl_mvm_cdev_budgets[] = {
710 2000, /* cooling state 0 */
711 1800, /* cooling state 1 */
712 1600, /* cooling state 2 */
713 1400, /* cooling state 3 */
714 1200, /* cooling state 4 */
715 1000, /* cooling state 5 */
716 900, /* cooling state 6 */
717 800, /* cooling state 7 */
718 700, /* cooling state 8 */
719 650, /* cooling state 9 */
720 600, /* cooling state 10 */
721 550, /* cooling state 11 */
722 500, /* cooling state 12 */
723 450, /* cooling state 13 */
724 400, /* cooling state 14 */
725 350, /* cooling state 15 */
726 300, /* cooling state 16 */
727 250, /* cooling state 17 */
728 200, /* cooling state 18 */
729 150, /* cooling state 19 */
730};
731
732int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget)
733{
734 struct iwl_mvm_ctdp_cmd cmd = {
735 .operation = cpu_to_le32(op),
736 .budget = cpu_to_le32(budget),
737 .window_size = 0,
738 };
739 int ret;
740 u32 status;
741
742 lockdep_assert_held(&mvm->mutex);
743
744 ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
745 CTDP_CONFIG_CMD),
746 sizeof(cmd), &cmd, &status);
747
748 if (ret) {
749 IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
750 return ret;
751 }
752
753 if (op == CTDP_CMD_OPERATION_START)
754 mvm->cooling_dev.cur_state = budget;
755
756 else if (op == CTDP_CMD_OPERATION_REPORT)
757 IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
758
759 return 0;
760}
761
762static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
763 unsigned long *state)
764{
765 *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
766
767 return 0;
768}
769
770static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
771 unsigned long *state)
772{
773 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
774
775 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
776 return -EBUSY;
777
778 *state = mvm->cooling_dev.cur_state;
779 return 0;
780}
781
782static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
783 unsigned long new_state)
784{
785 struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
786 int ret;
787
788 if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
789 return -EIO;
790
791 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
792 return -EBUSY;
793
794 mutex_lock(&mvm->mutex);
795
796 if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
797 ret = -EINVAL;
798 goto unlock;
799 }
800
801 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
802 iwl_mvm_cdev_budgets[new_state]);
803
804unlock:
805 mutex_unlock(&mvm->mutex);
806 return ret;
807}
808
809static struct thermal_cooling_device_ops tcooling_ops = {
810 .get_max_state = iwl_mvm_tcool_get_max_state,
811 .get_cur_state = iwl_mvm_tcool_get_cur_state,
812 .set_cur_state = iwl_mvm_tcool_set_cur_state,
813};
814
815int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
816{
817 char name[] = "iwlwifi";
818
819 if (!iwl_mvm_is_ctdp_supported(mvm)) {
820 mvm->cooling_dev.cdev = NULL;
821
822 return 0;
823 }
824
825 BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
826
827 mvm->cooling_dev.cdev =
828 thermal_cooling_device_register(name,
829 mvm,
830 &tcooling_ops);
831
832 if (IS_ERR(mvm->cooling_dev.cdev)) {
833 IWL_DEBUG_TEMP(mvm,
834 "Failed to register to cooling device (err = %ld)\n",
835 PTR_ERR(mvm->cooling_dev.cdev));
836 return PTR_ERR(mvm->cooling_dev.cdev);
837 }
838
839 return 0;
840}
841
842static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
843{
844 if (!iwl_mvm_is_tt_in_fw(mvm))
845 return;
846
847 if (mvm->tz_device.tzone) {
848 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
849 thermal_zone_device_unregister(mvm->tz_device.tzone);
850 mvm->tz_device.tzone = NULL;
851 }
852}
853
854static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
855{
856 if (!iwl_mvm_is_ctdp_supported(mvm))
857 return;
858
859 if (mvm->cooling_dev.cdev) {
860 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
861 thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
862 mvm->cooling_dev.cdev = NULL;
863 }
864}
865#endif /* CONFIG_THERMAL */
866
867void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
439{ 868{
440 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; 869 struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
441 870
@@ -450,10 +879,20 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
450 tt->dynamic_smps = false; 879 tt->dynamic_smps = false;
451 tt->min_backoff = min_backoff; 880 tt->min_backoff = min_backoff;
452 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); 881 INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
882
883#ifdef CONFIG_THERMAL
884 iwl_mvm_cooling_device_register(mvm);
885 iwl_mvm_thermal_zone_register(mvm);
886#endif
453} 887}
454 888
455void iwl_mvm_tt_exit(struct iwl_mvm *mvm) 889void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
456{ 890{
457 cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); 891 cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
458 IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); 892 IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
893
894#ifdef CONFIG_THERMAL
895 iwl_mvm_cooling_device_unregister(mvm);
896 iwl_mvm_thermal_zone_unregister(mvm);
897#endif
459} 898}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 4fbaadda4e99..271e8da6d140 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -65,6 +65,7 @@
65#include <linux/ieee80211.h> 65#include <linux/ieee80211.h>
66#include <linux/etherdevice.h> 66#include <linux/etherdevice.h>
67#include <linux/tcp.h> 67#include <linux/tcp.h>
68#include <net/ip.h>
68 69
69#include "iwl-trans.h" 70#include "iwl-trans.h"
70#include "iwl-eeprom-parse.h" 71#include "iwl-eeprom-parse.h"
@@ -182,7 +183,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
182 183
183 tx_cmd->tx_flags = cpu_to_le32(tx_flags); 184 tx_cmd->tx_flags = cpu_to_le32(tx_flags);
184 /* Total # bytes to be transmitted */ 185 /* Total # bytes to be transmitted */
185 tx_cmd->len = cpu_to_le16((u16)skb->len); 186 tx_cmd->len = cpu_to_le16((u16)skb->len +
187 (uintptr_t)info->driver_data[0]);
186 tx_cmd->next_frame_len = 0; 188 tx_cmd->next_frame_len = 0;
187 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 189 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
188 tx_cmd->sta_id = sta_id; 190 tx_cmd->sta_id = sta_id;
@@ -372,6 +374,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
372 info->hw_queue != info->control.vif->cab_queue))) 374 info->hw_queue != info->control.vif->cab_queue)))
373 return -1; 375 return -1;
374 376
377 /* This holds the amsdu headers length */
378 info->driver_data[0] = (void *)(uintptr_t)0;
379
375 /* 380 /*
376 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used 381 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
377 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel 382 * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
@@ -425,36 +430,206 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
425 return -1; 430 return -1;
426 } 431 }
427 432
433 /*
434 * Increase the pending frames counter, so that later when a reply comes
435 * in and the counter is decreased - we don't start getting negative
436 * values.
437 * Note that we don't need to make sure it isn't agg'd, since we're
438 * TXing non-sta
439 */
440 atomic_inc(&mvm->pending_frames[sta_id]);
441
428 return 0; 442 return 0;
429} 443}
430 444
431static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso, 445#ifdef CONFIG_INET
446static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
432 struct ieee80211_sta *sta, 447 struct ieee80211_sta *sta,
433 struct sk_buff_head *mpdus_skb) 448 struct sk_buff_head *mpdus_skb)
434{ 449{
450 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
451 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
452 struct ieee80211_hdr *hdr = (void *)skb->data;
453 unsigned int mss = skb_shinfo(skb)->gso_size;
435 struct sk_buff *tmp, *next; 454 struct sk_buff *tmp, *next;
436 char cb[sizeof(skb_gso->cb)]; 455 char cb[sizeof(skb->cb)];
456 unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
457 bool ipv4 = (skb->protocol == htons(ETH_P_IP));
458 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
459 u16 amsdu_add, snap_ip_tcp, pad, i = 0;
460 unsigned int dbg_max_amsdu_len;
461 u8 *qc, tid, txf;
462
463 snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
464 tcp_hdrlen(skb);
465
466 qc = ieee80211_get_qos_ctl(hdr);
467 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
468 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
469 return -EINVAL;
470
471 if (!sta->max_amsdu_len ||
472 !ieee80211_is_data_qos(hdr->frame_control) ||
473 !mvmsta->tlc_amsdu) {
474 num_subframes = 1;
475 pad = 0;
476 goto segment;
477 }
478
479 /*
480 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
481 * during an BA session.
482 */
483 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
484 !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) {
485 num_subframes = 1;
486 pad = 0;
487 goto segment;
488 }
489
490 max_amsdu_len = sta->max_amsdu_len;
491 dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
492
493 /* the Tx FIFO to which this A-MSDU will be routed */
494 txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
495
496 /*
497 * Don't send an AMSDU that will be longer than the TXF.
498 * Add a security margin of 256 for the TX command + headers.
499 * We also want to have the start of the next packet inside the
500 * fifo to be able to send bursts.
501 */
502 max_amsdu_len = min_t(unsigned int, max_amsdu_len,
503 mvm->shared_mem_cfg.txfifo_size[txf] - 256);
504
505 if (dbg_max_amsdu_len)
506 max_amsdu_len = min_t(unsigned int, max_amsdu_len,
507 dbg_max_amsdu_len);
437 508
438 memcpy(cb, skb_gso->cb, sizeof(cb)); 509 /*
439 next = skb_gso_segment(skb_gso, 0); 510 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
440 if (IS_ERR(next)) 511 * supported. This is a spec requirement (IEEE 802.11-2015
512 * section 8.7.3 NOTE 3).
513 */
514 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
515 !sta->vht_cap.vht_supported)
516 max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
517
518 /* Sub frame header + SNAP + IP header + TCP header + MSS */
519 subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
520 pad = (4 - subf_len) & 0x3;
521
522 /*
523 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
524 * N * subf_len + (N - 1) * pad.
525 */
526 num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
527 if (num_subframes > 1)
528 *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
529
530 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
531 tcp_hdrlen(skb) + skb->data_len;
532
533 /*
534 * Make sure we have enough TBs for the A-MSDU:
535 * 2 for each subframe
536 * 1 more for each fragment
537 * 1 more for the potential data in the header
538 */
539 num_subframes =
540 min_t(unsigned int, num_subframes,
541 (mvm->trans->max_skb_frags - 1 -
542 skb_shinfo(skb)->nr_frags) / 2);
543
544 /* This skb fits in one single A-MSDU */
545 if (num_subframes * mss >= tcp_payload_len) {
546 /*
547 * Compute the length of all the data added for the A-MSDU.
548 * This will be used to compute the length to write in the TX
549 * command. We have: SNAP + IP + TCP for n -1 subframes and
550 * ETH header for n subframes. Note that the original skb
551 * already had one set of SNAP / IP / TCP headers.
552 */
553 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
554 info = IEEE80211_SKB_CB(skb);
555 amsdu_add = num_subframes * sizeof(struct ethhdr) +
556 (num_subframes - 1) * (snap_ip_tcp + pad);
557 /* This holds the amsdu headers length */
558 info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
559
560 __skb_queue_tail(mpdus_skb, skb);
561 return 0;
562 }
563
564 /*
565 * Trick the segmentation function to make it
566 * create SKBs that can fit into one A-MSDU.
567 */
568segment:
569 skb_shinfo(skb)->gso_size = num_subframes * mss;
570 memcpy(cb, skb->cb, sizeof(cb));
571
572 next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
573 skb_shinfo(skb)->gso_size = mss;
574 if (WARN_ON_ONCE(IS_ERR(next)))
441 return -EINVAL; 575 return -EINVAL;
442 else if (next) 576 else if (next)
443 consume_skb(skb_gso); 577 consume_skb(skb);
444 578
445 while (next) { 579 while (next) {
446 tmp = next; 580 tmp = next;
447 next = tmp->next; 581 next = tmp->next;
582
448 memcpy(tmp->cb, cb, sizeof(tmp->cb)); 583 memcpy(tmp->cb, cb, sizeof(tmp->cb));
584 /*
585 * Compute the length of all the data added for the A-MSDU.
586 * This will be used to compute the length to write in the TX
587 * command. We have: SNAP + IP + TCP for n -1 subframes and
588 * ETH header for n subframes.
589 */
590 tcp_payload_len = skb_tail_pointer(tmp) -
591 skb_transport_header(tmp) -
592 tcp_hdrlen(tmp) + tmp->data_len;
593
594 if (ipv4)
595 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
596
597 if (tcp_payload_len > mss) {
598 num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
599 info = IEEE80211_SKB_CB(tmp);
600 amsdu_add = num_subframes * sizeof(struct ethhdr) +
601 (num_subframes - 1) * (snap_ip_tcp + pad);
602 info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
603 skb_shinfo(tmp)->gso_size = mss;
604 } else {
605 qc = ieee80211_get_qos_ctl((void *)tmp->data);
606
607 if (ipv4)
608 ip_send_check(ip_hdr(tmp));
609 *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
610 skb_shinfo(tmp)->gso_size = 0;
611 }
449 612
450 tmp->prev = NULL; 613 tmp->prev = NULL;
451 tmp->next = NULL; 614 tmp->next = NULL;
452 615
453 __skb_queue_tail(mpdus_skb, tmp); 616 __skb_queue_tail(mpdus_skb, tmp);
617 i++;
454 } 618 }
455 619
456 return 0; 620 return 0;
457} 621}
622#else /* CONFIG_INET */
623static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
624 struct ieee80211_sta *sta,
625 struct sk_buff_head *mpdus_skb)
626{
 627 /* Impossible to get TSO without CONFIG_INET */
628 WARN_ON(1);
629
630 return -1;
631}
632#endif
458 633
459/* 634/*
460 * Sets the fields in the Tx cmd that are crypto related 635 * Sets the fields in the Tx cmd that are crypto related
@@ -560,6 +735,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
560 struct ieee80211_sta *sta) 735 struct ieee80211_sta *sta)
561{ 736{
562 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 737 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
738 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
563 struct sk_buff_head mpdus_skbs; 739 struct sk_buff_head mpdus_skbs;
564 unsigned int payload_len; 740 unsigned int payload_len;
565 int ret; 741 int ret;
@@ -570,6 +746,9 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
570 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 746 if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
571 return -1; 747 return -1;
572 748
749 /* This holds the amsdu headers length */
750 info->driver_data[0] = (void *)(uintptr_t)0;
751
573 if (!skb_is_gso(skb)) 752 if (!skb_is_gso(skb))
574 return iwl_mvm_tx_mpdu(mvm, skb, sta); 753 return iwl_mvm_tx_mpdu(mvm, skb, sta);
575 754
@@ -589,7 +768,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
589 return ret; 768 return ret;
590 769
591 while (!skb_queue_empty(&mpdus_skbs)) { 770 while (!skb_queue_empty(&mpdus_skbs)) {
592 struct sk_buff *skb = __skb_dequeue(&mpdus_skbs); 771 skb = __skb_dequeue(&mpdus_skbs);
593 772
594 ret = iwl_mvm_tx_mpdu(mvm, skb, sta); 773 ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
595 if (ret) { 774 if (ret) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 753ec6785912..d33b6baf5f98 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -811,6 +811,45 @@ static int iwl_pci_runtime_resume(struct device *device)
811 811
812 return 0; 812 return 0;
813} 813}
814
815static int iwl_pci_system_prepare(struct device *device)
816{
817 struct pci_dev *pdev = to_pci_dev(device);
818 struct iwl_trans *trans = pci_get_drvdata(pdev);
819
820 IWL_DEBUG_RPM(trans, "preparing for system suspend\n");
821
822 /* This is called before entering system suspend and before
823 * the runtime resume is called. Set the suspending flag to
824 * prevent the wakelock from being taken.
825 */
826 trans->suspending = true;
827
828 /* Wake the device up from runtime suspend before going to
829 * platform suspend. This is needed because we don't know
 830 * whether any wowlan is set and, if it's not, mac80211 will
831 * disconnect (in which case, we can't be in D0i3).
832 */
833 pm_runtime_resume(device);
834
835 return 0;
836}
837
838static void iwl_pci_system_complete(struct device *device)
839{
840 struct pci_dev *pdev = to_pci_dev(device);
841 struct iwl_trans *trans = pci_get_drvdata(pdev);
842
843 IWL_DEBUG_RPM(trans, "completing system suspend\n");
844
845 /* This is called as a counterpart to the prepare op. It is
846 * called either when suspending fails or when suspend
847 * completed successfully. Now there's no risk of grabbing
848 * the wakelock anymore, so we can release the suspending
849 * flag.
850 */
851 trans->suspending = false;
852}
814#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 853#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
815 854
816static const struct dev_pm_ops iwl_dev_pm_ops = { 855static const struct dev_pm_ops iwl_dev_pm_ops = {
@@ -820,6 +859,8 @@ static const struct dev_pm_ops iwl_dev_pm_ops = {
820 SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, 859 SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
821 iwl_pci_runtime_resume, 860 iwl_pci_runtime_resume,
822 NULL) 861 NULL)
862 .prepare = iwl_pci_system_prepare,
863 .complete = iwl_pci_system_complete,
823#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 864#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
824}; 865};
825 866
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 542bbc5e2b24..6677f3122226 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -336,6 +336,14 @@ struct iwl_tso_hdr_page {
336 * @fw_mon_phys: physical address of the buffer for the firmware monitor 336 * @fw_mon_phys: physical address of the buffer for the firmware monitor
337 * @fw_mon_page: points to the first page of the buffer for the firmware monitor 337 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
338 * @fw_mon_size: size of the buffer for the firmware monitor 338 * @fw_mon_size: size of the buffer for the firmware monitor
339 * @msix_entries: array of MSI-X entries
340 * @msix_enabled: true if managed to enable MSI-X
 341 * @allocated_vector: the number of interrupt vectors allocated by the OS
342 * @default_irq_num: default irq for non rx interrupt
343 * @fh_init_mask: initial unmasked fh causes
344 * @hw_init_mask: initial unmasked hw causes
345 * @fh_mask: current unmasked fh causes
346 * @hw_mask: current unmasked hw causes
339 */ 347 */
340struct iwl_trans_pcie { 348struct iwl_trans_pcie {
341 struct iwl_rxq *rxq; 349 struct iwl_rxq *rxq;
@@ -402,6 +410,15 @@ struct iwl_trans_pcie {
402 dma_addr_t fw_mon_phys; 410 dma_addr_t fw_mon_phys;
403 struct page *fw_mon_page; 411 struct page *fw_mon_page;
404 u32 fw_mon_size; 412 u32 fw_mon_size;
413
414 struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
415 bool msix_enabled;
416 u32 allocated_vector;
417 u32 default_irq_num;
418 u32 fh_init_mask;
419 u32 hw_init_mask;
420 u32 fh_mask;
421 u32 hw_mask;
405}; 422};
406 423
407static inline struct iwl_trans_pcie * 424static inline struct iwl_trans_pcie *
@@ -430,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
430* RX 447* RX
431******************************************************/ 448******************************************************/
432int iwl_pcie_rx_init(struct iwl_trans *trans); 449int iwl_pcie_rx_init(struct iwl_trans *trans);
450irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
433irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); 451irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
452irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
453irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
434int iwl_pcie_rx_stop(struct iwl_trans *trans); 454int iwl_pcie_rx_stop(struct iwl_trans *trans);
435void iwl_pcie_rx_free(struct iwl_trans *trans); 455void iwl_pcie_rx_free(struct iwl_trans *trans);
436 456
@@ -485,15 +505,24 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans);
485******************************************************/ 505******************************************************/
486static inline void iwl_disable_interrupts(struct iwl_trans *trans) 506static inline void iwl_disable_interrupts(struct iwl_trans *trans)
487{ 507{
488 clear_bit(STATUS_INT_ENABLED, &trans->status); 508 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
489
490 /* disable interrupts from uCode/NIC to host */
491 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
492 509
493 /* acknowledge/clear/reset any interrupts still pending 510 clear_bit(STATUS_INT_ENABLED, &trans->status);
494 * from uCode or flow handler (Rx/Tx DMA) */ 511 if (!trans_pcie->msix_enabled) {
495 iwl_write32(trans, CSR_INT, 0xffffffff); 512 /* disable interrupts from uCode/NIC to host */
496 iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); 513 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
514
515 /* acknowledge/clear/reset any interrupts still pending
516 * from uCode or flow handler (Rx/Tx DMA) */
517 iwl_write32(trans, CSR_INT, 0xffffffff);
518 iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
519 } else {
520 /* disable all the interrupt we might use */
521 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
522 trans_pcie->fh_init_mask);
523 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
524 trans_pcie->hw_init_mask);
525 }
497 IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); 526 IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
498} 527}
499 528
@@ -503,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
503 532
504 IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); 533 IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
505 set_bit(STATUS_INT_ENABLED, &trans->status); 534 set_bit(STATUS_INT_ENABLED, &trans->status);
506 trans_pcie->inta_mask = CSR_INI_SET_MASK; 535 if (!trans_pcie->msix_enabled) {
507 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 536 trans_pcie->inta_mask = CSR_INI_SET_MASK;
537 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
538 } else {
539 /*
540 * fh/hw_mask keeps all the unmasked causes.
541 * Unlike msi, in msix cause is enabled when it is unset.
542 */
543 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
544 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
545 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
546 ~trans_pcie->fh_mask);
547 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
548 ~trans_pcie->hw_mask);
549 }
550}
551
552static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
553{
554 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
555
556 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
557 trans_pcie->hw_mask = msk;
558}
559
560static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
561{
562 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
563
564 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
565 trans_pcie->fh_mask = msk;
508} 566}
509 567
510static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) 568static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
@@ -512,8 +570,15 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
512 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 570 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
513 571
514 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); 572 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
515 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; 573 if (!trans_pcie->msix_enabled) {
516 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 574 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
575 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
576 } else {
577 iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
578 trans_pcie->hw_init_mask);
579 iwl_enable_fh_int_msk_msix(trans,
580 MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
581 }
517} 582}
518 583
519static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 584static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
@@ -521,8 +586,15 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
521 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 586 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
522 587
523 IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); 588 IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
524 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; 589 if (!trans_pcie->msix_enabled) {
525 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 590 trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
591 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
592 } else {
593 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
594 trans_pcie->fh_init_mask);
595 iwl_enable_hw_int_msk_msix(trans,
596 MSIX_HW_INT_CAUSES_REG_RF_KILL);
597 }
526} 598}
527 599
528static inline void iwl_wake_queue(struct iwl_trans *trans, 600static inline void iwl_wake_queue(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 07973ef826c1..489b07a9e471 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -783,16 +783,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
783 * Single frame mode 783 * Single frame mode
784 * Rx buffer size 4 or 8k or 12k 784 * Rx buffer size 4 or 8k or 12k
785 * Min RB size 4 or 8 785 * Min RB size 4 or 8
786 * Drop frames that exceed RB size
786 * 512 RBDs 787 * 512 RBDs
787 */ 788 */
788 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 789 iwl_write_prph(trans, RFH_RXF_DMA_CFG,
789 RFH_DMA_EN_ENABLE_VAL | 790 RFH_DMA_EN_ENABLE_VAL |
790 rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | 791 rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
791 RFH_RXF_DMA_MIN_RB_4_8 | 792 RFH_RXF_DMA_MIN_RB_4_8 |
793 RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
792 RFH_RXF_DMA_RBDCB_SIZE_512); 794 RFH_RXF_DMA_RBDCB_SIZE_512);
793 795
796 /*
797 * Activate DMA snooping.
798 * Set RX DMA chunk size to 128 bit
799 * Default queue is 0
800 */
794 iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | 801 iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
795 RFH_GEN_CFG_SERVICE_DMA_SNOOP); 802 RFH_GEN_CFG_RB_CHUNK_SIZE |
803 (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
804 RFH_GEN_CFG_SERVICE_DMA_SNOOP);
805 /* Enable the relevant rx queues */
796 iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); 806 iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
797 807
798 /* Set interrupt coalescing timer to default (2048 usecs) */ 808 /* Set interrupt coalescing timer to default (2048 usecs) */
@@ -1135,10 +1145,10 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1135/* 1145/*
1136 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1146 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1137 */ 1147 */
1138static void iwl_pcie_rx_handle(struct iwl_trans *trans) 1148static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1139{ 1149{
1140 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1150 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1141 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 1151 struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
1142 u32 r, i, j, count = 0; 1152 u32 r, i, j, count = 0;
1143 bool emergency = false; 1153 bool emergency = false;
1144 1154
@@ -1149,9 +1159,12 @@ restart:
1149 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 1159 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
1150 i = rxq->read; 1160 i = rxq->read;
1151 1161
1162 /* W/A 9000 device step A0 wrap-around bug */
1163 r &= (rxq->queue_size - 1);
1164
1152 /* Rx interrupt, but nothing sent from uCode */ 1165 /* Rx interrupt, but nothing sent from uCode */
1153 if (i == r) 1166 if (i == r)
1154 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 1167 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1155 1168
1156 while (i != r) { 1169 while (i != r) {
1157 struct iwl_rx_mem_buffer *rxb; 1170 struct iwl_rx_mem_buffer *rxb;
@@ -1164,15 +1177,18 @@ restart:
1164 * used_bd is a 32 bit but only 12 are used to retrieve 1177 * used_bd is a 32 bit but only 12 are used to retrieve
1165 * the vid 1178 * the vid
1166 */ 1179 */
1167 u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]); 1180 u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
1168 1181
1182 if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
1183 "Invalid rxb index from HW %u\n", (u32)vid))
1184 goto out;
1169 rxb = trans_pcie->global_table[vid]; 1185 rxb = trans_pcie->global_table[vid];
1170 } else { 1186 } else {
1171 rxb = rxq->queue[i]; 1187 rxb = rxq->queue[i];
1172 rxq->queue[i] = NULL; 1188 rxq->queue[i] = NULL;
1173 } 1189 }
1174 1190
1175 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); 1191 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1176 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); 1192 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
1177 1193
1178 i = (i + 1) & (rxq->queue_size - 1); 1194 i = (i + 1) & (rxq->queue_size - 1);
@@ -1235,7 +1251,7 @@ restart:
1235 goto restart; 1251 goto restart;
1236 } 1252 }
1237 } 1253 }
1238 1254out:
1239 /* Backtrack one entry */ 1255 /* Backtrack one entry */
1240 rxq->read = i; 1256 rxq->read = i;
1241 spin_unlock(&rxq->lock); 1257 spin_unlock(&rxq->lock);
@@ -1259,6 +1275,54 @@ restart:
1259 napi_gro_flush(&rxq->napi, false); 1275 napi_gro_flush(&rxq->napi, false);
1260} 1276}
1261 1277
1278static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1279{
1280 u8 queue = entry->entry;
1281 struct msix_entry *entries = entry - queue;
1282
1283 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1284}
1285
1286static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
1287 struct msix_entry *entry)
1288{
1289 /*
1290 * Before sending the interrupt the HW disables it to prevent
1291 * a nested interrupt. This is done by writing 1 to the corresponding
1292 * bit in the mask register. After handling the interrupt, it should be
1293 * re-enabled by clearing this bit. This register is defined as
1294 * write 1 clear (W1C) register, meaning that it's being clear
1295 * by writing 1 to the bit.
1296 */
1297 iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
1298}
1299
1300/*
 1301 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1302 * This interrupt handler should be used with RSS queue only.
1303 */
1304irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1305{
1306 struct msix_entry *entry = dev_id;
1307 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1308 struct iwl_trans *trans = trans_pcie->trans;
1309
1310 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1311 return IRQ_NONE;
1312
1313 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1314
1315 local_bh_disable();
1316 iwl_pcie_rx_handle(trans, entry->entry);
1317 local_bh_enable();
1318
1319 iwl_pcie_clear_irq(trans, entry);
1320
1321 lock_map_release(&trans->sync_cmd_lockdep_map);
1322
1323 return IRQ_HANDLED;
1324}
1325
1262/* 1326/*
1263 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1327 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1264 */ 1328 */
@@ -1589,7 +1653,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1589 isr_stats->rx++; 1653 isr_stats->rx++;
1590 1654
1591 local_bh_disable(); 1655 local_bh_disable();
1592 iwl_pcie_rx_handle(trans); 1656 iwl_pcie_rx_handle(trans, 0);
1593 local_bh_enable(); 1657 local_bh_enable();
1594 } 1658 }
1595 1659
@@ -1732,3 +1796,129 @@ irqreturn_t iwl_pcie_isr(int irq, void *data)
1732 1796
1733 return IRQ_WAKE_THREAD; 1797 return IRQ_WAKE_THREAD;
1734} 1798}
1799
1800irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
1801{
1802 return IRQ_WAKE_THREAD;
1803}
1804
1805irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
1806{
1807 struct msix_entry *entry = dev_id;
1808 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1809 struct iwl_trans *trans = trans_pcie->trans;
1810 struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
1811 u32 inta_fh, inta_hw;
1812
1813 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1814
1815 spin_lock(&trans_pcie->irq_lock);
1816 inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
1817 inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
1818 /*
 1819 * Clear the causes registers to avoid handling the same cause twice.
1820 */
1821 iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
1822 iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
1823 spin_unlock(&trans_pcie->irq_lock);
1824
1825 if (unlikely(!(inta_fh | inta_hw))) {
1826 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1827 lock_map_release(&trans->sync_cmd_lockdep_map);
1828 return IRQ_NONE;
1829 }
1830
1831 if (iwl_have_debug_level(IWL_DL_ISR))
1832 IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
1833 inta_fh,
1834 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
1835
1836 /* This "Tx" DMA channel is used only for loading uCode */
1837 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
1838 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1839 isr_stats->tx++;
1840 /*
1841 * Wake up uCode load routine,
1842 * now that load is complete
1843 */
1844 trans_pcie->ucode_write_complete = true;
1845 wake_up(&trans_pcie->ucode_write_waitq);
1846 }
1847
1848 /* Error detected by uCode */
1849 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
1850 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
1851 IWL_ERR(trans,
1852 "Microcode SW error detected. Restarting 0x%X.\n",
1853 inta_fh);
1854 isr_stats->sw++;
1855 iwl_pcie_irq_handle_error(trans);
1856 }
1857
1858 /* After checking FH register check HW register */
1859 if (iwl_have_debug_level(IWL_DL_ISR))
1860 IWL_DEBUG_ISR(trans,
1861 "ISR inta_hw 0x%08x, enabled 0x%08x\n",
1862 inta_hw,
1863 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
1864
1865 /* Alive notification via Rx interrupt will do the real work */
1866 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
1867 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1868 isr_stats->alive++;
1869 }
1870
1871 /* uCode wakes up after power-down sleep */
1872 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
1873 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1874 iwl_pcie_rxq_check_wrptr(trans);
1875 iwl_pcie_txq_check_wrptrs(trans);
1876
1877 isr_stats->wakeup++;
1878 }
1879
1880 /* Chip got too hot and stopped itself */
1881 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
1882 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1883 isr_stats->ctkill++;
1884 }
1885
1886 /* HW RF KILL switch toggled */
1887 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
1888 bool hw_rfkill;
1889
1890 hw_rfkill = iwl_is_rfkill_set(trans);
1891 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1892 hw_rfkill ? "disable radio" : "enable radio");
1893
1894 isr_stats->rfkill++;
1895
1896 mutex_lock(&trans_pcie->mutex);
1897 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1898 mutex_unlock(&trans_pcie->mutex);
1899 if (hw_rfkill) {
1900 set_bit(STATUS_RFKILL, &trans->status);
1901 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1902 &trans->status))
1903 IWL_DEBUG_RF_KILL(trans,
1904 "Rfkill while SYNC HCMD in flight\n");
1905 wake_up(&trans_pcie->wait_command_queue);
1906 } else {
1907 clear_bit(STATUS_RFKILL, &trans->status);
1908 }
1909 }
1910
1911 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
1912 IWL_ERR(trans,
1913 "Hardware error detected. Restarting.\n");
1914
1915 isr_stats->hw++;
1916 iwl_pcie_irq_handle_error(trans);
1917 }
1918
1919 iwl_pcie_clear_irq(trans, entry);
1920
1921 lock_map_release(&trans->sync_cmd_lockdep_map);
1922
1923 return IRQ_HANDLED;
1924}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 58591ca051fd..e67957d6ac79 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -616,38 +616,38 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
616 dma_addr_t phy_addr, u32 byte_cnt) 616 dma_addr_t phy_addr, u32 byte_cnt)
617{ 617{
618 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 618 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
619 unsigned long flags;
619 int ret; 620 int ret;
620 621
621 trans_pcie->ucode_write_complete = false; 622 trans_pcie->ucode_write_complete = false;
622 623
623 iwl_write_direct32(trans, 624 if (!iwl_trans_grab_nic_access(trans, &flags))
624 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 625 return -EIO;
625 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 626
627 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
628 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
626 629
627 iwl_write_direct32(trans, 630 iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
628 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), 631 dst_addr);
629 dst_addr);
630 632
631 iwl_write_direct32(trans, 633 iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
632 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 634 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
633 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
634 635
635 iwl_write_direct32(trans, 636 iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
636 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 637 (iwl_get_dma_hi_addr(phy_addr)
637 (iwl_get_dma_hi_addr(phy_addr) 638 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
638 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
639 639
640 iwl_write_direct32(trans, 640 iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
641 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 641 BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
642 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | 642 BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
643 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | 643 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
644 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
645 644
646 iwl_write_direct32(trans, 645 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
647 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 646 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
648 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 647 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
649 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 648 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
650 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 649
650 iwl_trans_release_nic_access(trans, &flags);
651 651
652 ret = wait_event_timeout(trans_pcie->ucode_write_waitq, 652 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
653 trans_pcie->ucode_write_complete, 5 * HZ); 653 trans_pcie->ucode_write_complete, 5 * HZ);
@@ -1123,6 +1123,20 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1123 iwl_pcie_prepare_card_hw(trans); 1123 iwl_pcie_prepare_card_hw(trans);
1124} 1124}
1125 1125
/*
 * Wait for all in-flight interrupt handlers to complete before the caller
 * tears down state they may touch: with MSI-X enabled, synchronize every
 * allocated vector; otherwise synchronize the single MSI/INTx line.
 * NOTE(review): leading digits are the patch's own line numbers (cgit dump).
 */
1126static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
1127{
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1129
1130 if (trans_pcie->msix_enabled) {
1131 int i;
1132
1133 for (i = 0; i < trans_pcie->allocated_vector; i++)
1134 synchronize_irq(trans_pcie->msix_entries[i].vector);
1135 } else {
1136 synchronize_irq(trans_pcie->pci_dev->irq);
1137 }
1138}
1139
1126static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, 1140static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1127 const struct fw_img *fw, bool run_in_rfkill) 1141 const struct fw_img *fw, bool run_in_rfkill)
1128{ 1142{
@@ -1149,7 +1163,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1149 iwl_disable_interrupts(trans); 1163 iwl_disable_interrupts(trans);
1150 1164
1151 /* Make sure it finished running */ 1165 /* Make sure it finished running */
1152 synchronize_irq(trans_pcie->pci_dev->irq); 1166 iwl_pcie_synchronize_irqs(trans);
1153 1167
1154 mutex_lock(&trans_pcie->mutex); 1168 mutex_lock(&trans_pcie->mutex);
1155 1169
@@ -1252,8 +1266,6 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1252static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 1266static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1253 bool reset) 1267 bool reset)
1254{ 1268{
1255 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1256
1257 if (!reset) { 1269 if (!reset) {
1258 /* Enable persistence mode to avoid reset */ 1270 /* Enable persistence mode to avoid reset */
1259 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1271 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -1271,7 +1283,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1271 1283
1272 iwl_pcie_disable_ict(trans); 1284 iwl_pcie_disable_ict(trans);
1273 1285
1274 synchronize_irq(trans_pcie->pci_dev->irq); 1286 iwl_pcie_synchronize_irqs(trans);
1275 1287
1276 iwl_clear_bit(trans, CSR_GP_CNTRL, 1288 iwl_clear_bit(trans, CSR_GP_CNTRL,
1277 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1289 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1350,6 +1362,153 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1350 return 0; 1362 return 0;
1351} 1363}
1352 1364
/*
 * Maps one MSI-X interrupt cause to its mask register and its byte offset
 * (addr) inside the IVAR routing table. Used only by iwl_pcie_init_msix()
 * to route all non-RX causes to the default (last) vector.
 */
1365struct iwl_causes_list {
1366 u32 cause_num;
1367 u32 mask_reg;
1368 u8 addr;
1369};
1370
/*
 * Fix: the table is read-only (iwl_pcie_init_msix only reads entries), so
 * declare it const — this moves it to .rodata and prevents accidental writes.
 */
1371static const struct iwl_causes_list causes_list[] = {
1372 {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
1373 {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
1374 {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
1375 {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
1376 {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
1377 {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
1378 {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
1379 {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
1380 {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
1381 {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
1382 {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
1383 {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
1384 {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
1385 {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
1386};
1387
/*
 * Program the device's MSI-X routing (IVAR) tables: map the first (N - 1)
 * vectors to the RX queues and route every cause in causes_list[] to the
 * default (last) vector, then snapshot the resulting unmasked FH/HW cause
 * masks into trans_pcie for later enable/disable bookkeeping.
 * No-op unless MSI-X was successfully enabled.
 */
1388static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
1389{
1390 u32 val, max_rx_vector, i;
1391 struct iwl_trans *trans = trans_pcie->trans;
1392
/* last vector is reserved for non-RX causes; the rest serve RX queues */
1393 max_rx_vector = trans_pcie->allocated_vector - 1;
1394
1395 if (!trans_pcie->msix_enabled)
1396 return;
1397
/* tell the device firmware/HW to operate in MSI-X mode */
1398 iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
1399
1400 /*
1401 * Each cause from the list above and the RX causes is represented as
1402 * a byte in the IVAR table. We access the first (N - 1) bytes and map
1403 * them to the (N - 1) vectors so these vectors will be used as rx
1404 * vectors. Then access all non rx causes and map them to the
1405 * default queue (N'th queue).
1406 */
1407 for (i = 0; i < max_rx_vector; i++) {
1408 iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i));
1409 iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD,
1410 BIT(MSIX_FH_INT_CAUSES_Q(i)));
1411 }
1412
1413 for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
1414 val = trans_pcie->default_irq_num |
1415 MSIX_NON_AUTO_CLEAR_CAUSE;
1416 iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
1417 iwl_clear_bit(trans, causes_list[i].mask_reg,
1418 causes_list[i].cause_num);
1419 }
/* record which causes are unmasked after init (mask regs are active-high) */
1420 trans_pcie->fh_init_mask =
1421 ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1422 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1423 trans_pcie->hw_init_mask =
1424 ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1425 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
1426}
1427
/*
 * Negotiate the interrupt mechanism for the device, in order of preference:
 * MSI-X (only on multi-queue-RX capable HW, needs >= 2 vectors: N-1 for RX
 * plus one default vector), then MSI, then legacy INTx as last resort.
 * On success with MSI-X, records the vector bookkeeping in trans_pcie and
 * sets msix_enabled; otherwise falls through to MSI/INTx.
 */
1428static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1429 struct iwl_trans *trans)
1430{
1431 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1432 u16 pci_cmd;
1433 int max_vector;
1434 int ret, i;
1435
1436 if (trans->cfg->mq_rx_supported) {
/* one vector per possible CPU plus the default, capped by HW queues */
1437 max_vector = min_t(u32, (num_possible_cpus() + 1),
1438 IWL_MAX_RX_HW_QUEUES);
1439 for (i = 0; i < max_vector; i++)
1440 trans_pcie->msix_entries[i].entry = i;
1441
1442 ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1443 MSIX_MIN_INTERRUPT_VECTORS,
1444 max_vector);
/* a single vector is useless for the N-1 RX + 1 default scheme */
1445 if (ret > 1) {
1446 IWL_DEBUG_INFO(trans,
1447 "Enable MSI-X allocate %d interrupt vector\n",
1448 ret);
1449 trans_pcie->allocated_vector = ret;
1450 trans_pcie->default_irq_num =
1451 trans_pcie->allocated_vector - 1;
1452 trans_pcie->trans->num_rx_queues =
1453 trans_pcie->allocated_vector - 1;
1454 trans_pcie->msix_enabled = true;
1455
1456 return;
1457 }
1458 IWL_DEBUG_INFO(trans,
1459 "ret = %d %s move to msi mode\n", ret,
1460 (ret == 1) ?
1461 "can't allocate more than 1 interrupt vector" :
1462 "failed to enable msi-x mode");
1463 pci_disable_msix(pdev);
1464 }
1465
1466 ret = pci_enable_msi(pdev);
1467 if (ret) {
1468 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
1469 /* enable rfkill interrupt: hw bug w/a */
1470 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1471 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1472 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1473 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1474 }
1475 }
1476}
1477
/*
 * Register a threaded IRQ handler on every allocated MSI-X vector: the last
 * vector (== num_rx_queues) gets the general handler, all others the RX
 * handler. On failure, unwind by freeing the vectors registered so far,
 * disable MSI-X, and propagate the error.
 */
1478static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1479 struct iwl_trans_pcie *trans_pcie)
1480{
1481 int i, last_vector;
1482
1483 last_vector = trans_pcie->trans->num_rx_queues;
1484
1485 for (i = 0; i < trans_pcie->allocated_vector; i++) {
1486 int ret;
1487
1488 ret = request_threaded_irq(trans_pcie->msix_entries[i].vector,
1489 iwl_pcie_msix_isr,
1490 (i == last_vector) ?
1491 iwl_pcie_irq_msix_handler :
1492 iwl_pcie_irq_rx_msix_handler,
1493 IRQF_SHARED,
1494 DRV_NAME,
1495 &trans_pcie->msix_entries[i]);
1496 if (ret) {
1497 int j;
1498
1499 IWL_ERR(trans_pcie->trans,
1500 "Error allocating IRQ %d\n", i);
/*
 * BUGFIX: the cleanup loop iterated j but freed entry [i] on every pass,
 * double-freeing the failed vector and leaking all previously registered
 * ones. Free each previously registered entry [j] instead.
 */
1501 for (j = 0; j < i; j++)
1502 free_irq(trans_pcie->msix_entries[j].vector,
1503 &trans_pcie->msix_entries[j]);
1504 pci_disable_msix(pdev);
1505 return ret;
1506 }
1507 }
1508
1509 return 0;
1510}
1511
1353static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) 1512static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1354{ 1513{
1355 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1371,6 +1530,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
1371 1530
1372 iwl_pcie_apm_init(trans); 1531 iwl_pcie_apm_init(trans);
1373 1532
1533 iwl_pcie_init_msix(trans_pcie);
1374 /* From now on, the op_mode will be kept updated about RF kill state */ 1534 /* From now on, the op_mode will be kept updated about RF kill state */
1375 iwl_enable_rfkill_int(trans); 1535 iwl_enable_rfkill_int(trans);
1376 1536
@@ -1425,7 +1585,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1425 1585
1426 mutex_unlock(&trans_pcie->mutex); 1586 mutex_unlock(&trans_pcie->mutex);
1427 1587
1428 synchronize_irq(trans_pcie->pci_dev->irq); 1588 iwl_pcie_synchronize_irqs(trans);
1429} 1589}
1430 1590
1431static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1591static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1506,15 +1666,25 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
1506 /* TODO: check if this is really needed */ 1666 /* TODO: check if this is really needed */
1507 pm_runtime_disable(trans->dev); 1667 pm_runtime_disable(trans->dev);
1508 1668
1509 synchronize_irq(trans_pcie->pci_dev->irq); 1669 iwl_pcie_synchronize_irqs(trans);
1510 1670
1511 iwl_pcie_tx_free(trans); 1671 iwl_pcie_tx_free(trans);
1512 iwl_pcie_rx_free(trans); 1672 iwl_pcie_rx_free(trans);
1513 1673
1514 free_irq(trans_pcie->pci_dev->irq, trans); 1674 if (trans_pcie->msix_enabled) {
1515 iwl_pcie_free_ict(trans); 1675 for (i = 0; i < trans_pcie->allocated_vector; i++)
1676 free_irq(trans_pcie->msix_entries[i].vector,
1677 &trans_pcie->msix_entries[i]);
1678
1679 pci_disable_msix(trans_pcie->pci_dev);
1680 trans_pcie->msix_enabled = false;
1681 } else {
1682 free_irq(trans_pcie->pci_dev->irq, trans);
1516 1683
1517 pci_disable_msi(trans_pcie->pci_dev); 1684 iwl_pcie_free_ict(trans);
1685
1686 pci_disable_msi(trans_pcie->pci_dev);
1687 }
1518 iounmap(trans_pcie->hw_base); 1688 iounmap(trans_pcie->hw_base);
1519 pci_release_regions(trans_pcie->pci_dev); 1689 pci_release_regions(trans_pcie->pci_dev);
1520 pci_disable_device(trans_pcie->pci_dev); 1690 pci_disable_device(trans_pcie->pci_dev);
@@ -2069,7 +2239,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2069 } else { 2239 } else {
2070 pos += scnprintf(buf + pos, bufsz - pos, 2240 pos += scnprintf(buf + pos, bufsz - pos,
2071 "\tclosed_rb_num: Not Allocated\n"); 2241 "\tclosed_rb_num: Not Allocated\n");
2072 } 2242 }
2073 } 2243 }
2074 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2244 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2075 kfree(buf); 2245 kfree(buf);
@@ -2615,7 +2785,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2615{ 2785{
2616 struct iwl_trans_pcie *trans_pcie; 2786 struct iwl_trans_pcie *trans_pcie;
2617 struct iwl_trans *trans; 2787 struct iwl_trans *trans;
2618 u16 pci_cmd;
2619 int ret, addr_size; 2788 int ret, addr_size;
2620 2789
2621 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2790 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
@@ -2698,17 +2867,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2698 trans_pcie->pci_dev = pdev; 2867 trans_pcie->pci_dev = pdev;
2699 iwl_disable_interrupts(trans); 2868 iwl_disable_interrupts(trans);
2700 2869
2701 ret = pci_enable_msi(pdev);
2702 if (ret) {
2703 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
2704 /* enable rfkill interrupt: hw bug w/a */
2705 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2706 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2707 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2708 pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
2709 }
2710 }
2711
2712 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 2870 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
2713 /* 2871 /*
2714 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 2872 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
@@ -2760,6 +2918,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2760 } 2918 }
2761 } 2919 }
2762 2920
2921 iwl_pcie_set_interrupt_capa(pdev, trans);
2763 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 2922 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
2764 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 2923 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
2765 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 2924 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
@@ -2769,19 +2928,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2769 2928
2770 init_waitqueue_head(&trans_pcie->d0i3_waitq); 2929 init_waitqueue_head(&trans_pcie->d0i3_waitq);
2771 2930
2772 ret = iwl_pcie_alloc_ict(trans); 2931 if (trans_pcie->msix_enabled) {
2773 if (ret) 2932 if (iwl_pcie_init_msix_handler(pdev, trans_pcie))
2774 goto out_pci_disable_msi; 2933 goto out_pci_release_regions;
2775 2934 } else {
2776 ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2935 ret = iwl_pcie_alloc_ict(trans);
2777 iwl_pcie_irq_handler, 2936 if (ret)
2778 IRQF_SHARED, DRV_NAME, trans); 2937 goto out_pci_disable_msi;
2779 if (ret) {
2780 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
2781 goto out_free_ict;
2782 }
2783 2938
2784 trans_pcie->inta_mask = CSR_INI_SET_MASK; 2939 ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
2940 iwl_pcie_irq_handler,
2941 IRQF_SHARED, DRV_NAME, trans);
2942 if (ret) {
2943 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
2944 goto out_free_ict;
2945 }
2946 trans_pcie->inta_mask = CSR_INI_SET_MASK;
2947 }
2785 2948
2786#ifdef CONFIG_IWLWIFI_PCIE_RTPM 2949#ifdef CONFIG_IWLWIFI_PCIE_RTPM
2787 trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; 2950 trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 837a7d536874..16ad820ca824 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1062,10 +1062,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1062 1062
1063 if (iwl_queue_space(&txq->q) > txq->q.low_mark && 1063 if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
1064 test_bit(txq_id, trans_pcie->queue_stopped)) { 1064 test_bit(txq_id, trans_pcie->queue_stopped)) {
1065 struct sk_buff_head skbs; 1065 struct sk_buff_head overflow_skbs;
1066 1066
1067 __skb_queue_head_init(&skbs); 1067 __skb_queue_head_init(&overflow_skbs);
1068 skb_queue_splice_init(&txq->overflow_q, &skbs); 1068 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
1069 1069
1070 /* 1070 /*
1071 * This is tricky: we are in reclaim path which is non 1071 * This is tricky: we are in reclaim path which is non
@@ -1076,8 +1076,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1076 */ 1076 */
1077 spin_unlock_bh(&txq->lock); 1077 spin_unlock_bh(&txq->lock);
1078 1078
1079 while (!skb_queue_empty(&skbs)) { 1079 while (!skb_queue_empty(&overflow_skbs)) {
1080 struct sk_buff *skb = __skb_dequeue(&skbs); 1080 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1081 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1081 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1082 u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; 1082 u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
1083 struct iwl_device_cmd *dev_cmd = 1083 struct iwl_device_cmd *dev_cmd =