-rw-r--r--	drivers/net/wireless/iwlwifi/Makefile        |   1
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-4965.c      |  25
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-core.h      |  16
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-dev.h       |  26
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-rx.c        | 371
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl4965-base.c  | 545
6 files changed, 517 insertions(+), 467 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index b0b2b5ebfa61..0211a7f7147d 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,5 +1,6 @@ | |||
1 | obj-$(CONFIG_IWLCORE) += iwlcore.o | 1 | obj-$(CONFIG_IWLCORE) += iwlcore.o |
2 | iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o | 2 | iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o |
3 | iwlcore-objs += iwl-rx.o | ||
3 | iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o | 4 | iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o |
4 | iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o | 5 | iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o |
5 | iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o | 6 | iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o |
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 70c0455b622d..773bb3229cfe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -372,7 +372,7 @@ int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src) | |||
372 | return ret; | 372 | return ret; |
373 | } | 373 | } |
374 | 374 | ||
375 | static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq) | 375 | static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq) |
376 | { | 376 | { |
377 | int ret; | 377 | int ret; |
378 | unsigned long flags; | 378 | unsigned long flags; |
@@ -625,7 +625,7 @@ static void iwl4965_nic_config(struct iwl_priv *priv) | |||
625 | int iwl4965_hw_nic_init(struct iwl_priv *priv) | 625 | int iwl4965_hw_nic_init(struct iwl_priv *priv) |
626 | { | 626 | { |
627 | unsigned long flags; | 627 | unsigned long flags; |
628 | struct iwl4965_rx_queue *rxq = &priv->rxq; | 628 | struct iwl_rx_queue *rxq = &priv->rxq; |
629 | int ret; | 629 | int ret; |
630 | 630 | ||
631 | /* nic_init */ | 631 | /* nic_init */ |
@@ -645,22 +645,22 @@ int iwl4965_hw_nic_init(struct iwl_priv *priv) | |||
645 | 645 | ||
646 | /* Allocate the RX queue, or reset if it is already allocated */ | 646 | /* Allocate the RX queue, or reset if it is already allocated */ |
647 | if (!rxq->bd) { | 647 | if (!rxq->bd) { |
648 | ret = iwl4965_rx_queue_alloc(priv); | 648 | ret = iwl_rx_queue_alloc(priv); |
649 | if (ret) { | 649 | if (ret) { |
650 | IWL_ERROR("Unable to initialize Rx queue\n"); | 650 | IWL_ERROR("Unable to initialize Rx queue\n"); |
651 | return -ENOMEM; | 651 | return -ENOMEM; |
652 | } | 652 | } |
653 | } else | 653 | } else |
654 | iwl4965_rx_queue_reset(priv, rxq); | 654 | iwl_rx_queue_reset(priv, rxq); |
655 | 655 | ||
656 | iwl4965_rx_replenish(priv); | 656 | iwl_rx_replenish(priv); |
657 | 657 | ||
658 | iwl4965_rx_init(priv, rxq); | 658 | iwl4965_rx_init(priv, rxq); |
659 | 659 | ||
660 | spin_lock_irqsave(&priv->lock, flags); | 660 | spin_lock_irqsave(&priv->lock, flags); |
661 | 661 | ||
662 | rxq->need_update = 1; | 662 | rxq->need_update = 1; |
663 | iwl4965_rx_queue_update_write_ptr(priv, rxq); | 663 | iwl_rx_queue_update_write_ptr(priv, rxq); |
664 | 664 | ||
665 | spin_unlock_irqrestore(&priv->lock, flags); | 665 | spin_unlock_irqrestore(&priv->lock, flags); |
666 | 666 | ||
@@ -2516,7 +2516,8 @@ static void iwl4965_rx_calc_noise(struct iwl_priv *priv) | |||
2516 | priv->last_rx_noise); | 2516 | priv->last_rx_noise); |
2517 | } | 2517 | } |
2518 | 2518 | ||
2519 | void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) | 2519 | void iwl4965_hw_rx_statistics(struct iwl_priv *priv, |
2520 | struct iwl_rx_mem_buffer *rxb) | ||
2520 | { | 2521 | { |
2521 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2522 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2522 | int change; | 2523 | int change; |
@@ -2803,7 +2804,7 @@ static u32 iwl4965_translate_rx_status(u32 decrypt_in) | |||
2803 | 2804 | ||
2804 | static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, | 2805 | static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data, |
2805 | int include_phy, | 2806 | int include_phy, |
2806 | struct iwl4965_rx_mem_buffer *rxb, | 2807 | struct iwl_rx_mem_buffer *rxb, |
2807 | struct ieee80211_rx_status *stats) | 2808 | struct ieee80211_rx_status *stats) |
2808 | { | 2809 | { |
2809 | struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; | 2810 | struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; |
@@ -3109,7 +3110,7 @@ static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv, | |||
3109 | /* Called for REPLY_RX (legacy ABG frames), or | 3110 | /* Called for REPLY_RX (legacy ABG frames), or |
3110 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ | 3111 | * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ |
3111 | static void iwl4965_rx_reply_rx(struct iwl_priv *priv, | 3112 | static void iwl4965_rx_reply_rx(struct iwl_priv *priv, |
3112 | struct iwl4965_rx_mem_buffer *rxb) | 3113 | struct iwl_rx_mem_buffer *rxb) |
3113 | { | 3114 | { |
3114 | struct ieee80211_hdr *header; | 3115 | struct ieee80211_hdr *header; |
3115 | struct ieee80211_rx_status rx_status; | 3116 | struct ieee80211_rx_status rx_status; |
@@ -3278,7 +3279,7 @@ static void iwl4965_rx_reply_rx(struct iwl_priv *priv, | |||
3278 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). | 3279 | /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). |
3279 | * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ | 3280 | * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ |
3280 | static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, | 3281 | static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, |
3281 | struct iwl4965_rx_mem_buffer *rxb) | 3282 | struct iwl_rx_mem_buffer *rxb) |
3282 | { | 3283 | { |
3283 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3284 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
3284 | priv->last_phy_res[0] = 1; | 3285 | priv->last_phy_res[0] = 1; |
@@ -3286,7 +3287,7 @@ static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv, | |||
3286 | sizeof(struct iwl4965_rx_phy_res)); | 3287 | sizeof(struct iwl4965_rx_phy_res)); |
3287 | } | 3288 | } |
3288 | static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, | 3289 | static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv, |
3289 | struct iwl4965_rx_mem_buffer *rxb) | 3290 | struct iwl_rx_mem_buffer *rxb) |
3290 | 3291 | ||
3291 | { | 3292 | { |
3292 | #ifdef CONFIG_IWL4965_RUN_TIME_CALIB | 3293 | #ifdef CONFIG_IWL4965_RUN_TIME_CALIB |
@@ -3495,7 +3496,7 @@ static inline int iwl4965_queue_dec_wrap(int index, int n_bd) | |||
3495 | * of frames sent via aggregation. | 3496 | * of frames sent via aggregation. |
3496 | */ | 3497 | */ |
3497 | static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, | 3498 | static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv, |
3498 | struct iwl4965_rx_mem_buffer *rxb) | 3499 | struct iwl_rx_mem_buffer *rxb) |
3499 | { | 3500 | { |
3500 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3501 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
3501 | struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; | 3502 | struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; |
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 369f1821584f..2356cadc1def 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -174,6 +174,21 @@ void iwlcore_free_geos(struct iwl_priv *priv); | |||
174 | int iwl_setup(struct iwl_priv *priv); | 174 | int iwl_setup(struct iwl_priv *priv); |
175 | 175 | ||
176 | /***************************************************** | 176 | /***************************************************** |
177 | * RX | ||
178 | ******************************************************/ | ||
179 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
180 | int iwl_rx_queue_alloc(struct iwl_priv *priv); | ||
181 | void iwl_rx_handle(struct iwl_priv *priv); | ||
182 | int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, | ||
183 | struct iwl_rx_queue *q); | ||
184 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); | ||
185 | void iwl_rx_replenish(struct iwl_priv *priv); | ||
186 | /* FIXME: remove when TX is moved to iwl core */ | ||
187 | int iwl_rx_queue_restock(struct iwl_priv *priv); | ||
188 | int iwl_rx_queue_space(const struct iwl_rx_queue *q); | ||
189 | void iwl_rx_allocate(struct iwl_priv *priv); | ||
190 | |||
191 | /***************************************************** | ||
177 | * S e n d i n g H o s t C o m m a n d s * | 192 | * S e n d i n g H o s t C o m m a n d s * |
178 | *****************************************************/ | 193 | *****************************************************/ |
179 | 194 | ||
@@ -265,4 +280,5 @@ static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) | |||
265 | return priv->cfg->ops->hcmd->rxon_assoc(priv); | 280 | return priv->cfg->ops->hcmd->rxon_assoc(priv); |
266 | } | 281 | } |
267 | 282 | ||
283 | |||
268 | #endif /* __iwl_core_h__ */ | 284 | #endif /* __iwl_core_h__ */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 78aba21bc18f..1d80ad79114d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -91,7 +91,7 @@ extern struct iwl_cfg iwl5350_agn_cfg; | |||
91 | #define DEFAULT_SHORT_RETRY_LIMIT 7U | 91 | #define DEFAULT_SHORT_RETRY_LIMIT 7U |
92 | #define DEFAULT_LONG_RETRY_LIMIT 4U | 92 | #define DEFAULT_LONG_RETRY_LIMIT 4U |
93 | 93 | ||
94 | struct iwl4965_rx_mem_buffer { | 94 | struct iwl_rx_mem_buffer { |
95 | dma_addr_t dma_addr; | 95 | dma_addr_t dma_addr; |
96 | struct sk_buff *skb; | 96 | struct sk_buff *skb; |
97 | struct list_head list; | 97 | struct list_head list; |
@@ -358,7 +358,7 @@ struct iwl_host_cmd { | |||
358 | #define SUP_RATE_11G_MAX_NUM_CHANNELS 12 | 358 | #define SUP_RATE_11G_MAX_NUM_CHANNELS 12 |
359 | 359 | ||
360 | /** | 360 | /** |
361 | * struct iwl4965_rx_queue - Rx queue | 361 | * struct iwl_rx_queue - Rx queue |
362 | * @processed: Internal index to last handled Rx packet | 362 | * @processed: Internal index to last handled Rx packet |
363 | * @read: Shared index to newest available Rx buffer | 363 | * @read: Shared index to newest available Rx buffer |
364 | * @write: Shared index to oldest written Rx packet | 364 | * @write: Shared index to oldest written Rx packet |
@@ -367,13 +367,13 @@ struct iwl_host_cmd { | |||
367 | * @rx_used: List of Rx buffers with no SKB | 367 | * @rx_used: List of Rx buffers with no SKB |
368 | * @need_update: flag to indicate we need to update read/write index | 368 | * @need_update: flag to indicate we need to update read/write index |
369 | * | 369 | * |
370 | * NOTE: rx_free and rx_used are used as a FIFO for iwl4965_rx_mem_buffers | 370 | * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers |
371 | */ | 371 | */ |
372 | struct iwl4965_rx_queue { | 372 | struct iwl_rx_queue { |
373 | __le32 *bd; | 373 | __le32 *bd; |
374 | dma_addr_t dma_addr; | 374 | dma_addr_t dma_addr; |
375 | struct iwl4965_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; | 375 | struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; |
376 | struct iwl4965_rx_mem_buffer *queue[RX_QUEUE_SIZE]; | 376 | struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; |
377 | u32 processed; | 377 | u32 processed; |
378 | u32 read; | 378 | u32 read; |
379 | u32 write; | 379 | u32 write; |
@@ -643,26 +643,20 @@ extern int iwl4965_is_network_packet(struct iwl_priv *priv, | |||
643 | struct ieee80211_hdr *header); | 643 | struct ieee80211_hdr *header); |
644 | extern int iwl4965_power_init_handle(struct iwl_priv *priv); | 644 | extern int iwl4965_power_init_handle(struct iwl_priv *priv); |
645 | extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv, | 645 | extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv, |
646 | struct iwl4965_rx_mem_buffer *rxb, | 646 | struct iwl_rx_mem_buffer *rxb, |
647 | void *data, short len, | 647 | void *data, short len, |
648 | struct ieee80211_rx_status *stats, | 648 | struct ieee80211_rx_status *stats, |
649 | u16 phy_flags); | 649 | u16 phy_flags); |
650 | extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv, | 650 | extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv, |
651 | struct ieee80211_hdr *header); | 651 | struct ieee80211_hdr *header); |
652 | extern int iwl4965_rx_queue_alloc(struct iwl_priv *priv); | ||
653 | extern void iwl4965_rx_queue_reset(struct iwl_priv *priv, | ||
654 | struct iwl4965_rx_queue *rxq); | ||
655 | extern int iwl4965_calc_db_from_ratio(int sig_ratio); | 652 | extern int iwl4965_calc_db_from_ratio(int sig_ratio); |
656 | extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm); | 653 | extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm); |
657 | extern int iwl4965_tx_queue_init(struct iwl_priv *priv, | 654 | extern int iwl4965_tx_queue_init(struct iwl_priv *priv, |
658 | struct iwl4965_tx_queue *txq, int count, u32 id); | 655 | struct iwl4965_tx_queue *txq, int count, u32 id); |
659 | extern void iwl4965_rx_replenish(void *data); | ||
660 | extern void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq); | 656 | extern void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq); |
661 | extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, | 657 | extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv, |
662 | struct ieee80211_hdr *hdr, | 658 | struct ieee80211_hdr *hdr, |
663 | const u8 *dest, int left); | 659 | const u8 *dest, int left); |
664 | extern int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, | ||
665 | struct iwl4965_rx_queue *q); | ||
666 | extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr); | 660 | extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr); |
667 | extern void iwl4965_update_chain_flags(struct iwl_priv *priv); | 661 | extern void iwl4965_update_chain_flags(struct iwl_priv *priv); |
668 | int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src); | 662 | int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src); |
@@ -722,7 +716,7 @@ extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv, | |||
722 | extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv); | 716 | extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv); |
723 | extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); | 717 | extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); |
724 | extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv, | 718 | extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv, |
725 | struct iwl4965_rx_mem_buffer *rxb); | 719 | struct iwl_rx_mem_buffer *rxb); |
726 | extern void iwl4965_disable_events(struct iwl_priv *priv); | 720 | extern void iwl4965_disable_events(struct iwl_priv *priv); |
727 | extern int iwl4965_get_temperature(const struct iwl_priv *priv); | 721 | extern int iwl4965_get_temperature(const struct iwl_priv *priv); |
728 | 722 | ||
@@ -960,7 +954,7 @@ struct iwl_priv { | |||
960 | bool add_radiotap; | 954 | bool add_radiotap; |
961 | 955 | ||
962 | void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, | 956 | void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, |
963 | struct iwl4965_rx_mem_buffer *rxb); | 957 | struct iwl_rx_mem_buffer *rxb); |
964 | 958 | ||
965 | struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; | 959 | struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; |
966 | 960 | ||
@@ -1077,7 +1071,7 @@ struct iwl_priv { | |||
1077 | int activity_timer_active; | 1071 | int activity_timer_active; |
1078 | 1072 | ||
1079 | /* Rx and Tx DMA processing queues */ | 1073 | /* Rx and Tx DMA processing queues */ |
1080 | struct iwl4965_rx_queue rxq; | 1074 | struct iwl_rx_queue rxq; |
1081 | struct iwl4965_tx_queue txq[IWL_MAX_NUM_QUEUES]; | 1075 | struct iwl4965_tx_queue txq[IWL_MAX_NUM_QUEUES]; |
1082 | unsigned long txq_ctx_active_msk; | 1076 | unsigned long txq_ctx_active_msk; |
1083 | struct iwl4965_kw kw; /* keep warm address */ | 1077 | struct iwl4965_kw kw; /* keep warm address */ |
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
new file mode 100644
index 000000000000..667b592e6ade
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -0,0 +1,371 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * James P. Ketrenos <ipw2100-admin@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #include <net/mac80211.h> | ||
31 | #include "iwl-eeprom.h" | ||
32 | #include "iwl-dev.h" | ||
33 | #include "iwl-core.h" | ||
34 | #include "iwl-sta.h" | ||
35 | #include "iwl-io.h" | ||
36 | #include "iwl-helpers.h" | ||
37 | /************************** RX-FUNCTIONS ****************************/ | ||
38 | /* | ||
39 | * Rx theory of operation | ||
40 | * | ||
41 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), | ||
42 | * each of which point to Receive Buffers to be filled by the NIC. These get | ||
43 | * used not only for Rx frames, but for any command response or notification | ||
44 | * from the NIC. The driver and NIC manage the Rx buffers by means | ||
45 | * of indexes into the circular buffer. | ||
46 | * | ||
47 | * Rx Queue Indexes | ||
48 | * The host/firmware share two index registers for managing the Rx buffers. | ||
49 | * | ||
50 | * The READ index maps to the first position that the firmware may be writing | ||
51 | * to -- the driver can read up to (but not including) this position and get | ||
52 | * good data. | ||
53 | * The READ index is managed by the firmware once the card is enabled. | ||
54 | * | ||
55 | * The WRITE index maps to the last position the driver has read from -- the | ||
56 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
57 | * | ||
58 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
59 | * WRITE = READ. | ||
60 | * | ||
61 | * During initialization, the host sets up the READ queue position to the first | ||
62 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
63 | * | ||
64 | * When the firmware places a packet in a buffer, it will advance the READ index | ||
65 | * and fire the RX interrupt. The driver can then query the READ index and | ||
66 | * process as many packets as possible, moving the WRITE index forward as it | ||
67 | * resets the Rx queue buffers with new memory. | ||
68 | * | ||
69 | * The management in the driver is as follows: | ||
70 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
71 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
72 | * to replenish the iwl->rxq->rx_free. | ||
73 | * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
74 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
75 | * 'processed' and 'read' driver indexes as well) | ||
76 | * + A received packet is processed and handed to the kernel network stack, | ||
77 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
78 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
79 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
80 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
81 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
82 | * | ||
83 | * | ||
84 | * Driver sequence: | ||
85 | * | ||
86 | * iwl_rx_queue_alloc() Allocates rx_free | ||
87 | * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
88 | * iwl_rx_queue_restock | ||
89 | * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
90 | * queue, updates firmware pointers, and updates | ||
91 | * the WRITE index. If insufficient rx_free buffers | ||
92 | * are available, schedules iwl_rx_replenish | ||
93 | * | ||
94 | * -- enable interrupts -- | ||
95 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | ||
96 | * READ INDEX, detaching the SKB from the pool. | ||
97 | * Moves the packet buffer from queue to rx_used. | ||
98 | * Calls iwl_rx_queue_restock to refill any empty | ||
99 | * slots. | ||
100 | * ... | ||
101 | * | ||
102 | */ | ||
103 | |||
104 | /** | ||
105 | * iwl_rx_queue_space - Return number of free slots available in queue. | ||
106 | */ | ||
107 | int iwl_rx_queue_space(const struct iwl_rx_queue *q) | ||
108 | { | ||
109 | int s = q->read - q->write; | ||
110 | if (s <= 0) | ||
111 | s += RX_QUEUE_SIZE; | ||
112 | /* keep some buffer to not confuse full and empty queue */ | ||
113 | s -= 2; | ||
114 | if (s < 0) | ||
115 | s = 0; | ||
116 | return s; | ||
117 | } | ||
118 | EXPORT_SYMBOL(iwl_rx_queue_space); | ||
119 | |||
120 | /** | ||
121 | * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue | ||
122 | */ | ||
123 | int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) | ||
124 | { | ||
125 | u32 reg = 0; | ||
126 | int ret = 0; | ||
127 | unsigned long flags; | ||
128 | |||
129 | spin_lock_irqsave(&q->lock, flags); | ||
130 | |||
131 | if (q->need_update == 0) | ||
132 | goto exit_unlock; | ||
133 | |||
134 | /* If power-saving is in use, make sure device is awake */ | ||
135 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | ||
136 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
137 | |||
138 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
139 | iwl_set_bit(priv, CSR_GP_CNTRL, | ||
140 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
141 | goto exit_unlock; | ||
142 | } | ||
143 | |||
144 | ret = iwl_grab_nic_access(priv); | ||
145 | if (ret) | ||
146 | goto exit_unlock; | ||
147 | |||
148 | /* Device expects a multiple of 8 */ | ||
149 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, | ||
150 | q->write & ~0x7); | ||
151 | iwl_release_nic_access(priv); | ||
152 | |||
153 | /* Else device is assumed to be awake */ | ||
154 | } else | ||
155 | /* Device expects a multiple of 8 */ | ||
156 | iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); | ||
157 | |||
158 | |||
159 | q->need_update = 0; | ||
160 | |||
161 | exit_unlock: | ||
162 | spin_unlock_irqrestore(&q->lock, flags); | ||
163 | return ret; | ||
164 | } | ||
165 | EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); | ||
166 | /** | ||
167 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
168 | */ | ||
169 | static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
170 | dma_addr_t dma_addr) | ||
171 | { | ||
172 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * iwl_rx_queue_restock - refill RX queue from pre-allocated pool | ||
177 | * | ||
178 | * If there are slots in the RX queue that need to be restocked, | ||
179 | * and we have free pre-allocated buffers, fill the ranks as much | ||
180 | * as we can, pulling from rx_free. | ||
181 | * | ||
182 | * This moves the 'write' index forward to catch up with 'processed', and | ||
183 | * also updates the memory address in the firmware to reference the new | ||
184 | * target buffer. | ||
185 | */ | ||
186 | int iwl_rx_queue_restock(struct iwl_priv *priv) | ||
187 | { | ||
188 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
189 | struct list_head *element; | ||
190 | struct iwl_rx_mem_buffer *rxb; | ||
191 | unsigned long flags; | ||
192 | int write; | ||
193 | int ret = 0; | ||
194 | |||
195 | spin_lock_irqsave(&rxq->lock, flags); | ||
196 | write = rxq->write & ~0x7; | ||
197 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
198 | /* Get next free Rx buffer, remove from free list */ | ||
199 | element = rxq->rx_free.next; | ||
200 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
201 | list_del(element); | ||
202 | |||
203 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
204 | rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); | ||
205 | rxq->queue[rxq->write] = rxb; | ||
206 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
207 | rxq->free_count--; | ||
208 | } | ||
209 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
210 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
211 | * refill it */ | ||
212 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
213 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
214 | |||
215 | |||
216 | /* If we've added more space for the firmware to place data, tell it. | ||
217 | * Increment device's write pointer in multiples of 8. */ | ||
218 | if ((write != (rxq->write & ~0x7)) | ||
219 | || (abs(rxq->write - rxq->read) > 7)) { | ||
220 | spin_lock_irqsave(&rxq->lock, flags); | ||
221 | rxq->need_update = 1; | ||
222 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
223 | ret = iwl_rx_queue_update_write_ptr(priv, rxq); | ||
224 | } | ||
225 | |||
226 | return ret; | ||
227 | } | ||
228 | EXPORT_SYMBOL(iwl_rx_queue_restock); | ||
229 | |||
230 | |||
231 | /** | ||
232 | * iwl_rx_replenish - Move all used packet from rx_used to rx_free | ||
233 | * | ||
234 | * When moving to rx_free an SKB is allocated for the slot. | ||
235 | * | ||
236 | * Also restock the Rx queue via iwl_rx_queue_restock. | ||
237 | * This is called as a scheduled work item (except for during initialization) | ||
238 | */ | ||
239 | void iwl_rx_allocate(struct iwl_priv *priv) | ||
240 | { | ||
241 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
242 | struct list_head *element; | ||
243 | struct iwl_rx_mem_buffer *rxb; | ||
244 | unsigned long flags; | ||
245 | spin_lock_irqsave(&rxq->lock, flags); | ||
246 | while (!list_empty(&rxq->rx_used)) { | ||
247 | element = rxq->rx_used.next; | ||
248 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
249 | |||
250 | /* Alloc a new receive buffer */ | ||
251 | rxb->skb = alloc_skb(priv->hw_params.rx_buf_size, | ||
252 | __GFP_NOWARN | GFP_ATOMIC); | ||
253 | if (!rxb->skb) { | ||
254 | if (net_ratelimit()) | ||
255 | printk(KERN_CRIT DRV_NAME | ||
256 | ": Can not allocate SKB buffers\n"); | ||
257 | /* We don't reschedule replenish work here -- we will | ||
258 | * call the restock method and if it still needs | ||
259 | * more buffers it will schedule replenish */ | ||
260 | break; | ||
261 | } | ||
262 | priv->alloc_rxb_skb++; | ||
263 | list_del(element); | ||
264 | |||
265 | /* Get physical address of RB/SKB */ | ||
266 | rxb->dma_addr = | ||
267 | pci_map_single(priv->pci_dev, rxb->skb->data, | ||
268 | priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); | ||
269 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
270 | rxq->free_count++; | ||
271 | } | ||
272 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
273 | } | ||
274 | EXPORT_SYMBOL(iwl_rx_allocate); | ||
275 | |||
276 | void iwl_rx_replenish(struct iwl_priv *priv) | ||
277 | { | ||
278 | unsigned long flags; | ||
279 | |||
280 | iwl_rx_allocate(priv); | ||
281 | |||
282 | spin_lock_irqsave(&priv->lock, flags); | ||
283 | iwl_rx_queue_restock(priv); | ||
284 | spin_unlock_irqrestore(&priv->lock, flags); | ||
285 | } | ||
286 | EXPORT_SYMBOL(iwl_rx_replenish); | ||
287 | |||
288 | |||
289 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
290 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL | ||
291 | * This free routine walks the list of POOL entries and if SKB is set to | ||
292 | * non NULL it is unmapped and freed | ||
293 | */ | ||
294 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
295 | { | ||
296 | int i; | ||
297 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
298 | if (rxq->pool[i].skb != NULL) { | ||
299 | pci_unmap_single(priv->pci_dev, | ||
300 | rxq->pool[i].dma_addr, | ||
301 | priv->hw_params.rx_buf_size, | ||
302 | PCI_DMA_FROMDEVICE); | ||
303 | dev_kfree_skb(rxq->pool[i].skb); | ||
304 | } | ||
305 | } | ||
306 | |||
307 | pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
308 | rxq->dma_addr); | ||
309 | rxq->bd = NULL; | ||
310 | } | ||
311 | EXPORT_SYMBOL(iwl_rx_queue_free); | ||
312 | |||
313 | int iwl_rx_queue_alloc(struct iwl_priv *priv) | ||
314 | { | ||
315 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
316 | struct pci_dev *dev = priv->pci_dev; | ||
317 | int i; | ||
318 | |||
319 | spin_lock_init(&rxq->lock); | ||
320 | INIT_LIST_HEAD(&rxq->rx_free); | ||
321 | INIT_LIST_HEAD(&rxq->rx_used); | ||
322 | |||
323 | /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ | ||
324 | rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); | ||
325 | if (!rxq->bd) | ||
326 | return -ENOMEM; | ||
327 | |||
328 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
329 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) | ||
330 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
331 | |||
332 | /* Set us so that we have processed and used all buffers, but have | ||
333 | * not restocked the Rx queue with fresh buffers */ | ||
334 | rxq->read = rxq->write = 0; | ||
335 | rxq->free_count = 0; | ||
336 | rxq->need_update = 0; | ||
337 | return 0; | ||
338 | } | ||
339 | EXPORT_SYMBOL(iwl_rx_queue_alloc); | ||
340 | |||
341 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
342 | { | ||
343 | unsigned long flags; | ||
344 | int i; | ||
345 | spin_lock_irqsave(&rxq->lock, flags); | ||
346 | INIT_LIST_HEAD(&rxq->rx_free); | ||
347 | INIT_LIST_HEAD(&rxq->rx_used); | ||
348 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
349 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
350 | /* In the reset function, these buffers may have been allocated | ||
351 | * to an SKB, so we need to unmap and free potential storage */ | ||
352 | if (rxq->pool[i].skb != NULL) { | ||
353 | pci_unmap_single(priv->pci_dev, | ||
354 | rxq->pool[i].dma_addr, | ||
355 | priv->hw_params.rx_buf_size, | ||
356 | PCI_DMA_FROMDEVICE); | ||
357 | priv->alloc_rxb_skb--; | ||
358 | dev_kfree_skb(rxq->pool[i].skb); | ||
359 | rxq->pool[i].skb = NULL; | ||
360 | } | ||
361 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
362 | } | ||
363 | |||
364 | /* Set us so that we have processed and used all buffers, but have | ||
365 | * not restocked the Rx queue with fresh buffers */ | ||
366 | rxq->read = rxq->write = 0; | ||
367 | rxq->free_count = 0; | ||
368 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
369 | } | ||
370 | EXPORT_SYMBOL(iwl_rx_queue_reset); | ||
371 | |||
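
The new iwl-rx.c above carries two small hardware-interface details that are easy to miss while reading the diff: iwl_dma_addr2rbd_ptr() stores each receive buffer's bus address shifted right by 8 bits (so an RBD holds a 256-byte-aligned pointer), and iwl_rx_queue_update_write_ptr() only ever reports a write index rounded down to a multiple of 8 (q->write & ~0x7). The standalone C sketch below only illustrates that arithmetic with made-up values; it is not driver code, and the sample bus address and write index are hypothetical.

	/* Standalone illustration -- not part of the driver. */
	#include <stdio.h>

	/* Mirrors iwl_dma_addr2rbd_ptr(): the RBD stores the bus address >> 8,
	 * i.e. receive buffers are referenced as 256-byte-aligned pointers. */
	static unsigned int dma_addr2rbd_ptr(unsigned long long dma_addr)
	{
		return (unsigned int)(dma_addr >> 8);
	}

	int main(void)
	{
		unsigned long long dma_addr = 0x3f8a4100ULL; /* hypothetical bus address */
		unsigned int write = 13;                     /* hypothetical write index */

		printf("RBD word written to rxq->bd[]: 0x%08x\n",
		       dma_addr2rbd_ptr(dma_addr));

		/* Mirrors iwl_rx_queue_update_write_ptr(): the device is only told
		 * about the write index rounded down to a multiple of 8. */
		printf("index written to FH_RSCSR_CHNL0_WPTR: %u\n", write & ~0x7u);
		return 0;
	}
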
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 54534270d46f..aa0393589dae 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -2736,7 +2736,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, | |||
2736 | * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response | 2736 | * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response |
2737 | */ | 2737 | */ |
2738 | static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | 2738 | static void iwl4965_rx_reply_tx(struct iwl_priv *priv, |
2739 | struct iwl4965_rx_mem_buffer *rxb) | 2739 | struct iwl_rx_mem_buffer *rxb) |
2740 | { | 2740 | { |
2741 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2741 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2742 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | 2742 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
@@ -2849,7 +2849,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv, | |||
2849 | 2849 | ||
2850 | 2850 | ||
2851 | static void iwl4965_rx_reply_alive(struct iwl_priv *priv, | 2851 | static void iwl4965_rx_reply_alive(struct iwl_priv *priv, |
2852 | struct iwl4965_rx_mem_buffer *rxb) | 2852 | struct iwl_rx_mem_buffer *rxb) |
2853 | { | 2853 | { |
2854 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2854 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2855 | struct iwl4965_alive_resp *palive; | 2855 | struct iwl4965_alive_resp *palive; |
@@ -2885,7 +2885,7 @@ static void iwl4965_rx_reply_alive(struct iwl_priv *priv, | |||
2885 | } | 2885 | } |
2886 | 2886 | ||
2887 | static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv, | 2887 | static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv, |
2888 | struct iwl4965_rx_mem_buffer *rxb) | 2888 | struct iwl_rx_mem_buffer *rxb) |
2889 | { | 2889 | { |
2890 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2890 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2891 | 2891 | ||
@@ -2894,7 +2894,7 @@ static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv, | |||
2894 | } | 2894 | } |
2895 | 2895 | ||
2896 | static void iwl4965_rx_reply_error(struct iwl_priv *priv, | 2896 | static void iwl4965_rx_reply_error(struct iwl_priv *priv, |
2897 | struct iwl4965_rx_mem_buffer *rxb) | 2897 | struct iwl_rx_mem_buffer *rxb) |
2898 | { | 2898 | { |
2899 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2899 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2900 | 2900 | ||
@@ -2909,7 +2909,7 @@ static void iwl4965_rx_reply_error(struct iwl_priv *priv, | |||
2909 | 2909 | ||
2910 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | 2910 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x |
2911 | 2911 | ||
2912 | static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb) | 2912 | static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) |
2913 | { | 2913 | { |
2914 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2914 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2915 | struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon; | 2915 | struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon; |
@@ -2921,7 +2921,7 @@ static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer * | |||
2921 | } | 2921 | } |
2922 | 2922 | ||
2923 | static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, | 2923 | static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, |
2924 | struct iwl4965_rx_mem_buffer *rxb) | 2924 | struct iwl_rx_mem_buffer *rxb) |
2925 | { | 2925 | { |
2926 | #ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT | 2926 | #ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT |
2927 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2927 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
@@ -2939,7 +2939,7 @@ static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv, | |||
2939 | } | 2939 | } |
2940 | 2940 | ||
2941 | static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv, | 2941 | static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv, |
2942 | struct iwl4965_rx_mem_buffer *rxb) | 2942 | struct iwl_rx_mem_buffer *rxb) |
2943 | { | 2943 | { |
2944 | #ifdef CONFIG_IWLWIFI_DEBUG | 2944 | #ifdef CONFIG_IWLWIFI_DEBUG |
2945 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2945 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
@@ -2950,7 +2950,7 @@ static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv, | |||
2950 | } | 2950 | } |
2951 | 2951 | ||
2952 | static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv, | 2952 | static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv, |
2953 | struct iwl4965_rx_mem_buffer *rxb) | 2953 | struct iwl_rx_mem_buffer *rxb) |
2954 | { | 2954 | { |
2955 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2955 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
2956 | IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " | 2956 | IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " |
@@ -2985,7 +2985,7 @@ static void iwl4965_bg_beacon_update(struct work_struct *work) | |||
2985 | } | 2985 | } |
2986 | 2986 | ||
2987 | static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, | 2987 | static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, |
2988 | struct iwl4965_rx_mem_buffer *rxb) | 2988 | struct iwl_rx_mem_buffer *rxb) |
2989 | { | 2989 | { |
2990 | #ifdef CONFIG_IWLWIFI_DEBUG | 2990 | #ifdef CONFIG_IWLWIFI_DEBUG |
2991 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 2991 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
@@ -3008,7 +3008,7 @@ static void iwl4965_rx_beacon_notif(struct iwl_priv *priv, | |||
3008 | 3008 | ||
3009 | /* Service response to REPLY_SCAN_CMD (0x80) */ | 3009 | /* Service response to REPLY_SCAN_CMD (0x80) */ |
3010 | static void iwl4965_rx_reply_scan(struct iwl_priv *priv, | 3010 | static void iwl4965_rx_reply_scan(struct iwl_priv *priv, |
3011 | struct iwl4965_rx_mem_buffer *rxb) | 3011 | struct iwl_rx_mem_buffer *rxb) |
3012 | { | 3012 | { |
3013 | #ifdef CONFIG_IWLWIFI_DEBUG | 3013 | #ifdef CONFIG_IWLWIFI_DEBUG |
3014 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3014 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
@@ -3021,7 +3021,7 @@ static void iwl4965_rx_reply_scan(struct iwl_priv *priv, | |||
3021 | 3021 | ||
3022 | /* Service SCAN_START_NOTIFICATION (0x82) */ | 3022 | /* Service SCAN_START_NOTIFICATION (0x82) */ |
3023 | static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv, | 3023 | static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv, |
3024 | struct iwl4965_rx_mem_buffer *rxb) | 3024 | struct iwl_rx_mem_buffer *rxb) |
3025 | { | 3025 | { |
3026 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3026 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
3027 | struct iwl4965_scanstart_notification *notif = | 3027 | struct iwl4965_scanstart_notification *notif = |
@@ -3038,7 +3038,7 @@ static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv, | |||
3038 | 3038 | ||
3039 | /* Service SCAN_RESULTS_NOTIFICATION (0x83) */ | 3039 | /* Service SCAN_RESULTS_NOTIFICATION (0x83) */ |
3040 | static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv, | 3040 | static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv, |
3041 | struct iwl4965_rx_mem_buffer *rxb) | 3041 | struct iwl_rx_mem_buffer *rxb) |
3042 | { | 3042 | { |
3043 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3043 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
3044 | struct iwl4965_scanresults_notification *notif = | 3044 | struct iwl4965_scanresults_notification *notif = |
@@ -3063,7 +3063,7 @@ static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv, | |||
3063 | 3063 | ||
3064 | /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ | 3064 | /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ |
3065 | static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv, | 3065 | static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv, |
3066 | struct iwl4965_rx_mem_buffer *rxb) | 3066 | struct iwl_rx_mem_buffer *rxb) |
3067 | { | 3067 | { |
3068 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3068 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
3069 | struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw; | 3069 | struct iwl4965_scancomplete_notification *scan_notif = (void *)pkt->u.raw; |
@@ -3121,7 +3121,7 @@ reschedule: | |||
3121 | /* Handle notification from uCode that card's power state is changing | 3121 | /* Handle notification from uCode that card's power state is changing |
3122 | * due to software, hardware, or critical temperature RFKILL */ | 3122 | * due to software, hardware, or critical temperature RFKILL */ |
3123 | static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, | 3123 | static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, |
3124 | struct iwl4965_rx_mem_buffer *rxb) | 3124 | struct iwl_rx_mem_buffer *rxb) |
3125 | { | 3125 | { |
3126 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; | 3126 | struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; |
3127 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); | 3127 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); |
@@ -3241,7 +3241,7 @@ static void iwl4965_setup_rx_handlers(struct iwl_priv *priv) | |||
3241 | * if the callback returns 1 | 3241 | * if the callback returns 1 |
3242 | */ | 3242 | */ |
3243 | static void iwl4965_tx_cmd_complete(struct iwl_priv *priv, | 3243 | static void iwl4965_tx_cmd_complete(struct iwl_priv *priv, |
3244 | struct iwl4965_rx_mem_buffer *rxb) | 3244 | struct iwl_rx_mem_buffer *rxb) |
3245 | { | 3245 | { |
3246 | struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; | 3246 | struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; |
3247 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | 3247 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); |
@@ -3278,438 +3278,28 @@ static void iwl4965_tx_cmd_complete(struct iwl_priv *priv, | |||
3278 | } | 3278 | } |
3279 | } | 3279 | } |
3280 | 3280 | ||
3281 | /************************** RX-FUNCTIONS ****************************/ | ||
3282 | /* | ||
3283 | * Rx theory of operation | ||
3284 | * | ||
3285 | * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), | ||
3286 | * each of which point to Receive Buffers to be filled by 4965. These get | ||
3287 | * used not only for Rx frames, but for any command response or notification | ||
3288 | * from the 4965. The driver and 4965 manage the Rx buffers by means | ||
3289 | * of indexes into the circular buffer. | ||
3290 | * | ||
3291 | * Rx Queue Indexes | ||
3292 | * The host/firmware share two index registers for managing the Rx buffers. | ||
3293 | * | ||
3294 | * The READ index maps to the first position that the firmware may be writing | ||
3295 | * to -- the driver can read up to (but not including) this position and get | ||
3296 | * good data. | ||
3297 | * The READ index is managed by the firmware once the card is enabled. | ||
3298 | * | ||
3299 | * The WRITE index maps to the last position the driver has read from -- the | ||
3300 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
3301 | * | ||
3302 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
3303 | * WRITE = READ. | ||
3304 | * | ||
3305 | * During initialization, the host sets up the READ queue position to the first | ||
3306 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
3307 | * | ||
3308 | * When the firmware places a packet in a buffer, it will advance the READ index | ||
3309 | * and fire the RX interrupt. The driver can then query the READ index and | ||
3310 | * process as many packets as possible, moving the WRITE index forward as it | ||
3311 | * resets the Rx queue buffers with new memory. | ||
3312 | * | ||
3313 | * The management in the driver is as follows: | ||
3314 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
3315 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
3316 | * to replenish the iwl->rxq->rx_free. | ||
3317 | * + In iwl4965_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
3318 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
3319 | * 'processed' and 'read' driver indexes as well) | ||
3320 | * + A received packet is processed and handed to the kernel network stack, | ||
3321 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
3322 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
3323 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
3324 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
3325 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
3326 | * | ||
3327 | * | ||
3328 | * Driver sequence: | ||
3329 | * | ||
3330 | * iwl4965_rx_queue_alloc() Allocates rx_free | ||
3331 | * iwl4965_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
3332 | * iwl4965_rx_queue_restock | ||
3333 | * iwl4965_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
3334 | * queue, updates firmware pointers, and updates | ||
3335 | * the WRITE index. If insufficient rx_free buffers | ||
3336 | * are available, schedules iwl4965_rx_replenish | ||
3337 | * | ||
3338 | * -- enable interrupts -- | ||
3339 | * ISR - iwl4965_rx() Detach iwl4965_rx_mem_buffers from pool up to the | ||
3340 | * READ INDEX, detaching the SKB from the pool. | ||
3341 | * Moves the packet buffer from queue to rx_used. | ||
3342 | * Calls iwl4965_rx_queue_restock to refill any empty | ||
3343 | * slots. | ||
3344 | * ... | ||
3345 | * | ||
3346 | */ | ||
3347 | |||
3348 | /** | ||
3349 | * iwl4965_rx_queue_space - Return number of free slots available in queue. | ||
3350 | */ | ||
3351 | static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q) | ||
3352 | { | ||
3353 | int s = q->read - q->write; | ||
3354 | if (s <= 0) | ||
3355 | s += RX_QUEUE_SIZE; | ||
3356 | /* keep some buffer to not confuse full and empty queue */ | ||
3357 | s -= 2; | ||
3358 | if (s < 0) | ||
3359 | s = 0; | ||
3360 | return s; | ||
3361 | } | ||
3362 | |||
3363 | /** | ||
3364 | * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue | ||
3365 | */ | ||
3366 | int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q) | ||
3367 | { | ||
3368 | u32 reg = 0; | ||
3369 | int rc = 0; | ||
3370 | unsigned long flags; | ||
3371 | |||
3372 | spin_lock_irqsave(&q->lock, flags); | ||
3373 | |||
3374 | if (q->need_update == 0) | ||
3375 | goto exit_unlock; | ||
3376 | |||
3377 | /* If power-saving is in use, make sure device is awake */ | ||
3378 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | ||
3379 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
3380 | |||
3381 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
3382 | iwl_set_bit(priv, CSR_GP_CNTRL, | ||
3383 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
3384 | goto exit_unlock; | ||
3385 | } | ||
3386 | |||
3387 | rc = iwl_grab_nic_access(priv); | ||
3388 | if (rc) | ||
3389 | goto exit_unlock; | ||
3390 | |||
3391 | /* Device expects a multiple of 8 */ | ||
3392 | iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, | ||
3393 | q->write & ~0x7); | ||
3394 | iwl_release_nic_access(priv); | ||
3395 | |||
3396 | /* Else device is assumed to be awake */ | ||
3397 | } else | ||
3398 | /* Device expects a multiple of 8 */ | ||
3399 | iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); | ||
3400 | |||
3401 | |||
3402 | q->need_update = 0; | ||
3403 | |||
3404 | exit_unlock: | ||
3405 | spin_unlock_irqrestore(&q->lock, flags); | ||
3406 | return rc; | ||
3407 | } | ||
3408 | |||
3409 | /** | ||
3410 | * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
3411 | */ | ||
3412 | static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
3413 | dma_addr_t dma_addr) | ||
3414 | { | ||
3415 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
3416 | } | ||
3417 | |||
3418 | |||
3419 | /** | ||
3420 | * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool | ||
3421 | * | ||
3422 | * If there are slots in the RX queue that need to be restocked, | ||
3423 | * and we have free pre-allocated buffers, fill the ranks as much | ||
3424 | * as we can, pulling from rx_free. | ||
3425 | * | ||
3426 | * This moves the 'write' index forward to catch up with 'processed', and | ||
3427 | * also updates the memory address in the firmware to reference the new | ||
3428 | * target buffer. | ||
3429 | */ | ||
3430 | static int iwl4965_rx_queue_restock(struct iwl_priv *priv) | ||
3431 | { | ||
3432 | struct iwl4965_rx_queue *rxq = &priv->rxq; | ||
3433 | struct list_head *element; | ||
3434 | struct iwl4965_rx_mem_buffer *rxb; | ||
3435 | unsigned long flags; | ||
3436 | int write, rc; | ||
3437 | |||
3438 | spin_lock_irqsave(&rxq->lock, flags); | ||
3439 | write = rxq->write & ~0x7; | ||
3440 | while ((iwl4965_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
3441 | /* Get next free Rx buffer, remove from free list */ | ||
3442 | element = rxq->rx_free.next; | ||
3443 | rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list); | ||
3444 | list_del(element); | ||
3445 | |||
3446 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
3447 | rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv, rxb->dma_addr); | ||
3448 | rxq->queue[rxq->write] = rxb; | ||
3449 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
3450 | rxq->free_count--; | ||
3451 | } | ||
3452 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
3453 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
3454 | * refill it */ | ||
3455 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
3456 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
3457 | |||
3458 | |||
3459 | /* If we've added more space for the firmware to place data, tell it. | ||
3460 | * Increment device's write pointer in multiples of 8. */ | ||
3461 | if ((write != (rxq->write & ~0x7)) | ||
3462 | || (abs(rxq->write - rxq->read) > 7)) { | ||
3463 | spin_lock_irqsave(&rxq->lock, flags); | ||
3464 | rxq->need_update = 1; | ||
3465 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
3466 | rc = iwl4965_rx_queue_update_write_ptr(priv, rxq); | ||
3467 | if (rc) | ||
3468 | return rc; | ||
3469 | } | ||
3470 | |||
3471 | return 0; | ||
3472 | } | ||
3473 | |||
3474 | /** | ||
3475 | * iwl4965_rx_replenish - Move all used packet from rx_used to rx_free | ||
3476 | * | ||
3477 | * When moving to rx_free an SKB is allocated for the slot. | ||
3478 | * | ||
3479 | * Also restock the Rx queue via iwl4965_rx_queue_restock. | ||
3480 | * This is called as a scheduled work item (except for during initialization) | ||
3481 | */ | ||
3482 | static void iwl4965_rx_allocate(struct iwl_priv *priv) | ||
3483 | { | ||
3484 | struct iwl4965_rx_queue *rxq = &priv->rxq; | ||
3485 | struct list_head *element; | ||
3486 | struct iwl4965_rx_mem_buffer *rxb; | ||
3487 | unsigned long flags; | ||
3488 | spin_lock_irqsave(&rxq->lock, flags); | ||
3489 | while (!list_empty(&rxq->rx_used)) { | ||
3490 | element = rxq->rx_used.next; | ||
3491 | rxb = list_entry(element, struct iwl4965_rx_mem_buffer, list); | ||
3492 | |||
3493 | /* Alloc a new receive buffer */ | ||
3494 | rxb->skb = | ||
3495 | alloc_skb(priv->hw_params.rx_buf_size, | ||
3496 | __GFP_NOWARN | GFP_ATOMIC); | ||
3497 | if (!rxb->skb) { | ||
3498 | if (net_ratelimit()) | ||
3499 | printk(KERN_CRIT DRV_NAME | ||
3500 | ": Can not allocate SKB buffers\n"); | ||
3501 | /* We don't reschedule replenish work here -- we will | ||
3502 | * call the restock method and if it still needs | ||
3503 | * more buffers it will schedule replenish */ | ||
3504 | break; | ||
3505 | } | ||
3506 | priv->alloc_rxb_skb++; | ||
3507 | list_del(element); | ||
3508 | |||
3509 | /* Get physical address of RB/SKB */ | ||
3510 | rxb->dma_addr = | ||
3511 | pci_map_single(priv->pci_dev, rxb->skb->data, | ||
3512 | priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); | ||
3513 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
3514 | rxq->free_count++; | ||
3515 | } | ||
3516 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
3517 | } | ||
3518 | |||
3519 | /* | 3281 | /* |
3520 | * this should be called while priv->lock is locked | 3282 | * this should be called while priv->lock is locked |
3521 | */ | 3283 | */ |
3522 | static void __iwl4965_rx_replenish(void *data) | 3284 | static void __iwl_rx_replenish(struct iwl_priv *priv) |
3523 | { | ||
3524 | struct iwl_priv *priv = data; | ||
3525 | |||
3526 | iwl4965_rx_allocate(priv); | ||
3527 | iwl4965_rx_queue_restock(priv); | ||
3528 | } | ||
3529 | |||
3530 | |||
3531 | void iwl4965_rx_replenish(void *data) | ||
3532 | { | ||
3533 | struct iwl_priv *priv = data; | ||
3534 | unsigned long flags; | ||
3535 | |||
3536 | iwl4965_rx_allocate(priv); | ||
3537 | |||
3538 | spin_lock_irqsave(&priv->lock, flags); | ||
3539 | iwl4965_rx_queue_restock(priv); | ||
3540 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3541 | } | ||
3542 | |||
3543 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
3544 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL | ||
3545 | * This free routine walks the list of POOL entries and if SKB is set to | ||
3546 | * non NULL it is unmapped and freed | ||
3547 | */ | ||
3548 | static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq) | ||
3549 | { | ||
3550 | int i; | ||
3551 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
3552 | if (rxq->pool[i].skb != NULL) { | ||
3553 | pci_unmap_single(priv->pci_dev, | ||
3554 | rxq->pool[i].dma_addr, | ||
3555 | priv->hw_params.rx_buf_size, | ||
3556 | PCI_DMA_FROMDEVICE); | ||
3557 | dev_kfree_skb(rxq->pool[i].skb); | ||
3558 | } | ||
3559 | } | ||
3560 | |||
3561 | pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
3562 | rxq->dma_addr); | ||
3563 | rxq->bd = NULL; | ||
3564 | } | ||
3565 | |||
3566 | int iwl4965_rx_queue_alloc(struct iwl_priv *priv) | ||
3567 | { | ||
3568 | struct iwl4965_rx_queue *rxq = &priv->rxq; | ||
3569 | struct pci_dev *dev = priv->pci_dev; | ||
3570 | int i; | ||
3571 | |||
3572 | spin_lock_init(&rxq->lock); | ||
3573 | INIT_LIST_HEAD(&rxq->rx_free); | ||
3574 | INIT_LIST_HEAD(&rxq->rx_used); | ||
3575 | |||
3576 | /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ | ||
3577 | rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); | ||
3578 | if (!rxq->bd) | ||
3579 | return -ENOMEM; | ||
3580 | |||
3581 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
3582 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) | ||
3583 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
3584 | |||
3585 | /* Set us so that we have processed and used all buffers, but have | ||
3586 | * not restocked the Rx queue with fresh buffers */ | ||
3587 | rxq->read = rxq->write = 0; | ||
3588 | rxq->free_count = 0; | ||
3589 | rxq->need_update = 0; | ||
3590 | return 0; | ||
3591 | } | ||
3592 | |||
3593 | void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq) | ||
3594 | { | ||
3595 | unsigned long flags; | ||
3596 | int i; | ||
3597 | spin_lock_irqsave(&rxq->lock, flags); | ||
3598 | INIT_LIST_HEAD(&rxq->rx_free); | ||
3599 | INIT_LIST_HEAD(&rxq->rx_used); | ||
3600 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
3601 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
3602 | /* In the reset function, these buffers may have been allocated | ||
3603 | * to an SKB, so we need to unmap and free potential storage */ | ||
3604 | if (rxq->pool[i].skb != NULL) { | ||
3605 | pci_unmap_single(priv->pci_dev, | ||
3606 | rxq->pool[i].dma_addr, | ||
3607 | priv->hw_params.rx_buf_size, | ||
3608 | PCI_DMA_FROMDEVICE); | ||
3609 | priv->alloc_rxb_skb--; | ||
3610 | dev_kfree_skb(rxq->pool[i].skb); | ||
3611 | rxq->pool[i].skb = NULL; | ||
3612 | } | ||
3613 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
3614 | } | ||
3615 | |||
3616 | /* Set us so that we have processed and used all buffers, but have | ||
3617 | * not restocked the Rx queue with fresh buffers */ | ||
3618 | rxq->read = rxq->write = 0; | ||
3619 | rxq->free_count = 0; | ||
3620 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
3621 | } | ||
3622 | |||
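
Free, alloc and reset leave the queue in deliberately different states: _free() tears everything down including the BD ring, _alloc() creates the ring and parks the whole pool on rx_used, and _reset() returns to that same "allocated but empty" state without touching the ring, unmapping and dropping any skbs still attached along the way. Purely for exposition (not driver code), the post-condition both _alloc() and _reset() establish:

        /* Invariants after iwl4965_rx_queue_alloc()/_reset(): nothing is ready
         * for the hardware yet; everything waits for a replenish pass.
         */
        static void example_check_rxq_empty(struct iwl_rx_queue *rxq)
        {
                WARN_ON(rxq->read != 0 || rxq->write != 0);
                WARN_ON(rxq->free_count != 0);
                WARN_ON(!list_empty(&rxq->rx_free));    /* no mapped buffers yet    */
                WARN_ON(list_empty(&rxq->rx_used));     /* whole pool is on rx_used */
        }
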
3623 | /* Convert linear signal-to-noise ratio into dB */ | ||
3624 | static u8 ratio2dB[100] = { | ||
3625 | /* 0 1 2 3 4 5 6 7 8 9 */ | ||
3626 | 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ | ||
3627 | 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ | ||
3628 | 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ | ||
3629 | 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ | ||
3630 | 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ | ||
3631 | 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ | ||
3632 | 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ | ||
3633 | 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ | ||
3634 | 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ | ||
3635 | 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ | ||
3636 | }; | ||
3637 | |||
3638 | /* Calculates a relative dB value from a ratio of linear | ||
3639 | * (i.e. not dB) signal levels. | ||
3640 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | ||
3641 | int iwl4965_calc_db_from_ratio(int sig_ratio) | ||
3642 | { | 3285 | { |
3643 | /* 1000:1 or higher just report as 60 dB */ | 3286 | iwl_rx_allocate(priv); |
3644 | if (sig_ratio >= 1000) | 3287 | iwl_rx_queue_restock(priv); |
3645 | return 60; | ||
3646 | |||
3647 | /* 100:1 or higher, divide by 10 and use table, | ||
3648 | * add 20 dB to make up for divide by 10 */ | ||
3649 | if (sig_ratio >= 100) | ||
3650 | return (20 + (int)ratio2dB[sig_ratio/10]); | ||
3651 | |||
3652 | /* We shouldn't see this */ | ||
3653 | if (sig_ratio < 1) | ||
3654 | return 0; | ||
3655 | |||
3656 | /* Use table for ratios 1:1 - 99:1 */ | ||
3657 | return (int)ratio2dB[sig_ratio]; | ||
3658 | } | 3288 | } |
3659 | 3289 | ||
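
A couple of worked values make the table-plus-offset scheme concrete; the self-test wrapper below is throwaway illustration, not driver code.

        /* Worked examples of the lookup above:
         *   250:1  ->  >= 100, so 20 + ratio2dB[25] = 20 + 28 = 48 dB
         *              (20 * log10(250) is about 47.96, so the table tracks a
         *               20*log voltage ratio, as the comment says)
         *   40:1   ->  ratio2dB[40] = 32 dB   (20 * log10(40) is about 32.04)
         *   1500:1 ->  clamped to 60 dB
         */
        void example_ratio2db_selftest(void)
        {
                BUG_ON(iwl4965_calc_db_from_ratio(250)  != 48);
                BUG_ON(iwl4965_calc_db_from_ratio(40)   != 32);
                BUG_ON(iwl4965_calc_db_from_ratio(1500) != 60);
        }
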
3660 | #define PERFECT_RSSI (-20) /* dBm */ | ||
3661 | #define WORST_RSSI (-95) /* dBm */ | ||
3662 | #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) | ||
3663 | |||
3664 | /* Calculate an indication of rx signal quality (a percentage, not dBm!). | ||
3665 | * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info | ||
3666 | * about formulas used below. */ | ||
3667 | int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm) | ||
3668 | { | ||
3669 | int sig_qual; | ||
3670 | int degradation = PERFECT_RSSI - rssi_dbm; | ||
3671 | |||
3672 | /* If we get a noise measurement, use signal-to-noise ratio (SNR) | ||
3673 | * as indicator; formula is (signal dbm - noise dbm). | ||
3674 | * SNR at or above 40 is a great signal (100%). | ||
3675 | * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. | ||
3676 | * Weakest usable signal is usually 10 - 15 dB SNR. */ | ||
3677 | if (noise_dbm) { | ||
3678 | if (rssi_dbm - noise_dbm >= 40) | ||
3679 | return 100; | ||
3680 | else if (rssi_dbm < noise_dbm) | ||
3681 | return 0; | ||
3682 | sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; | ||
3683 | |||
3684 | /* Else use just the signal level. | ||
3685 | * This formula is a least squares fit of data points collected and | ||
3686 | * compared with a reference system that had a percentage (%) display | ||
3687 | * for signal quality. */ | ||
3688 | } else | ||
3689 | sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * | ||
3690 | (15 * RSSI_RANGE + 62 * degradation)) / | ||
3691 | (RSSI_RANGE * RSSI_RANGE); | ||
3692 | |||
3693 | if (sig_qual > 100) | ||
3694 | sig_qual = 100; | ||
3695 | else if (sig_qual < 1) | ||
3696 | sig_qual = 0; | ||
3697 | |||
3698 | return sig_qual; | ||
3699 | } | ||
3700 | 3290 | ||
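
Worked examples of both branches (again, the self-test wrapper is illustration only): with a noise figure of -90 dBm and an RSSI of -65 dBm the SNR is 25 dB, giving (25 * 5) / 2 = 62%; without a noise figure, an RSSI of -60 dBm gives degradation = 40 against RSSI_RANGE = 75, so (100*75*75 - 40*(15*75 + 62*40)) / (75*75) = 418300 / 5625 = 74%.

        void example_sig_qual_selftest(void)
        {
                BUG_ON(iwl4965_calc_sig_qual(-65, -90) != 62);  /* SNR branch      */
                BUG_ON(iwl4965_calc_sig_qual(-60, 0)   != 74);  /* RSSI-only branch */
                BUG_ON(iwl4965_calc_sig_qual(-20, 0)   != 100); /* PERFECT_RSSI    */
        }
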
3701 | /** | 3291 | /** |
3702 | * iwl4965_rx_handle - Main entry function for receiving responses from uCode | 3292 | * iwl_rx_handle - Main entry function for receiving responses from uCode |
3703 | * | 3293 | * |
3704 | * Uses the priv->rx_handlers callback function array to invoke | 3294 | * Uses the priv->rx_handlers callback function array to invoke |
3705 | * the appropriate handlers, including command responses, | 3295 | * the appropriate handlers, including command responses, |
3706 | * frame-received notifications, and other notifications. | 3296 | * frame-received notifications, and other notifications. |
3707 | */ | 3297 | */ |
3708 | static void iwl4965_rx_handle(struct iwl_priv *priv) | 3298 | void iwl_rx_handle(struct iwl_priv *priv) |
3709 | { | 3299 | { |
3710 | struct iwl4965_rx_mem_buffer *rxb; | 3300 | struct iwl_rx_mem_buffer *rxb; |
3711 | struct iwl4965_rx_packet *pkt; | 3301 | struct iwl4965_rx_packet *pkt; |
3712 | struct iwl4965_rx_queue *rxq = &priv->rxq; | 3302 | struct iwl_rx_queue *rxq = &priv->rxq; |
3713 | u32 r, i; | 3303 | u32 r, i; |
3714 | int reclaim; | 3304 | int reclaim; |
3715 | unsigned long flags; | 3305 | unsigned long flags; |
@@ -3725,7 +3315,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv) | |||
3725 | if (i == r) | 3315 | if (i == r) |
3726 | IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); | 3316 | IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); |
3727 | 3317 | ||
3728 | if (iwl4965_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) | 3318 | if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2)) |
3729 | fill_rx = 1; | 3319 | fill_rx = 1; |
3730 | 3320 | ||
3731 | while (i != r) { | 3321 | while (i != r) { |
@@ -3804,7 +3394,7 @@ static void iwl4965_rx_handle(struct iwl_priv *priv) | |||
3804 | count++; | 3394 | count++; |
3805 | if (count >= 8) { | 3395 | if (count >= 8) { |
3806 | priv->rxq.read = i; | 3396 | priv->rxq.read = i; |
3807 | __iwl4965_rx_replenish(priv); | 3397 | __iwl_rx_replenish(priv); |
3808 | count = 0; | 3398 | count = 0; |
3809 | } | 3399 | } |
3810 | } | 3400 | } |
@@ -3812,7 +3402,84 @@ static void iwl4965_rx_handle(struct iwl_priv *priv) | |||
3812 | 3402 | ||
3813 | /* Backtrack one entry */ | 3403 | /* Backtrack one entry */ |
3814 | priv->rxq.read = i; | 3404 | priv->rxq.read = i; |
3815 | iwl4965_rx_queue_restock(priv); | 3405 | iwl_rx_queue_restock(priv); |
3406 | } | ||
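
Only fragments of iwl_rx_handle() appear in the hunks above, so here is a hedged sketch of the consumer loop they belong to. The hardware write-index accessor and the rxq->queue[] array are stand-ins/assumptions (they are not shown in this diff); the half-full watermark, the every-8-frames replenish and the final restock are taken from the hunks themselves.

        /* Stand-in for reading the closed-RB index from the device's status
         * area; the real accessor is not part of this diff. */
        static u32 example_get_hw_write_index(struct iwl_priv *priv);

        static void example_rx_handle(struct iwl_priv *priv)
        {
                struct iwl_rx_queue *rxq = &priv->rxq;
                struct iwl_rx_mem_buffer *rxb;
                unsigned long flags;
                u32 r, i;
                int fill_rx = 0, count = 0;

                r = example_get_hw_write_index(priv);   /* where the device stopped writing */
                i = rxq->read;                           /* where the driver stopped reading */

                /* if the device has chewed through more than half the ring,
                 * refill incrementally inside the loop, not just at the end */
                if (iwl_rx_queue_space(rxq) > (RX_QUEUE_SIZE / 2))
                        fill_rx = 1;

                while (i != r) {
                        rxb = rxq->queue[i];             /* 'queue[]' is assumed here */
                        rxq->queue[i] = NULL;

                        pci_unmap_single(priv->pci_dev, rxb->dma_addr,
                                         priv->hw_params.rx_buf_size,
                                         PCI_DMA_FROMDEVICE);

                        /* ... dispatch on the packet header via priv->rx_handlers[],
                         * reclaim command responses, free or detach rxb->skb ... */

                        spin_lock_irqsave(&rxq->lock, flags);
                        list_add_tail(&rxb->list, &rxq->rx_used);
                        spin_unlock_irqrestore(&rxq->lock, flags);

                        i = (i + 1) % RX_QUEUE_SIZE;     /* ring wrap */

                        if (fill_rx && ++count >= 8) {
                                rxq->read = i;
                                __iwl_rx_replenish(priv); /* priv->lock already held,
                                                           * per the comment above  */
                                count = 0;
                        }
                }

                rxq->read = i;                           /* backtrack one entry */
                iwl_rx_queue_restock(priv);
        }
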
3407 | /* Convert linear signal-to-noise ratio into dB */ | ||
3408 | static u8 ratio2dB[100] = { | ||
3409 | /* 0 1 2 3 4 5 6 7 8 9 */ | ||
3410 | 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ | ||
3411 | 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ | ||
3412 | 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ | ||
3413 | 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ | ||
3414 | 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ | ||
3415 | 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ | ||
3416 | 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ | ||
3417 | 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ | ||
3418 | 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ | ||
3419 | 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ | ||
3420 | }; | ||
3421 | |||
3422 | /* Calculates a relative dB value from a ratio of linear | ||
3423 | * (i.e. not dB) signal levels. | ||
3424 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | ||
3425 | int iwl4965_calc_db_from_ratio(int sig_ratio) | ||
3426 | { | ||
3427 | /* 1000:1 or higher just report as 60 dB */ | ||
3428 | if (sig_ratio >= 1000) | ||
3429 | return 60; | ||
3430 | |||
3431 | /* 100:1 or higher, divide by 10 and use table, | ||
3432 | * add 20 dB to make up for divide by 10 */ | ||
3433 | if (sig_ratio >= 100) | ||
3434 | return (20 + (int)ratio2dB[sig_ratio/10]); | ||
3435 | |||
3436 | /* We shouldn't see this */ | ||
3437 | if (sig_ratio < 1) | ||
3438 | return 0; | ||
3439 | |||
3440 | /* Use table for ratios 1:1 - 99:1 */ | ||
3441 | return (int)ratio2dB[sig_ratio]; | ||
3442 | } | ||
3443 | |||
3444 | #define PERFECT_RSSI (-20) /* dBm */ | ||
3445 | #define WORST_RSSI (-95) /* dBm */ | ||
3446 | #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) | ||
3447 | |||
3448 | /* Calculate an indication of rx signal quality (a percentage, not dBm!). | ||
3449 | * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info | ||
3450 | * about formulas used below. */ | ||
3451 | int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm) | ||
3452 | { | ||
3453 | int sig_qual; | ||
3454 | int degradation = PERFECT_RSSI - rssi_dbm; | ||
3455 | |||
3456 | /* If we get a noise measurement, use signal-to-noise ratio (SNR) | ||
3457 | * as indicator; formula is (signal dbm - noise dbm). | ||
3458 | * SNR at or above 40 is a great signal (100%). | ||
3459 | * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. | ||
3460 | * Weakest usable signal is usually 10 - 15 dB SNR. */ | ||
3461 | if (noise_dbm) { | ||
3462 | if (rssi_dbm - noise_dbm >= 40) | ||
3463 | return 100; | ||
3464 | else if (rssi_dbm < noise_dbm) | ||
3465 | return 0; | ||
3466 | sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; | ||
3467 | |||
3468 | /* Else use just the signal level. | ||
3469 | * This formula is a least squares fit of data points collected and | ||
3470 | * compared with a reference system that had a percentage (%) display | ||
3471 | * for signal quality. */ | ||
3472 | } else | ||
3473 | sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * | ||
3474 | (15 * RSSI_RANGE + 62 * degradation)) / | ||
3475 | (RSSI_RANGE * RSSI_RANGE); | ||
3476 | |||
3477 | if (sig_qual > 100) | ||
3478 | sig_qual = 100; | ||
3479 | else if (sig_qual < 1) | ||
3480 | sig_qual = 0; | ||
3481 | |||
3482 | return sig_qual; | ||
3816 | } | 3483 | } |
3817 | 3484 | ||
3818 | /** | 3485 | /** |
@@ -4248,7 +3915,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) | |||
4248 | /* uCode wakes up after power-down sleep */ | 3915 | /* uCode wakes up after power-down sleep */ |
4249 | if (inta & CSR_INT_BIT_WAKEUP) { | 3916 | if (inta & CSR_INT_BIT_WAKEUP) { |
4250 | IWL_DEBUG_ISR("Wakeup interrupt\n"); | 3917 | IWL_DEBUG_ISR("Wakeup interrupt\n"); |
4251 | iwl4965_rx_queue_update_write_ptr(priv, &priv->rxq); | 3918 | iwl_rx_queue_update_write_ptr(priv, &priv->rxq); |
4252 | iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]); | 3919 | iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[0]); |
4253 | iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]); | 3920 | iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[1]); |
4254 | iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]); | 3921 | iwl4965_tx_queue_update_write_ptr(priv, &priv->txq[2]); |
@@ -4263,7 +3930,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) | |||
4263 | * Rx "responses" (frame-received notification), and other | 3930 | * Rx "responses" (frame-received notification), and other |
4264 | * notifications from uCode come through here*/ | 3931 | * notifications from uCode come through here*/ |
4265 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | 3932 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { |
4266 | iwl4965_rx_handle(priv); | 3933 | iwl_rx_handle(priv); |
4267 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | 3934 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); |
4268 | } | 3935 | } |
4269 | 3936 | ||
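
The wakeup branch re-posts the RX and TX write pointers because, while the device is in power-save, the driver avoids touching MAC registers and only flags the queue as needing an update (the need_update field set during nic_init). An illustrative sketch of that deferral follows; the sleep predicate is a made-up placeholder, and in the driver this check lives inside the update routine itself rather than at the call site.

        /* Illustrative only: defer the register write while the device sleeps,
         * and let the CSR_INT_BIT_WAKEUP handler above replay it.
         */
        static void example_post_rx_write_ptr(struct iwl_priv *priv,
                                              struct iwl_rx_queue *rxq)
        {
                if (example_device_is_asleep(priv)) {   /* placeholder predicate */
                        rxq->need_update = 1;           /* replayed on wakeup    */
                        return;
                }
                iwl_rx_queue_update_write_ptr(priv, rxq);
                rxq->need_update = 0;
        }
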
@@ -5452,7 +5119,7 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data) | |||
5452 | return; | 5119 | return; |
5453 | 5120 | ||
5454 | mutex_lock(&priv->mutex); | 5121 | mutex_lock(&priv->mutex); |
5455 | iwl4965_rx_replenish(priv); | 5122 | iwl_rx_replenish(priv); |
5456 | mutex_unlock(&priv->mutex); | 5123 | mutex_unlock(&priv->mutex); |
5457 | } | 5124 | } |
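
iwl4965_bg_rx_replenish() is the process-context half of the refill path: it runs off the driver workqueue, takes priv->mutex, and performs a full iwl_rx_replenish(). The wiring that gets it scheduled is outside this hunk; below is a hedged sketch of the usual pattern, in which the work-item field, the workqueue pointer and the watermark name are assumptions rather than lines from this diff.

        /* During setup, bind the work item to its handler: */
        INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);

        /* From the restock path, when rx_free is running dry, punt the refill
         * to process context where skb allocation can block safely: */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                queue_work(priv->workqueue, &priv->rx_replenish);
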
5458 | 5125 | ||
@@ -7309,7 +6976,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) | |||
7309 | iwl4965_dealloc_ucode_pci(priv); | 6976 | iwl4965_dealloc_ucode_pci(priv); |
7310 | 6977 | ||
7311 | if (priv->rxq.bd) | 6978 | if (priv->rxq.bd) |
7312 | iwl4965_rx_queue_free(priv, &priv->rxq); | 6979 | iwl_rx_queue_free(priv, &priv->rxq); |
7313 | iwl4965_hw_txq_ctx_free(priv); | 6980 | iwl4965_hw_txq_ctx_free(priv); |
7314 | 6981 | ||
7315 | iwlcore_clear_stations_table(priv); | 6982 | iwlcore_clear_stations_table(priv); |